branch_name
stringclasses
149 values
text
stringlengths
23
89.3M
directory_id
stringlengths
40
40
languages
listlengths
1
19
num_files
int64
1
11.8k
repo_language
stringclasses
38 values
repo_name
stringlengths
6
114
revision_id
stringlengths
40
40
snapshot_id
stringlengths
40
40
refs/heads/master
<repo_name>mbortola/python-jgitflow-facilities<file_sep>/release_start.py from git import Repo import os.path as osp import shutil import argparse from constants import * from secrets import PROJECT_GIT env = '' def get_start_branch(repo, version): """ Check for release branch, if found ask for confirm, else return develop :param version: :param repo: :return: """ for branch in repo.remotes['origin'].refs: result = re.match('origin/release/(.*)', branch.name) if result: release_version = result.group(1) if release_version != version: sys.stdout.write("Release branch found [%s], but different from input version, bye" % branch.name) sys.exit() sys.stdout.write("Release branch found [%s], start from here? [Y/n]" % branch.name) choice = raw_input().lower() if choice == 'y' or choice == '': return False, 'release/%s' % result.group(1) else: sys.stdout.write("ok, then do it by yourself.") sys.exit() return True, 'develop' def find_next_rc(repo, version): max_rc = 0 for tag in repo.tags: if tag.name.startswith(version): result = re.match('.*-RC(\d+)', tag.name) if not result: sys.stderr.write("You made some trouble with tag names, good luck") sys.exit() rc_found = int(result.group(1)) if max_rc < rc_found: max_rc = rc_found if max_rc == 0: sys.stderr.write("No valid tag found for this version, what have you done??") sys.exit() next_rc = rc_found + 1 sys.stdout.write('Create %s-RC%s? 
[Y/n]' % (version, next_rc)) choice = raw_input().lower() if choice == 'y' or choice == '': return next_rc else: sys.stdout.write("ok, then do it by yourself.") sys.exit() def release_start(version, project): """ :param version: :param project: """ project_path = get_project_path(project) gt = Repo.clone_from(PROJECT_GIT % project, project_path).git # check for a release branch repo = Repo(project_path) is_develop, branch_to_checkout = get_start_branch(repo, version) gt.checkout(branch_to_checkout) execute_command('ssh-add ~/.ssh/id_rsa') root_path = find_root_project(project_path) print "Root path :%s" % root_path os.chdir(root_path) if is_develop: # more stuff to do development_version = calculate_next_version(version) sys.stdout.write('Create release branch release/%s? [Y/n]' % version) choice = raw_input().lower() if choice != 'y' and choice != '': sys.stdout.write("ok, then do it by yourself.") sys.exit() execute_command(MAVEN_RELEASE_START_CMD % (env, version, development_version)) rc_number = '1' else: # get the RC tag, add +1 and retag rc_number = find_next_rc(repo, version) # repo.git.custom_environment(GIT_SSH='/home/michele/mvn-jgitflow') tag_name = TAG_RC_NAME % (version, rc_number) execute_command(MAVEN_VERSION_SET_CMD % tag_name) execute_command(MAVEN_VERSION_COMMIT_CMD) for item in repo.index.diff(None): print 'adding %s' % item.a_path pom_to_add = osp.join(project_path, item.a_path) repo.index.add([pom_to_add]) print repo.git.status() repo.index.commit(GIT_COMMIT_COMMENT % tag_name) release_branch = 'release/%s' % version repo.create_tag(ref=release_branch, path=tag_name) repo.remote(name='origin').push(tags=True) repo.remote(name='origin').push(all=True) # merge request from release to master if rc_number == '1': print "Creating merge request from release to master" create_merge_request(release_branch, project) print "Done (I hopes)!!" 
shutil.rmtree(project_path) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Input parameters') parser.add_argument('version', metavar='version', type=str, help='The version to release (without SNAPSHOT or RC labels)') parser.add_argument('repo_name', metavar='repository', type=str, help='Repository name') parser.add_argument('-j', '--jre', nargs='?', type=str, default='6', help='JRE version') parser.add_argument('-t', '--team', nargs='?', type=str, default='lcert', help='Development Team') args = parser.parse_args() print '%s %s ' % (args.version, args.repo_name) env = set_environment(args.jre) try: release_start(args.version, args.repo_name) except Exception as err: print("error: {0}".format(err)) shutil.rmtree(get_project_path(args.repo_name)) <file_sep>/release_client.py from git import Repo import shutil import tempfile import argparse from constants import * # clients usually are a java-6 affair.. env = '' def release_client(version, project): """ :param project: the project to release :param version: the version to release """ # execute_command(CHANGE_JAVA_VERSION_COMMAND % 6) project_path = '%s/%s' % (tempfile.gettempdir(), project) gt = Repo.clone_from(PROJECT_GIT % project, project_path).git # check for a release branch repo = Repo(project_path) gt.checkout('develop') execute_command(ADD_SSH_CERT_CMD) root_path = find_root_project(project_path) print "Root path :%s" % root_path os.chdir(root_path) # more stuff to do development_version = calculate_next_version(version) sys.stdout.write('Create tag %s? 
[Y/n]' % version) if raw_input().lower() != 'y' and raw_input() != '': sys.stdout.write("ok, then do it by yourself.") sys.exit() execute_command(MAVEN_RELEASE_START_CMD % (env, version, development_version)) create_merge_request('release/%s' % version, project) execute_command(MAVEN_RELEASE_FINISH_CMD % env) remote = repo.remote(name='origin') remote.push(all=True) remote.push(tags=True) # remote.push(refspec=':release/%s' % version) print "Done (I hopes)!!" shutil.rmtree(project_path) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Input parameters') parser.add_argument('version', metavar='version', type=str, help='The version to release (without SNAPSHOT or RC labels)') parser.add_argument('repo_name', metavar='repository', type=str, help='Repository name') parser.add_argument('-j', '--jre', nargs='?', type=str, default='6', help='JRE version') args = parser.parse_args() print '%s %s ' % (args.version, args.repo_name) env = set_environment(args.jre) try: release_client(args.version, args.repo_name) except: shutil.rmtree(get_project_path(args.repo_name)) <file_sep>/constants.py import sys import subprocess import fnmatch import os import re import gitlab.v4 import gitlab import tempfile from secrets import GITLAB_HOST from secrets import API_TOKEN MAVEN_RELEASE_START_CMD = '%s jgitflow:release-start -DenableSshAgent=true -DreleaseVersion=%s -DdevelopmentVersion=%s' MAVEN_RELEASE_FINISH_CMD = '%s jgitflow:release-finish -DenableSshAgent=true' MAVEN_VERSION_SET_CMD = 'mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' MAVEN_VERSION_COMMIT_CMD = 'mvn versions:commit' ADD_SSH_CERT_CMD = 'ssh-add ~/.ssh/id_rsa' POM = 'pom.xml' TAG_NAME = '%s' TAG_RC_NAME = '%s-RC%s' GIT_COMMIT_COMMENT = 'Release Candidate %s' API_TOKEN_ENV = "API_TOKEN" def execute_command(command): process = subprocess.call([command], shell=True) print "{%s}: %s" % (command, process) if 0 != process: sys.stderr.write("Error on command execution, bye") sys.exit() def 
find_root_project(project_path): def shortest_path(paths): return min(paths, key=lambda path: path.count('/')) matches = [] for root, dirnames, filenames in os.walk(project_path): for filename in fnmatch.filter(filenames, POM): matches.append(root) print '%s - %s' % (root, filename) return shortest_path(matches) def create_merge_request(release_branch, project_name, approvers): gl = gitlab.Gitlab(GITLAB_HOST, API_TOKEN) gl.auth() project = gl.projects.get('lcert/%s' % project_name) print 'Project id: %s' % project.id mr = project.mergerequests.create({'source_branch': release_branch, 'target_branch': 'master', 'title': 'Release_finish', 'description': 'Automatically opened Merge-request'}, project_id=project.id) print 'Created merge request: %s' % mr # set the approvers mr_approvals = mr.approvals.get() mr_approvals.approvals_before_merge = approvers mr_approvals.save() print 'Setted approvals to %s' % approvers return mr def calculate_next_version(release_version): regex_result = re.search('(\d+).(\d+).(\d+)', release_version) last_number = 1 + int(regex_result.group(3)) return '%s.%s.%s-SNAPSHOT' % (regex_result.group(1), regex_result.group(2), last_number) def set_environment(jre): # sys.stdout.write("select Java environment? [6|7|8]: ") # choice = int(raw_input().lower()) choice = int(jre) if 6 == choice: # environment = choice command = 'mvn3_0_5_jdk16' elif 7 == choice: command = 'mvn3_3_9_jdk17' elif 8 == choice: command = 'mvn' # default is mvn 3.3.9 and jdk 8 else: sys.stdout.write("what?? 
No way") sys.exit() print "Setting environment for java %s" % choice # execute_command(JAVA_MAVEN_ENV_CMD % environment) return command def get_project_path(project): return '%s/%s' % (tempfile.gettempdir(), project) def handle_opened_mr(project): api_key = os.environ.get(API_TOKEN_ENV) gl = gitlab.Gitlab(GITLAB_HOST, api_key) gl.auth() project_id = gl.projects.get('lcert/%s' % project).id list = gl.project_mergerequests.list(project_id=project_id, state='opened') if len(list) == 1: print "You have an opened Merge request, force closing [y/N]? " choice = raw_input().lower() if choice != 'y': sys.stdout.write("ok, then do your stuff.") sys.exit() print "Ok, i will close it" list[0].state_event = 'close' list[0].save() elif len(list) > 1: print "You have too much opened merge request, Bye! " sys.exit() else: print "No opened Merge request found" <file_sep>/slack_notifier.py __author__ = 'michele' import requests import json from secrets import SLACK_TOKEN base_url = 'https://slack.com/api/' method_auth_test = 'auth.test' method_channel_list = 'channels.list' method_groups_list = 'groups.list' method_im_list = 'im.list' method_chat_delete = 'chat.delete' method_chat_post = 'chat.postMessage' def _token(): return '?token=' + SLACK_TOKEN def send_message(channel_id, message, username, as_user=False): parameters = { 'token': SLACK_TOKEN, 'channel': channel_id, 'as_user': as_user, 'username': username, 'text': message } print json.dumps(parameters) response = requests.get(base_url + method_chat_post, params=parameters) return json.loads(response.content) def get_base_info(): response = requests.get(base_url + method_auth_test + '?token=' + SLACK_TOKEN) return json.loads(response.content) def get_channel_list(exclude_archieved=False): if exclude_archieved: exclude_archieved_param = '&exclude_archieved=1' else: exclude_archieved_param = '&exclude_archieved=0' response = requests.get(base_url + method_channel_list + _token() + exclude_archieved_param) return 
json.loads(response.content).get('channels') def get_groups_list(exclude_archieved=False): if exclude_archieved: exclude_archieved_param = '&exclude_archieved=1' else: exclude_archieved_param = '&exclude_archieved=0' response = requests.get(base_url + method_groups_list + _token() + exclude_archieved_param) return json.loads(response.content).get('groups') def get_im_list(): response = requests.get(base_url + method_im_list + _token()) return json.loads(response.content).get('ims') def delete_message(ts, channel): ts_argument = '&ts=' + ts channel_argument = '&channel=' + channel response = requests.get(base_url + method_chat_delete + _token() + ts_argument + channel_argument) def _get_history(chat_type, channel, lastest=None, oldest=None, count=100, include_unreads=True): channel_arg = '&channel=' + channel lastest_arg = ('&lastest=' + lastest) if lastest else '' oldest_arg = ('&oldtest=' + oldest) if oldest else '' count_arg = '&count=' + str(count) include_unreads_arg = '&unreads=1' if include_unreads else '' response = requests.get(base_url + chat_type + '.history' + _token() + channel_arg + lastest_arg + oldest_arg + count_arg + include_unreads_arg) response_dict = json.loads(response.content) return response_dict.get('messages'), response_dict.get('has_more') def delete_all_channel_messages(channel_type, channel): delete_all = lambda x: delete_message(x.get('ts'), channel) messages, has_more = _get_history(channel_type, channel) map(delete_all, messages) while has_more: messages, has_more = _get_history(channel_type, channel) map(delete_all, messages) def find_group_id_from_name(group_list, group_name): for group in group_list: if group.get('name') == group_name: return group.get('id') return None if __name__ == "__main__": base_info = send_message('G720WLR7B', username='Libanese del bar', message='OK') print base_info <file_sep>/release_finish.py from git import Repo import os.path as osp import shutil import argparse from constants import * env = '' def 
get_start_branch(repo): """ Check for release branch, if found ask for confirm, else return develop :param repo: :return: """ for branch in repo.remotes['origin'].refs: result = re.match('origin/release/(.*)', branch.name) if result: release_version = result.group(1) sys.stdout.write("Release branch found [%s], start from here? [Y/n]" % branch.name) choice = raw_input().lower() if choice == 'y' or choice == '': return release_version else: sys.stderr.write("Ok, as you wish, bye!!") sys.exit() sys.stderr.write("No release branch found!!") sys.exit() def release_finish(project): """ :param project: """ execute_command(ADD_SSH_CERT_CMD) # execute_command(CHANGE_JAVA_HOME_COMMAND % JAVA_TARGET) project_path = get_project_path(project) gt = Repo.clone_from(PROJECT_GIT % project, project_path).git # check for a release branch repo = Repo(project_path) version = get_start_branch(repo) release_branch = 'release/%s' % version gt.checkout(release_branch) root_path = find_root_project(project_path) print "Root path :%s" % root_path os.chdir(root_path) execute_command(MAVEN_VERSION_SET_CMD % version) execute_command(MAVEN_VERSION_COMMIT_CMD) for item in repo.index.diff(None): print 'adding %s' % item.a_path pom_to_add = osp.join(project_path, item.a_path) repo.index.add([pom_to_add]) print repo.git.status() repo.index.commit(GIT_COMMIT_COMMENT % version) #handle_opened_mr(project) # TODO do it in release_start, in the end #create_merge_request(release_branch, project) execute_command(MAVEN_RELEASE_FINISH_CMD % env) remote = repo.remote(name='origin') remote.push(all=True) remote.push(tags=True) remote.push(refspec=':%s' % release_branch) print "Done (I hopes)!!" 
shutil.rmtree(project_path) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Input parameters') parser.add_argument('repo_name', metavar='repository', type=str, help='Repository name') parser.add_argument('-j', '--jre', nargs='?', type=str, default='6', help='JRE version') parser.add_argument('-t', '--team', nargs='?', type=str, default='lcert', help='Development Team') args = parser.parse_args() print '%s ' % args.repo_name env = set_environment(args.jre) try: release_finish(args.repo_name) except Exception as err: print("error: {0}".format(err)) shutil.rmtree(get_project_path(args.repo_name)) <file_sep>/README.md # python-jgitflow-facilities <file_sep>/gitlab_api_playground.py import gitlab.v4 import gitlab import slack_notifier from secrets import * from datetime import datetime from datetime import timedelta import json merge_requests = [] approvers_stats = {} DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f' APPROVED_MR_TEXT = 'approved this merge request' NOW = datetime.now() TWO_WEEKS_AGO = NOW - timedelta(days=7) def scan_project(project): try: mrs = project.mergerequests.list() except gitlab.exceptions.GitlabListError as err: print("error: {0}".format(err)) return for mr in mrs: days_passed = (NOW - datetime.strptime(mr.created_at[:-6], DATETIME_FORMAT)).days if valid_merge_request(mr): approvals = mr.approvals.get() discussions = mr.discussions.list() for discussion in discussions: notes = discussion.attributes['notes'] for note in notes: note_created_at_days = (NOW - datetime.strptime(note['created_at'][:-6], DATETIME_FORMAT)).days if note['body'] == APPROVED_MR_TEXT and 14 > note_created_at_days: user = note['author']['name'] fill_approvals_statistics(mr, user) if mr.state == 'opened': you_have_approved = False merge_request = {} for approver in approvals.approved_by: if approver.get('user').get('id') == YOUR_USER_ID: you_have_approved = True if approvals.approvals_left > 0: merge_request['url'] = mr.web_url 
merge_request['approvals_left'] = approvals.approvals_left merge_request['approved_by_you'] = you_have_approved merge_request['days'] = days_passed if 'master' == mr.target_branch: print 'skipped mr to master' else: merge_requests.append(merge_request) def fill_approvals_statistics(merge_request, user): if user in approvers_stats: approvers_stats[user]['approvals'].append(merge_request.web_url) else: item = {'approvals': []} item.get('approvals').append(merge_request.web_url) approvers_stats[user] = item def valid_merge_request(merge_request): # days_passed = (NOW - datetime.strptime(merge_request.created_at, DATETIME_FORMAT)).days upd_days_passed = (NOW - datetime.strptime(merge_request.updated_at[:-6], DATETIME_FORMAT)).days if upd_days_passed > 14 and merge_request.state != 'opened': # invalid mr return False return True def print_result(): sorted_list = sorted(merge_requests, key=lambda k: k['days'], reverse=True) string_list = [] for item in sorted_list: string_list.append("Merge request: %s Approvals left %s Opened %s days ago.\n" % (item['url'], item.get('approvals_left'), item['days'])) print ''.join(string_list) print json.dumps(approvers_stats) def get_opened_merge_requests(): current_page = 1 per_page = 30 gl = gitlab.Gitlab(GITLAB_HOST, API_TOKEN) gl.auth() iterate = True while iterate: project_list = gl.projects.list(page=current_page, per_page=per_page) print 'Readed %s projects at page %s' % (len(project_list), current_page) for prj in project_list: scan_project(prj) if len(project_list) < per_page: iterate = False current_page = current_page + 1 if __name__ == "__main__": get_opened_merge_requests() print 'Scan Finished!' print_result() # slack_notifier.send_message(SLACK_CR_CHANNEL, message=message, username='BOT')
7aafb363291fbede0b3128f67eb4a6a87db16b50
[ "Markdown", "Python" ]
7
Python
mbortola/python-jgitflow-facilities
2609ac1a6dd4decef2a7a5a553b6cb77e8f636e3
4aee6929c6c5f1b7c889d7e431f9cf8dad03c0a8
refs/heads/master
<file_sep><?php $img = $_POST['image']; $folderPath = "people/images/"; $image_parts = explode(";base64,", $img); $image_type_aux = explode("image/", $image_parts[0]); $image_type = $image_type_aux[1]; $image_base64 = base64_decode($image_parts[1]); $fileName = uniqid() . '.jpg'; $file = $folderPath . $fileName; file_put_contents($file, $image_base64); $name = $_POST['name']; $relation = $_POST['relation']; $text = $name." - ".$relation; $namesPath = "people/"; $file = $namesPath.'names.txt'; $current = file_get_contents($file); $current .= $text."\n"; file_put_contents($file, $current); header("Location: index.php"); ?> <file_sep><?php ?> <html> <head> <title>MindEye</title> <link href="https://fonts.googleapis.com/css?family=Dosis:800&display=swap" rel="stylesheet"> <link href="https://fonts.googleapis.com/css?family=Catamaran:400,600,700" rel="stylesheet"> <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/webcamjs/1.0.25/webcam.min.js"></script> <style type = "text/css"> body { margin: 0; padding: 0; background-image: linear-gradient(141deg, #9fb8ad 0%, #1fc8db 51%, #2cb5e8 75%); max-height: 100%; background-attachment: fixed; background-position: center; background-repeat: no-repeat; background-size: cover; } p { margin-bottom: 40px !important; } ::placeholder { color: white; opacity: 1; } .title { color: rgba(255, 251, 244, 0.95); font-family: "Dosis", sans-serif; font-size:95px; text-align: center; margin-top: 30px; } .header { font-size: 70px; margin-bottom: 70px; } .submit { font-family: "Catamaran", sans-serif; font-size:19px; padding-bottom: 5px; padding-top: 5px; padding-left: 13px; padding-right: 13px; margin-top: 10px; margin-right: 0px; color: #ffffff; border: 0px solid; background-color: rgba(255, 251, 244, 0.5); cursor: pointer; transition-duration: 0.3s; } .submit:hover { background-color: rgba(255, 251, 244, 0.75); } #results{ margin-top: 10px; width:490px; 
height:370px; border: 2px solid #ffffff; border-radius: 15px; overflow: hidden; margin-left:auto; margin-right:0; text-align: left; } #takepic { float: left; margin-top: 20px; } #submit { float: right; } #pics { float: right; width:calc(50% - 40px); margin-left:40px; text-align: right; } #pics2 { margin-right:40px; text-align: left; float:right; width:calc(50% - 40px); } .int{ float: right; width: calc(202.31px - 5px); font-family: "Catamaran", sans-serif; border: 2px solid rgba(255, 251, 244, 0.5); font-size: 19px; color: #ffffff; transition-duration: 0.3s; background-color: transparent; margin-top: 10px; margin-left: 5px; padding-bottom: 3.5px; padding-top: 3.5px; padding-left: 13px; padding-right: 13px; } .int:focus { outline: none; } </style> </head> <body> <div class="header"> <p class="title">mindeye</p> </div> <div class = "container"> <form method="POST" action="storeImage.php"> <div class="row"> <div id="pics"> <div id="my_camera"></div> <input type=button value="Take Snapshot" onClick="take_snapshot()" id = "takepic" class="submit"> <input type="hidden" name="image" class="image-tag"> </div> <div id="pics2"> <div id="results"></div> <div class="col-md-12 text-center"> <br/> <input class="int" type="text" placeholder = "Name" name = "name"> <input class="int" type="text" placeholder = "Relation" name = "relation"> <button class="submit" id = "submit">Submit</button> </div> </div> </div> </form> </div> <script language="JavaScript"> Webcam.set({ width: 490, height: 390, image_format: 'jpeg', jpeg_quality: 180 }); Webcam.attach( '#my_camera' ); function take_snapshot() { Webcam.snap( function(data_uri) { $(".image-tag").val(data_uri); document.getElementById('results').innerHTML = '<img src="'+data_uri+'"/>'; } ); } </script> </body> </html> <file_sep># mindeye Tackling the problem of Alzheimer's one name at a time
44b7eb02f52ef928e5dd973f7257c6d29c1ddced
[ "Markdown", "PHP" ]
3
PHP
Dylan102938/mindeye
8298b8af9cafdc7b0ea95b1de32bd55979038e95
42dfbf4b725752cdd66e2c719b57921dbd64a74a
refs/heads/master
<file_sep>/* * File: app/view/MyContainer1.js * * This file was generated by Sencha Architect version 3.0.3. * http://www.sencha.com/products/architect/ * * This file requires use of the Sencha Touch 2.3.x library, under independent license. * License of Sencha Architect does not include license for Sencha Touch 2.3.x. For more * details see http://www.sencha.com/license or contact <EMAIL>. * * This file will be auto-generated each and everytime you save your project. * * Do NOT hand edit this file. */ Ext.define('MyAppOne.view.MyContainer1', { extend: 'Ext.Container', requires: [ 'Ext.dataview.List', 'Ext.XTemplate' ], config: { layout: 'fit', modal: false, items: [ { xtype: 'list', ui: 'round', modal: false, itemTpl: [ '<div>{text}</div>' ], grouped: true } ] } });
1439dce5ac751c6071699bb3045d3448c3612cf0
[ "JavaScript" ]
1
JavaScript
tanjaposch/MyAppOne
5d3aeae431a3d0eea55ca45363fb412f109b99c9
da45e0cee90859cd7e37354b3027edb079741708
refs/heads/master
<repo_name>kumarsandeep91/StackReact<file_sep>/src/helpers.ts export const dateConvert: Function = (ds: number) => { let dt = new Date(0); dt.setUTCSeconds(ds); return dt.toDateString(); };
f7570fc69fe751be0c92bc8e1fcff8c59511bc21
[ "TypeScript" ]
1
TypeScript
kumarsandeep91/StackReact
491308fe7bd7aee8d78252eda2cfacfae495a813
988069d7bea73f61b763b1bba61a170a74f8544f
refs/heads/master
<repo_name>jhh03260318/sn<file_sep>/src/store/index.js // 状态管理层 //引入vue import Vue from 'vue'; // 引入vuex import Vuex from 'vuex'; Vue.use(Vuex); import { state, mutations, getters } from './mutitons'; import actions from './actions'; import resign from './modules/resign'; // 暴露 export default new Vuex.Store({ state, mutations, actions, getters, modules: { resign } })<file_sep>/src/store/mutitons.js //作用范围:state,mutations,,getters //将所有的内容暴露出去,然后在store中的index.js中调用 //用于存储数据 export const state = { }; //用于修改state中的数据 export const mutations = { }; //用于优化传值 export const getters = { };<file_sep>/src/router/index.js import Vue from 'vue' import Router from 'vue-router' import login from '../pages/login/login.vue'; Vue.use(Router) const vm = new Vue(); const router = new Router({ routes: [ { path: '/', component: login }, { path: '/home', component: () => import('../pages/home.vue'), meta: { title: '首页' }, children: [ { path: 'resignlist', name: '注册信息列表', meta: { title: '注册信息列表' }, component: () => import('../pages/resign/resignlist.vue'), }, { path: 'Authorized', name: '注册信息查询', meta: { title: '注册信息查询' }, component: () => import('../pages/resign/Authorized.vue'), }, { path: 'resign', name: '注册', meta: { title: '注册' }, component: () => import('../pages/resign/resign.vue'), } // { // path: 'applypk', // name: '注册', // meta: { title: 'pk码申请' }, // component: () => import('../pages/resign/applypk.vue'), // } ] }, { path: '*', redirect: '/' }, ] }) // 当用户的没有登录时,禁止进入系统 // router.beforeEach((to, form, next) => { // var time = Date.now(); // // 判断本地存储是否有msg值 // const item = JSON.parse(localStorage.getItem("item") || "{}"); // // 过期时间59分钟 // const overtime = 59 * 60 * 1000; // // 判断时间是否过期 // if ((time - item.time) < overtime == false) { // console.log(111); // vm.$message.info('登录失效,请重新登录!'); // localStorage.removeItem("item") // } // if (to.path != "/") { // // 判断本地存储中的item是否为空 // if (item == {}) { // next("/"); // return; // } // // 如果本地存储item有值 // if (item.msg) { // next() // } 
else { // next("/") // } // } else { // next(); // } // }) export default router;<file_sep>/src/utils/request.js // 引入axios import axios from 'axios'; //基础路径 const baseUrl = "/api"; // const baseUrl = ""; // 请求拦截 axios.interceptors.request.use(config => { return config; }, err => { return Promise.resolve(err); }) // 响应拦截 axios.interceptors.response.use(res => { // console.group("本次请求的路径为:" + res.config.url) // console.log(res); return res; }); //注册信息列表 export const resignList = () => { return axios({ method: 'get', url: baseUrl + '/all', }) }; // 授权注册信息列表 export const authorized = (data) => { return axios({ method: 'post', url: baseUrl + '/search', data: data }); }; // 注册 export const resign = (data) => { return axios({ method: 'post', url: baseUrl + '/mksn', data: data }); }; // 登录 export const login = (data) => { return axios({ method: 'post', url: baseUrl + '/auth', data }) } <file_sep>/src/store/modules/resign.js import Vue from 'vue'; const vm = new Vue(); import router from '../../router/index'; // 请求接口 import { resignList, authorized } from '../../utils/request'; import {sntype} from '../../utils/public' const state = { resignList: [],//注册信息列表 }; const mutations = { changresignList(state, arr) { state.resignList = arr; } }; const actions = { // 注册信息列表 ResignListActions(context) { resignList().then(res => { var arr = []; if (res.status === 200) { res.data.forEach((item, index) => { // console.log(item[0].substring(31)); var temp = {}; item.forEach((value, index) => { temp[index] = value; }); temp.key = index; sntype(temp,item[0].substring(32)); arr.push(temp); }) context.commit("changresignList", arr); } }).catch((err)=>{ // 当放回的状态码为401时,跳转到登录页 if(err.response.status==401){ vm.$message.info('登录过期,请重新登录!'); router.push("/"); } }) }, }; const getters = { resignlist(state) { return state.resignList; } }; export default { state, mutations, actions, getters, namespaced: true }<file_sep>/src/store/actions.js //作用范围:actions //将所有的内容暴露,以便store中的index.js文件能够引用 export 
default { }
851d15aa0f0a7abb62f5d44dfcfa08fd95270632
[ "JavaScript" ]
6
JavaScript
jhh03260318/sn
a4e96a799c5926924c97eaea0b0b717d103f4149
6798197cceb8746174166141cf981cae8c0d4d20
refs/heads/master
<repo_name>tw-spic/charchapoint<file_sep>/models/zone.go package models import ( "database/sql" ) type Zone struct { Id *int64 Name *string Description *string Lat *float64 Long *float64 Radius *float64 } const ( SaveZoneQuery = `INSERT INTO zones (name,description,lat,long,radius) VALUES ($1,$2,$3,$4,$5)` GetZonesWithinRadiusQuery = `SELECT id,name,description,lat,long,radius FROM zones WHERE ACOS( SIN( RADIANS( lat ) ) * SIN( RADIANS( $1 ) ) + COS( RADIANS( lat ) ) * COS( RADIANS( $1 )) * COS( RADIANS( long ) - RADIANS( $2 )) ) * 6380 < $3;` //6380 is approx radius of earth in km ) func (z *Zone) SaveToDb(db *sql.DB) error { _, err := db.Exec(SaveZoneQuery, z.Name, z.Description, z.Lat, z.Long, z.Radius) return err } // GetZonesWithinRadiusFrom returns all the zones within the given radius from given point. // Input is latitude and longitude of the point in radian and radius in km. func GetZonesWithinRadiusFrom(lat, long, radius float64, db *sql.DB) ([]Zone, error) { zones := make([]Zone, 0) rows, err := db.Query(GetZonesWithinRadiusQuery, lat, long, radius) if rows != nil { defer rows.Close() } if err != nil { return zones, err } for rows.Next() { z := Zone{} err := rows.Scan(&z.Id, &z.Name, &z.Description, &z.Lat, &z.Long, &z.Radius) if err != nil { return zones, err } zones = append(zones, z) } return zones, nil } <file_sep>/handlers/zone.go package handlers import ( "database/sql" "encoding/json" "log" "net/http" "strconv" m "github.com/tw-spic/charchapoint/models" ) func CreateZoneHandler(db *sql.DB) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { if r.Body == nil { log.Println("Create zone: Empty request body") w.WriteHeader(http.StatusBadRequest) return } decoder := json.NewDecoder(r.Body) var z m.Zone err := decoder.Decode(&z) if err != nil { log.Println("Create zone:", err) w.WriteHeader(http.StatusBadRequest) return } if z.Name == nil || z.Lat == nil || z.Long == nil || z.Radius == nil { 
log.Println("Create zone empty value in request body: ", z) w.WriteHeader(http.StatusBadRequest) return } err = z.SaveToDb(db) if err != nil { log.Println("Create zone save to db :", err) w.WriteHeader(http.StatusInternalServerError) return } w.WriteHeader(http.StatusCreated) } } func GetZoneHandler(db *sql.DB) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { lat, err := strconv.ParseFloat(r.FormValue("lat"), 64) if err != nil { log.Println("Get zone invalid lat: ", err) w.WriteHeader(http.StatusBadRequest) return } long, err := strconv.ParseFloat(r.FormValue("long"), 64) if err != nil { log.Println("Get zone invalid long: ", err) w.WriteHeader(http.StatusBadRequest) return } radius, err := strconv.ParseFloat(r.FormValue("radius"), 64) if err != nil { log.Println("Get zone invalid radius: ", err) w.WriteHeader(http.StatusBadRequest) return } zones, err := m.GetZonesWithinRadiusFrom(lat, long, radius, db) if err != nil { log.Println("Get zone error fetching data from db:", err) w.WriteHeader(http.StatusInternalServerError) return } data, err := json.Marshal(zones) if err != nil { log.Println("Get zone error converting to JSON:", err) w.WriteHeader(http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(data) } } <file_sep>/mobileapp/app/containers/RandomUserContainer.js import React, { Component } from 'react'; import { AppRegistry, StyleSheet, Text, View, Navigator, TouchableNativeFeedback, Image } from 'react-native'; import { connect } from 'react-redux'; var getRandomUser = require('../actions/getRandomUser'); class RandomUserContainer extends Component { constructor(props) { super(props); } componentDidMount() { this.props.getRandomUser(); } renderUserInfo(user) { return ( <View> <Image style={{ width: 120, height: 120, backgroundColor: 'transparent', marginLeft: 30, borderRadius: 20 }} resizeMode={Image.resizeMode.contain} source={{uri: 
user.picture.large}} /> <Text>First Name: { user.name.first }</Text> <Text>Last Name: { user.name.last }</Text> </View> ); } render() { return ( <View> <Text>Random User Container</Text> {(this.props.loading) && <Text>Loading user data...</Text>} {(this.props.user) && this.renderUserInfo(this.props.user)} </View> ); } } function mapStateToProps(state) { var loading = state.loading; var user = (state.user) ? state.user : null; return { user, loading }; } const mapDispatchToProps = { getRandomUser } module.exports = connect(mapStateToProps, mapDispatchToProps)(RandomUserContainer); <file_sep>/Makefile build: compile copy-resources compile: go build -o out/charchapoint main/main.go copy-resources: mkdir -p out/public cp public/index.html out/public/ cp config.json out/<file_sep>/handlers/message.go package handlers import ( "database/sql" "encoding/json" "log" "net/http" "time" m "github.com/tw-spic/charchapoint/models" ) func CreateMessageHandler(db *sql.DB) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { if r.Body == nil { log.Println("Create message: Empty request body") w.WriteHeader(http.StatusBadRequest) return } decoder := json.NewDecoder(r.Body) var msg m.Message err := decoder.Decode(&msg) if err != nil { log.Println("Create message:", err) w.WriteHeader(http.StatusBadRequest) return } if msg.DeviceId == nil || msg.Message == nil { log.Println("Create message empty value in request body: ", msg) w.WriteHeader(http.StatusBadRequest) return } now := time.Now() msg.MsgTime = &now err = msg.SaveToDb(db) if err != nil { log.Println("Create message save to db :", err) w.WriteHeader(http.StatusInternalServerError) return } w.WriteHeader(http.StatusCreated) } } <file_sep>/handlers/zone_test.go package handlers_test import ( "fmt" "net/http" "net/http/httptest" "strings" "testing" h "github.com/tw-spic/charchapoint/handlers" "github.com/stretchr/testify/assert" sqlmock "gopkg.in/DATA-DOG/go-sqlmock.v1" ) 
////////////////////////////////// Create Zone ///////////////////////////////////////////////// func TestCreateZoneSuccess(t *testing.T) { req, err := http.NewRequest("POST", "/", strings.NewReader(`{ "Name":"Zone1", "Description": "Awesome zone", "Lat":10.532, "Long":11.324, "Radius":10 }`)) if err != nil { t.Fatal(err) } db, mock, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() mock.ExpectExec(`INSERT INTO zones \(name,description,lat,long,radius\) VALUES \(\$1,\$2,\$3,\$4,\$5\)`).WithArgs("Zone1", "Awesome zone", 10.532, 11.324, 10.0).WillReturnResult(sqlmock.NewResult(1, 1)) rr := httptest.NewRecorder() handler := http.HandlerFunc(h.CreateZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusCreated, rr.Code) } func TestCreateZoneFailsForEmptyBody(t *testing.T) { req, err := http.NewRequest("POST", "/", nil) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.CreateZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestCreateZoneFailsForMalformedJSON(t *testing.T) { req, err := http.NewRequest("POST", "/", strings.NewReader("}{")) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.CreateZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestCreateZoneFailsIfNameIsEmpty(t *testing.T) { req, err := http.NewRequest("POST", "/", strings.NewReader(`{ "Description": "Awesome zone", "Lat":10.532, "Long":11.324, "Radius":10 }`)) 
if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.CreateZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestCreateZoneFailsIfLatIsEmpty(t *testing.T) { req, err := http.NewRequest("POST", "/", strings.NewReader(`{ "Name":"Zone1", "Description": "Awesome zone", "Long":11.324, "Radius":10 }`)) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.CreateZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestCreateZoneFailsIfLongIsEmpty(t *testing.T) { req, err := http.NewRequest("POST", "/", strings.NewReader(`{ "Name":"Zone1", "Description": "Awesome zone", "Lat":10.532, "Radius":10 }`)) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.CreateZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestCreateZoneFailsIfRadiusIsEmpty(t *testing.T) { req, err := http.NewRequest("POST", "/", strings.NewReader(`{ "Name":"Zone1", "Description": "Awesome zone", "Lat":10.532, "Long":11.324 }`)) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.CreateZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) 
assert.Equal(http.StatusBadRequest, rr.Code)
}

// TestCreateZoneFailsIfDbFails verifies that a DB error surfacing from the
// INSERT is translated into a 500 response.
func TestCreateZoneFailsIfDbFails(t *testing.T) {
	req, err := http.NewRequest("POST", "/", strings.NewReader(`{ "Name":"Zone1", "Description": "Awesome zone", "Lat":10.532, "Long":11.324, "Radius":10 }`))
	if err != nil {
		t.Fatal(err)
	}
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
	}
	defer db.Close()
	mock.ExpectExec(`INSERT INTO zones \(name,description,lat,long,radius\) VALUES \(\$1,\$2,\$3,\$4,\$5\)`).WithArgs("Zone1", "Awesome zone", 10.532, 11.324, 10.0).WillReturnError(fmt.Errorf("some error"))
	rr := httptest.NewRecorder()
	handler := http.HandlerFunc(h.CreateZoneHandler(db))
	handler.ServeHTTP(rr, req)
	assert := assert.New(t)
	assert.Equal(http.StatusInternalServerError, rr.Code)
	// Fix: the error returned by ExpectationsWereMet was previously assigned
	// and dropped, so an unmet mock expectation could never fail this test.
	if err = mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}

///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////// Get zone ////////////////////////////////////////////////////////

// TestGetZoneSuccess verifies the happy path: valid lat/long/radius yield a
// 200 response carrying the matching zones as JSON.
func TestGetZoneSuccess(t *testing.T) {
	req, err := http.NewRequest("GET", "/zone?lat=10.123&long=11.234&radius=1", nil)
	if err != nil {
		t.Fatal(err)
	}
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
	}
	defer db.Close()
	rows := sqlmock.NewRows([]string{"id", "name", "description", "lat", "long", "radius"}).
		AddRow(1, "one", "Oneee", 10.111, 11.222, 10).
AddRow(2, "two", "Twooo", 10.222, 11.333, 10) mock.ExpectQuery(`SELECT id,name,description,lat,long,radius FROM zones WHERE ACOS\( SIN\( RADIANS\( lat \) \) \* SIN\( RADIANS\( \$1 \) \) \+ COS\( RADIANS\( lat \) \) \* COS\( RADIANS\( \$1 \)\) \* COS\( RADIANS\( long \) - RADIANS\( \$2 \)\) \) \* 6380 < \$3;`).WithArgs(10.123, 11.234, 1.0).WillReturnRows(rows) rr := httptest.NewRecorder() handler := http.HandlerFunc(h.GetZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusOK, rr.Code) assert.Equal(`[{"Id":1,"Name":"one","Description":"Oneee","Lat":10.111,"Long":11.222,"Radius":10},{"Id":2,"Name":"two","Description":"Twooo","Lat":10.222,"Long":11.333,"Radius":10}]`, rr.Body.String()) } func TestGetZoneFailsIfLatIsEmpty(t *testing.T) { req, err := http.NewRequest("GET", "/zone?long=10.123&radius=10", nil) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.GetZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestGetZoneFailsIfLongIsEmpty(t *testing.T) { req, err := http.NewRequest("GET", "/zone?lat=10.123&radius=10", nil) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.GetZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestGetZoneFailsIfRadiusIsEmpty(t *testing.T) { req, err := http.NewRequest("GET", "/zone?lat=11.234&long=10.123", nil) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() 
rr := httptest.NewRecorder() handler := http.HandlerFunc(h.GetZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestGetZoneFailsIfLatIsInvalid(t *testing.T) { req, err := http.NewRequest("GET", "/zone?lat=foo&long=10.123&radius=1", nil) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.GetZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestGetZoneFailsIfLongIsInvalid(t *testing.T) { req, err := http.NewRequest("GET", "/zone?lat=11.234&long=foo&radius=1", nil) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.GetZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestGetZoneFailsIfRadiusIsInvalid(t *testing.T) { req, err := http.NewRequest("GET", "/zone?lat=11.234&long=10.123&radius=foo", nil) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.GetZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestGetZoneFailsIfDbFails(t *testing.T) { req, err := http.NewRequest("GET", "/zone?lat=11.234&long=10.123&radius=1", nil) if err != nil { t.Fatal(err) } db, mock, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() mock.ExpectQuery(`SELECT 
id,name,description,lat,long,radius FROM zones WHERE ACOS\( SIN\( RADIANS\( lat \) \) \* SIN\( RADIANS\( \$1 \) \) \+ COS\( RADIANS\( lat \) \) \* COS\( RADIANS\( \$1 \)\) \* COS\( RADIANS\( long \) - RADIANS\( \$2 \)\) \) \* 6380 < \$3;`).WithArgs(10.123, 11.234, 1.0).WillReturnError(fmt.Errorf("some error")) rr := httptest.NewRecorder() handler := http.HandlerFunc(h.GetZoneHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusInternalServerError, rr.Code) err = mock.ExpectationsWereMet() } <file_sep>/README.md # Charcha Point Location based discussion forum # To build project Download dependencis `glide install` Build `make build` Run `cd out && ./charchapoint` # To run mobile app `cd mobileapp && react-native run-android` <file_sep>/db/init.sql CREATE USER charchapoint_user WITH PASSWORD '******'; CREATE DATABASE charchapoint; \c charchapoint; CREATE TABLE zones( id BIGSERIAL PRIMARY KEY, name TEXT NOT NULL, description TEXT, lat DECIMAL NOT NULL, long DECIMAL NOT NULL, radius DECIMAL NOT NULL ); CREATE EXTENSION pgcrypto; -- for uuid generation gen_random_uuid() CREATE TABLE messages( id UUID PRIMARY KEY, device_id TEXT NOT NULL, message TEXT NOT NULL, msg_time TIMESTAMPTZ NOT NULL ); REVOKE CONNECT ON DATABASE charchapoint FROM PUBLIC; GRANT CONNECT ON DATABASE charchapoint TO charchapoint_user; GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO charchapoint_user; GRANT SELECT, UPDATE ON ALL SEQUENCES IN SCHEMA public TO charchapoint_user;<file_sep>/main/main.go package main import ( "database/sql" "fmt" "log" "net/http" "os" c "github.com/tw-spic/charchapoint/config" h "github.com/tw-spic/charchapoint/handlers" gh "github.com/gorilla/handlers" "github.com/gorilla/mux" _ "github.com/lib/pq" ) func main() { f, err := os.OpenFile("CharchaPoint.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) if err != nil { log.Fatalf("error opening file: %v", err) } defer f.Close() log.SetOutput(f) conf, err := 
c.ReadFromFile("config.json") if err != nil { log.Fatal(err) } connString := fmt.Sprintf("user=%s password=%s dbname=charchapoint sslmode=disable", conf.DBUsername, conf.DBPassword) db, err := sql.Open("postgres", connString) defer db.Close() if err != nil { log.Fatal(err) } r := mux.NewRouter() r.HandleFunc("/", h.ServeIndexPage()) r.HandleFunc("/zone", h.CreateZoneHandler(db)).Methods("POST") r.HandleFunc("/zone", h.GetZoneHandler(db)).Methods("GET") r.HandleFunc("/message", h.CreateMessageHandler(db)).Methods("POST") r.PathPrefix("/public").Handler(http.StripPrefix("/public", http.FileServer(http.Dir("./public")))) log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", conf.Port), gh.LoggingHandler(f, r))) } <file_sep>/mobileapp/index.android.js import React from 'react'; import { Platform, StyleSheet, Text, View, AppRegistry, ToastAndroid, ActivityIndicator, AsyncStorage, TouchableWithoutFeedback } from 'react-native'; import { NativeModules } from 'react-native'; import * as firebase from 'firebase' import { GiftedChat } from 'react-native-gifted-chat'; import geodist from 'geodist'; import DeviceInfo from 'react-native-device-info'; const MAX_NO_OF_MSGS_PER_GROUP = 100; const firebaseConfig = { apiKey: "masked", authDomain: "1028630304114-nec2l2fmac4p32dke638rofrv9u1q9tt.apps.googleusercontent.com", databaseURL: "charcha-point.firebaseio.com", storageBucket: "charcha-point.appspot.com" }; const styles = StyleSheet.create({ view:{ flex:1, flexDirection:'column', alignItems:'center', justifyContent:'center' }, titleText: { fontSize: 20, fontWeight: 'bold', textAlign: 'center', paddingBottom:20, }, normalText: { textAlign: 'center', paddingBottom:20, }, }); const firebaseApp = firebase.initializeApp(firebaseConfig); class CharchaPoint extends React.Component { constructor(props) { super(props); this.deviceId = DeviceInfo.getUniqueID(); this.onSend = this.onSend.bind(this); this.findCurrentZone = this.findCurrentZone.bind(this); this.subscribeMessages = 
this.subscribeMessages.bind(this); this.registerLocationWatcher = this.registerLocationWatcher.bind(this); this.setCurrentZone = this.setCurrentZone.bind(this); this.updateZones = this.updateZones.bind(this); this.state = {messages: []}; } componentDidMount() { this.updateZones(); this.registerLocationWatcher(); } updateZones() { try { AsyncStorage.getItem("zones").then((value) => { if(value) { this.zones = JSON.parse(value); } }).done(); } catch (err) { console.log(err); } firebase.database().ref('zones/zones').on('value', (snapshot) => { this.zones = snapshot.val(); try { AsyncStorage.setItem("zones", JSON.stringify(this.zones)); } catch (err) { console.log(err); } if(!this.state.zone) { this.setCurrentZone(); } }); } registerLocationWatcher() { navigator.geolocation.getCurrentPosition( (position) => {}, (error) => {}, {enableHighAccuracy: true, timeout: 20000, maximumAge: 1000} ); this.watchID = navigator.geolocation.watchPosition((position) => { this.lat = position.coords.latitude; this.long = position.coords.longitude; this.setCurrentZone(); }); } subscribeMessages() { var messagesRef = firebase.database().ref("messages/" + this.state.zone.Id).limitToLast(MAX_NO_OF_MSGS_PER_GROUP); messagesRef.on('child_added', (msg) => { this.setState((previousState) => { return { ...previousState, messages: GiftedChat.append(previousState.messages, msg.val().message), }; }); }); } setCurrentZone() { if (!this.lat || !this.long) { return; } var currZone = this.findCurrentZone(this.lat, this.long); if (!currZone || currZone === this.state.zone) { return; } ToastAndroid.show("You are in " + currZone.Name + " zone. 
\n" + currZone.Description, ToastAndroid.LONG); this.setState((previousState) => { return { ...previousState, zone:currZone }; }); this.subscribeMessages(); } findCurrentZone(lat, long) { if (!this.zones) { return; } return this.zones.find((zone) => { return geodist({lat:lat,lon:long}, { lat:zone.Lat, lon:zone.Long }, {unit: 'meters'}) < zone.Radius}) } onSend(messages = []) { // now we are directly writing to the FCM database, we may need to route it through our server to enable FCM notifications firebase.database().ref("messages/" + this.state.zone.Id).push({ message: messages }); } componentWillUnmount() { navigator.geolocation.clearWatch(this.watchID); } render() { if(!this.state.zone){ return ( <View style={styles.view}> <Text style={styles.titleText} >Charcha point </Text> <Text style={styles.normalText} >We are finding zone near to you. Please wait...</Text> <ActivityIndicator color="#0000ff" size="large"/> </View> ); } else { return ( <GiftedChat messages={this.state.messages} onSend={this.onSend} user={{ _id: this.deviceId, }} /> ); } } } AppRegistry.registerComponent('CharchaPoint', () => CharchaPoint); <file_sep>/handlers/message_test.go package handlers_test import ( "fmt" "net/http" "net/http/httptest" "strings" "testing" h "github.com/tw-spic/charchapoint/handlers" "github.com/stretchr/testify/assert" sqlmock "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestCreateMessageSuccess(t *testing.T) { req, err := http.NewRequest("POST", "/", strings.NewReader(`{ "DeviceId":"xxxxx", "Message":"Hello world" }`)) if err != nil { t.Fatal(err) } db, mock, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() mock.ExpectExec(`INSERT INTO messages\(id, device_id, message, msg_time\) VALUES\(gen_random_uuid\(\), \$1, \$2, \$3\)`).WithArgs("xxxxx", "Hello world", sqlmock.AnyArg()).WillReturnResult(sqlmock.NewResult(1, 1)) rr := httptest.NewRecorder() handler := 
http.HandlerFunc(h.CreateMessageHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusCreated, rr.Code) } func TestCreateMessageFailsForEmptyBody(t *testing.T) { req, err := http.NewRequest("POST", "/", nil) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.CreateMessageHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestCreateMessageFailsForMalformedJSON(t *testing.T) { req, err := http.NewRequest("POST", "/", strings.NewReader("}{")) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.CreateMessageHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestCreateMessageFailsIfDeviceIdIsEmpty(t *testing.T) { req, err := http.NewRequest("POST", "/", strings.NewReader(`{ "Message":"Hello" }`)) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := http.HandlerFunc(h.CreateMessageHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestCreateMessageFailsIfMessageIsEmpty(t *testing.T) { req, err := http.NewRequest("POST", "/", strings.NewReader(`{ "DeviceId":"xxxx" }`)) if err != nil { t.Fatal(err) } db, _, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() rr := httptest.NewRecorder() handler := 
http.HandlerFunc(h.CreateMessageHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusBadRequest, rr.Code) } func TestCreateMessageFailsIfDbFails(t *testing.T) { req, err := http.NewRequest("POST", "/", strings.NewReader(`{ "DeviceId":"xxxx", "Message":"Hello world" }`)) if err != nil { t.Fatal(err) } db, mock, err := sqlmock.New() if err != nil { t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) } defer db.Close() mock.ExpectExec(`INSERT INTO messages\(id, device_id, message, msg_time\) VALUES\(gen_random_uuid\(\), \$1, \$2, \$3\)`).WithArgs("xxxxx", "Hello world", sqlmock.AnyArg()).WillReturnError(fmt.Errorf("some error")) rr := httptest.NewRecorder() handler := http.HandlerFunc(h.CreateMessageHandler(db)) handler.ServeHTTP(rr, req) assert := assert.New(t) assert.Equal(http.StatusInternalServerError, rr.Code) err = mock.ExpectationsWereMet() } <file_sep>/config/config.go package config import ( "encoding/json" "os" ) type Configuration struct { Port int DBServer string DBPort int DBUsername string DBPassword string } func ReadFromFile(path string) (Configuration, error) { configuration := Configuration{} file, err := os.Open(path) defer file.Close() if err != nil { return configuration, err } decoder := json.NewDecoder(file) err = decoder.Decode(&configuration) if err != nil { return configuration, err } return configuration, nil } <file_sep>/handlers/index.go package handlers import ( "html/template" "log" "net/http" ) func ServeIndexPage() func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { home, err := template.ParseFiles("public/index.html") if err != nil { log.Println(err) } w.Header().Set("Content-Type", "text/html; charset=utf-8") home.Execute(w, r.Host) } } <file_sep>/models/message.go package models import ( "database/sql" "time" ) type Message struct { Id *string `sql:id` DeviceId *string `sql:device_id` Message *string `sql:message` 
MsgTime *time.Time `sql:"msg_time"` // normalized to canonical key:"value" tag form; tag unused at runtime
}

const (
	// SaveMessageQuery inserts one message row; Postgres generates the UUID
	// primary key via pgcrypto's gen_random_uuid(). Keep this string
	// byte-identical -- the handler tests match it with a regex.
	SaveMessageQuery = `INSERT INTO messages(id, device_id, message, msg_time) VALUES(gen_random_uuid(), $1, $2, $3)`
)

// SaveToDb persists the message using db, returning any error from Exec.
// Callers are expected to have populated DeviceId, Message and MsgTime first.
func (m *Message) SaveToDb(db *sql.DB) error {
	_, err := db.Exec(SaveMessageQuery, m.DeviceId, m.Message, m.MsgTime)
	return err
}
e6a4303a2270bf9d95b4133b7443780294703273
[ "SQL", "JavaScript", "Markdown", "Makefile", "Go" ]
14
Go
tw-spic/charchapoint
423f79b071f6e6fab796e9a976514dadb5a3eb25
caccee27ac07bc8f99aec31a614ffdaa5d8546c3
refs/heads/master
<file_sep># ml_course Этот репозиторий содержит в себе необходимые датасеты для практики ml, а также ноутбуки с воркшопов. В папке Projects находятся проекты, ее необходимо скачать, чтобы выполнить задания. <file_sep>import pandas as pd from time import sleep def basic_answer(mode=None): if mode is None: print("Думаю...") sleep(1) else: print("Запускаю тестирование...") sleep(1) print("Проверяю метрики...") sleep(1) class ChurnPrediction: """ Класс для проверки задания на предсказание оттока клиентов. Берет на вход """ def __init__(self): self.name = "" self._drop_runs_number = 0 self._null_runs_number = 0 self._new_feature_runs_number = 0 self._model_runs_number = 0 print('Привет! Приятно познакомиться!') def test_task(self, df=None): if df is None: print( "Давайте представим, что здесь написано очень интересное задание!" ) elif isinstance(df, pd.DataFrame): basic_answer() print( "Ого! Датасет, сейчас мы будем его исследовать, интересно, что в нем." ) print("Сам я не справляюсь - нужна твоя помощь") def drop_task(self, df=None, answer="Условие"): """ Метод для проверки задания на удаление колонок с большим количесвто уникальных значений. А также персонализированной колонки Surname :return: None """ if df is None: print( "В датафрейме есть несколько колонок, в которых слишком много уникальных значений, нужно их найти и удалить." "А получившийся датафрейм передать мне в параметрах." ) else: if not isinstance(answer, pd.DataFrame): basic_answer() print("Ой-ой. Я тебя не понимаю, мне нужен pd.DataFrame без колонок, в которых слишком много уникальных значений") else: basic_answer() self._drop_runs_number += 1 if (self._drop_runs_number == 2) & ("RowNumber" in df.columns): print("Тут может пригодиться метод .nunique()") elif "RowNumber" in df.columns: print( "Нужно ответить на очень важный вопрос - сколько уникальных значений имеет каждый признак." 
) elif "CustomerId" in df.columns: print( "Нужно ответить на очень важный вопрос - сколько уникальных значений имеет каждый признак." ) elif "Surname" in df.columns: print( "Осталась еще одна колонка. У людей есть важный почти уникальный признак. А как тебя зовут?" ) elif ( ("RowNumber" not in df.columns) & ("CustomerId" not in df.columns) & ("Surname" not in df.columns) ): basic_answer() print("Кодовая фраза Data") print("Ура! Первое задание позади!") def null_task(self, answer=None): if answer is None: print( "Нужно понять есть ли в датафрейме пропущенные значения и отправить ответ мне\n" "Я понимаю только 'Да' или 'Нет'" ) else: basic_answer() if answer.lower() == "нет": print("Ура! Второе задание позади!") print("Следующая кодовая фраза Science") elif answer.lower() == "да": print( "Увы :( Тут поможет метод .isnull().sum() попробуй еще раз" ) else: print("Ой-ой я понимаю только да или нет :)") def production_quality(self, answer=None): if answer is None: print( "Я умею оценивать качество на тесте. Метрика, которую я измеряю ROC-AUC." "Тут можно отправлять сделанные предсказания. Чтобы я не запутался куда какие предсказания, давай будем присылать в виде датафрейма " "у которого первая колонка 'RowNumber', а вторая 'predict'. Пример посылки попробуем, когда отправим submission." ) else: if not isinstance(answer, pd.DataFrame): basic_answer() print("Ой-Ой. Я тебя не понимаю - мне нужен pd.DataFrame с колонками RowNumber и predict. predict - это предсказание твоей модели") else: basic_answer(mode="test") from sklearn.metrics import roc_auc_score import random merged = pd.read_csv("./data/meta_file.csv").merge( answer, on=["RowNumber"] ) score = roc_auc_score(merged["Exited"], merged["predict"]) print( f"Твой результат: {roc_auc_score(merged['Exited'], merged['predict'])}" ) if score == 0.5: print("Ого! Да это же самое хитрое решение - хоть я и простой бот, " "но монетку подбросить и наугад сказать даже я смогу." 
"Я уверен - как-то точно можно улучшить предсказания!") elif score >= 0.8: print( "Наконец-то мы сэкономили денег! Чтобы себя порадовать - можно посчитать примерные значения в деньгах, как мы посчитали с бейзлайном :)." " Финальная кодовая фраза '- моя любимая наука' " "Если вспомнить все предыдущие получится: Data Science - моя любимая наука" "Что-то правда, то правда - обожаю анализировать данные, особенно, когда мне помогают" ) else: basic_answer() answers = ["А что если попробовать бустинг?", "Ты же помнишь, что ROC-AUC считается по вероятностям?", "А что если построить случайный лес?", "Нужно качество больше 0.80, я верю - у тебя получится!"] print(random.choice(answers)) <file_sep>import pandas as pd from time import sleep def basic_answer(mode=None): if mode is None: print("Думаю...") sleep(1) else: print("Запускаю тестирование...") sleep(1) print("Проверяю метрики...") sleep(1) class RiskDefaultPrediction: """ Класс для проверки задания на предсказание оттока клиентов. Берет на вход """ def __init__(self): self.name = "" self._drop_runs_number = 0 self._null_runs_number = 0 self._new_feature_runs_number = 0 self._model_runs_number = 0 print('Привет! Приятно познакомиться!') def test_task(self, df=None): if df is None: print( "Давайте представим, что здесь написано очень интересное задание!" ) elif isinstance(df, pd.DataFrame): basic_answer() print( "Ого! Датасет, сейчас мы будем его исследовать, интересно, что в нем." ) print("Сам я не справляюсь - нужна твоя помощь") def production_quality(self, answer=None): if answer is None: print( "Я умею оценивать качество на тесте. Метрика, которую я измеряю ROC-AUC." "Тут можно отправлять сделанные предсказания. Чтобы я не запутался куда какие предсказания, давай будем присылать в виде датафрейма " "у которого первая колонка 'sk_id_curr', а вторая 'score'. Пример посылки попробуем, когда отправим submission." ) else: if not isinstance(answer, pd.DataFrame): basic_answer() print("Ой-Ой. 
Я тебя не понимаю - мне нужен pd.DataFrame с колонками 'sk_id_curr', а вторая 'score'. Score - это предсказание твоей модели") else: basic_answer(mode="test") from sklearn.metrics import roc_auc_score import random merged = pd.read_csv("../data/test_target.csv").merge( answer, on=["sk_id_curr"] ) score = roc_auc_score(merged["target"], merged["score"]) print( f"Твой результат: {roc_auc_score(merged['target'], merged['score'])}" ) if score == 0.5: print("Ого! Да это же самое хитрое решение - хоть я и простой бот, " "но монетку подбросить и наугад сказать даже я смогу." "Я уверен - как-то точно можно улучшить предсказания!") elif score >= 0.77: print( "Ура! Мы получили удовлетворительную по качеству модельку!" " Финальная кодовая фраза 'Data Scientist’ы делают этот мир лучше!' " "Это правда, мы с тобой сделали мир лучше, позволив компании уверенее " "принимать решения на основе данных и увереннее развивать свой бизнес!" ) else: basic_answer() answers = [ "А что если попробовать бустинг?", "Ты же помнишь, что ROC-AUC считается по вероятностям?", "А ты использовал все доступные источники данных?", "Может тебе увеличить сложность модели? Увеличить num_boost_rounds? А early_stopping_rounds?" "Признаки можно создавать не только с помощью агрегаций, но и с помощью отношений (сумма кредита к сумме зарплаты)", "Если ты используешь бустинг, то все реализации ты попробовал? LightGBM? XGBoost? CatBoost?", "Может быть имеет смысл сделать отбор признаков? Удалить все лишнее?" ] print(random.choice(answers)) print("Нужно качество больше 0.77, я верю - у тебя получится!")
e0b3888f0e13e6d12729d8bee241f03d946ae336
[ "Markdown", "Python" ]
3
Markdown
productstar-team/ml_course
dfab269fe536f246ead82f4fe34f9a68c9e5a5ff
e2c7de0ce97861e79aea66b02c129321626eeb5d
refs/heads/master
<repo_name>dyma-projects/5a9cebc0fc5a7199<file_sep>/projet4/src/app/shared/color.directive.ts import { Directive, ElementRef, Renderer2, OnInit, HostListener } from '@angular/core'; @Directive({ selector: '[appCouleur]' }) export class ColorDirective implements OnInit { private couleur = 'black'; private key = 0; @HostListener('window:keyup', ['$event']) keyEvent(event: KeyboardEvent) { this.key = event.keyCode; if (this.key === 37) { this.couleur = 'blue'; } else if (this.key === 38) { this.couleur = 'red'; } else if (this.key === 39) { this.couleur = 'yellow'; } else if (this.key === 40) { this.couleur = 'green'; } else { this.couleur = 'black'; } this.renderer.setStyle(this.el.nativeElement, 'color', this.couleur); } constructor(private el: ElementRef, private renderer: Renderer2) {} ngOnInit() { } }
5a88e49c83ba1cebbd9db551763512b4e075d1eb
[ "TypeScript" ]
1
TypeScript
dyma-projects/5a9cebc0fc5a7199
23ff0959fca68b76133c8c0a52ffe523661870a4
ef443e321e1b1e1292add626b7480b376c2e96a5
refs/heads/master
<repo_name>FolkGAS/JavaRushHomeWork<file_sep>/src/com/javarush/test/level27/lesson15/big01/ad/StatisticAdvertisementManager.java package com.javarush.test.level27.lesson15.big01.ad; import java.util.*; public class StatisticAdvertisementManager { private static StatisticAdvertisementManager ourInstance = new StatisticAdvertisementManager(); public static StatisticAdvertisementManager getInstance() { return ourInstance; } private StatisticAdvertisementManager() { } AdvertisementStorage storage = AdvertisementStorage.getInstance(); public List<Advertisement> getActiveVideos(){ List<Advertisement> activeVideos = new ArrayList<>(); for (Advertisement adv : storage.list()) if (adv.getHits() > 0) activeVideos.add(adv); Collections.sort(activeVideos, new Comparator<Advertisement>() { @Override public int compare(Advertisement o1, Advertisement o2) { return o1.getName().compareToIgnoreCase(o2.getName()); } }); return activeVideos; } public List<Advertisement> getUnactiveVideos(){ List<Advertisement> unActiveVideos = new ArrayList<>(); for (Advertisement adv : storage.list()) if (adv.getHits() == 0) unActiveVideos.add(adv); Collections.sort(unActiveVideos, new Comparator<Advertisement>() { @Override public int compare(Advertisement o1, Advertisement o2) { return o1.getName().compareToIgnoreCase(o2.getName()); } }); return unActiveVideos; } } <file_sep>/src/com/javarush/test/level08/lesson11/home09/Solution.java package com.javarush.test.level08.lesson11.home09; import java.util.Date; /* Работа с датой 1. Реализовать метод isDateOdd(String date) так, чтобы он возвращал true, если количество дней с начала года - нечетное число, иначе false 2. String date передается в формате MAY 1 2013 Не забудьте учесть первый день года. 
Пример: JANUARY 1 2000 = true JANUARY 2 2020 = false */ public class Solution { public static void main(String[] args) { isDateOdd("JANUARY 1 2000"); } public static boolean isDateOdd(String date) { Date dateThis = new Date(date); Date dateStart = new Date(); dateStart.setSeconds(0); dateStart.setMinutes(0); dateStart.setHours(0); dateStart.setDate(0); dateStart.setMonth(0); dateStart.setYear(dateThis.getYear()); long ms = dateThis.getTime() - dateStart.getTime(); int days = (int) (ms / (24 * 60 * 60 * 1000)); if (days % 2 == 0) return true; else return false; } } <file_sep>/src/com/javarush/test/level18/lesson10/home08/Solution.java package com.javarush.test.level18.lesson10.home08; import java.io.*; import java.util.HashMap; import java.util.Map; /* Нити и байты Читайте с консоли имена файлов, пока не будет введено слово "exit" Передайте имя файла в нить ReadThread Нить ReadThread должна найти байт, который встречается в файле максимальное число раз, и добавить его в словарь resultMap, где параметр String - это имя файла, параметр Integer - это искомый байт. Закрыть потоки. 
Не использовать try-with-resources */ public class Solution { public static Map<String, Integer> resultMap = new HashMap<String, Integer>(); public static void main(String[] args) { String fileName = ""; try { BufferedReader reader = new BufferedReader(new InputStreamReader(System.in)); while (!"exit".equals(fileName = reader.readLine())) if (!"".equals(fileName)) new ReadThread(fileName).start(); reader.close(); } catch (IOException exc) {exc.printStackTrace();} } public static class ReadThread extends Thread { String filename = ""; public ReadThread(String fileName) { //implement constructor body this.filename = fileName; } public void run() { // implement file reading here - реализуйте чтение из файла тут try { int singleByte, max = 0; HashMap <Integer, Integer> bytes = new HashMap<>(); FileInputStream fis = new FileInputStream(filename); while ((singleByte = fis.read()) != -1) { if (bytes.containsKey(singleByte)) bytes.put(singleByte, bytes.get(singleByte)+1); else bytes.put(singleByte, 1); } for (Map.Entry<Integer, Integer> pair : bytes.entrySet()) max = (max < pair.getValue()) ? pair.getValue() : max; for (Map.Entry<Integer, Integer> pair : bytes.entrySet()) if (pair.getValue() == max) resultMap.put(filename, pair.getKey()); fis.close(); } catch (IOException exc) {exc.printStackTrace();} } } } <file_sep>/src/com/javarush/test/level18/lesson10/home10/Solution.java package com.javarush.test.level18.lesson10.home10; /* Собираем файл Собираем файл из кусочков Считывать с консоли имена файлов Каждый файл имеет имя: [someName].partN. Например, Lion.avi.part1, Lion.avi.part2, ..., Lion.avi.part37. Имена файлов подаются в произвольном порядке. Ввод заканчивается словом "end" В папке, где находятся все прочтенные файлы, создать файл без приставки [.partN]. Например, Lion.avi В него переписать все байты из файлов-частей используя буфер. Файлы переписывать в строгой последовательности, сначала первую часть, потом вторую, ..., в конце - последнюю. Закрыть потоки. 
Не использовать try-with-resources */ import java.io.*; import java.util.ArrayList; import java.util.Collections; public class Solution { public static void main(String[] args) { try { BufferedReader reader = new BufferedReader(new InputStreamReader(System.in)); FileInputStream fis = null; String fileName, name = ""; int part; ArrayList <Integer> list = new ArrayList<>(); while (!"end".equals(fileName = reader.readLine())) { if ("".equals(name)) name = fileName.substring(0, fileName.lastIndexOf(".part")); part = Integer.parseInt(fileName.substring(fileName.lastIndexOf(".part") + 5, fileName.length())); list.add(part); } Collections.sort(list); FileOutputStream fos = new FileOutputStream(name); for (int n : list) { fis = new FileInputStream(name + ".part" + n); while (fis.available() > 0) fos.write(fis.read()); fis.close(); } fos.close(); reader.close(); } catch (IOException exc) {exc.printStackTrace();} } } <file_sep>/src/com/javarush/test/level26/lesson15/big01/command/WithdrawCommand.java package com.javarush.test.level26.lesson15.big01.command; import com.javarush.test.level26.lesson15.big01.CashMachine; import com.javarush.test.level26.lesson15.big01.ConsoleHelper; import com.javarush.test.level26.lesson15.big01.CurrencyManipulator; import com.javarush.test.level26.lesson15.big01.CurrencyManipulatorFactory; import com.javarush.test.level26.lesson15.big01.exception.InterruptOperationException; import com.javarush.test.level26.lesson15.big01.exception.NotEnoughMoneyException; import java.util.Map; import java.util.ResourceBundle; class WithdrawCommand implements Command { private ResourceBundle res = ResourceBundle.getBundle(CashMachine.RESOURCE_PATH + "withdraw_en"); @Override public void execute() throws InterruptOperationException { String money = null; boolean reenter = true; Map<Integer, Integer> withdraw = null; CurrencyManipulator manipulator = null; ConsoleHelper.writeMessage((String) res.getObject("before")); while (manipulator == null || 
manipulator.getTotalAmount() == 0) { String code = ConsoleHelper.askCurrencyCode(); manipulator = CurrencyManipulatorFactory.getManipulatorByCurrencyCode(code); } ConsoleHelper.writeMessage((String) res.getObject("specify.amount")); while(reenter){ reenter = false; money = ConsoleHelper.readString(); if (money == null || !money.matches("-?\\d+") || Integer.parseInt(money)<= 0){ ConsoleHelper.writeMessage((String) res.getObject("specify.not.empty.amount")); reenter = true; continue; } if (!manipulator.isAmountAvailable(Integer.parseInt(money))){ ConsoleHelper.writeMessage((String) res.getObject("not.enough.money")); reenter = true; continue; } try {withdraw = manipulator.withdrawAmount(Integer.parseInt(money));} catch (NotEnoughMoneyException exc){ ConsoleHelper.writeMessage((String) res.getObject("exact.amount.not.available")); reenter = true; } } for (Map.Entry<Integer, Integer> pair : withdraw.entrySet()) ConsoleHelper.writeMessage("\t" + pair.getKey() + " - " + pair.getValue()); ConsoleHelper.writeMessage(String.format((String) res.getObject("success.format"), Integer.parseInt(money), manipulator.getCurrencyCode())); ConsoleHelper.writeMessage(""); } } <file_sep>/src/com/javarush/test/level18/lesson10/bonus01/Solution.java package com.javarush.test.level18.lesson10.bonus01; /* Шифровка Придумать механизм шифровки/дешифровки Программа запускается с одним из следующих наборов параметров: -e fileName fileOutputName -d fileName fileOutputName где fileName - имя файла, который необходимо зашифровать/расшифровать fileOutputName - имя файла, куда необходимо записать результат шифрования/дешифрования -e - ключ указывает, что необходимо зашифровать данные -d - ключ указывает, что необходимо расшифровать данные */ import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; public class Solution { public static void main(String[] args) { try { FileInputStream fis = null; FileOutputStream fos = null; if (args.length != 3) return; if 
("-e".equals(args[0])) { fis = new FileInputStream(args[1]); fos = new FileOutputStream(args[2]); while (fis.available() > 0) fos.write(Byte.valueOf((byte)fis.read())+1); fis.close(); fos.close(); } else if ("-d".equals(args[0])) { fis = new FileInputStream(args[1]); fos = new FileOutputStream(args[2]); while (fis.available() > 0) fos.write(Byte.valueOf((byte)fis.read())-1); fis.close(); fos.close(); } else return; } catch (IOException exc) {exc.printStackTrace();} } } <file_sep>/src/com/javarush/test/level18/lesson10/bonus02/Solution.java package com.javarush.test.level18.lesson10.bonus02; /* Прайсы CrUD для таблицы внутри файла Считать с консоли имя файла для операций CrUD Программа запускается со следующим набором параметров: -c productName price quantity Значения параметров: где id - 8 символов productName - название товара, 30 chars (60 bytes) price - цена, 8 символов quantity - количество, 4 символа -c - добавляет товар с заданными параметрами в конец файла, генерирует id самостоятельно, инкрементируя максимальный id, найденный в файле В файле данные хранятся в следующей последовательности (без разделяющих пробелов): id productName price quantity Данные дополнены пробелами до их длины Пример: 19846 Шорты пляжные синие 159.00 12 198478 Шорты пляжные черные с рисунко173.00 17 19847983Куртка для сноубордистов, разм10173.991234 */ import java.io.*; public class Solution { public static void main(String[] args) throws Exception { String sId; int id, idMax = 0; int[] len = {2, 30, 8, 4}; if (!"-c".equals(args[0]) || args.length != 4) return; try { BufferedReader reader = new BufferedReader(new InputStreamReader(System.in)); String fileName = reader.readLine(); reader.close(); BufferedReader reader2 = new BufferedReader(new FileReader(fileName)); while (reader2.ready()) { sId = reader2.readLine(); if (sId.equals("")) sId = "0 "; sId = sId.substring(0, 8); if (sId.contains(" ")) sId = sId.substring(0, sId.indexOf(" ")); id = Integer.parseInt(sId); idMax = (idMax < 
id) ? id : idMax; } reader2.close(); sId = String.valueOf(idMax+1); sId = cutOrFillToSize(sId, 8); for (int i = 1; i < 4; i++) args[i] = cutOrFillToSize(args[i], len[i]); FileWriter writer = new FileWriter(fileName, true); writer.write(sId + args[1] + args[2] + args[3] + "\n"); writer.flush(); writer.close(); } catch (IOException exc) {exc.printStackTrace();} } static String cutOrFillToSize (String str, int len) { if (str.length() > len) str = str.substring(0, len); for (int i = str.length(); i < len; i++) str += " "; return str; } } <file_sep>/src/com/javarush/test/level31/lesson06/bonus01/Solution.java package com.javarush.test.level31.lesson06.bonus01; import java.io.FileInputStream; import java.io.FileOutputStream; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.zip.ZipInputStream; /* Разархивируем файл В метод main приходит список аргументов. Первый аргумент - имя результирующего файла resultFileName, остальные аргументы - имена файлов fileNamePart. Каждый файл (fileNamePart) - это кусочек zip архива. Нужно разархивировать целый файл, собрав его из кусочков. Записать разархивированный файл в resultFileName. Архив внутри может содержать файл большой длины, например, 50Mb. Внутри архива может содержаться файл с любым именем. Пример входных данных. 
Внутри архива находится один файл с именем abc.mp3: C:/result.mp3 C:/pathToTest/test.zip.003 C:/pathToTest/test.zip.001 C:/pathToTest/test.zip.004 C:/pathToTest/test.zip.002 */ public class Solution { public static void main(String[] args) throws Exception{ String tempFile = args[0] + ".zip"; ArrayList<String> zipParts = new ArrayList<>(Arrays.asList(Arrays.copyOfRange(args, 1, args.length))); Collections.sort(zipParts); FileOutputStream fosTemp = new FileOutputStream(tempFile); for (String partName : zipParts){ FileInputStream fis = new FileInputStream(partName); byte[] buffer = new byte[fis.available()]; fis.read(buffer); fosTemp.write(buffer); fosTemp.flush(); } fosTemp.close(); ZipInputStream zis = new ZipInputStream(new FileInputStream(tempFile)); zis.getNextEntry(); FileOutputStream fos = new FileOutputStream(args[0]); int count; byte[] buffer = new byte[1024*1000]; while ((count = zis.read(buffer)) != -1){ fos.write(buffer, 0, count); fos.flush(); } zis.close(); Files.delete(Paths.get(tempFile)); fos.close(); } } <file_sep>/src/com/javarush/test/level27/lesson15/big01/kitchen/Dish.java package com.javarush.test.level27.lesson15.big01.kitchen; public enum Dish { Fish(25), Steak(30), Soup(15), Juice(5), Water(3); private int duration; Dish(int duration){ this.duration = duration; } public int getDuration() { return duration; } public static String allDishesToString(){ Dish[] dishes = values(); if (dishes.length == 0) return ""; String strDishes = ""; for (Dish dish : dishes) strDishes += dish + ", "; strDishes = strDishes.substring(0, strDishes.length() - 2); return strDishes; } } <file_sep>/src/com/javarush/test/level28/lesson15/big01/model/MoikrugStrategy.java package com.javarush.test.level28.lesson15.big01.model; import com.javarush.test.level28.lesson15.big01.vo.Vacancy; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import java.io.IOException; import java.util.ArrayList; import java.util.List; public class 
MoikrugStrategy implements Strategy { private static final String URL_FORMAT = "https://moikrug.ru/vacancies?page=%d&q=java+%s"; private static final String USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:50.0) Gecko/20100101 Firefox/50.0"; private static final String REFERRER = "https://moikrug.ru/"; private static final int timeout = 10 * 1000; @Override public List<Vacancy> getVacancies(String searchString) { int page = 1; String title, salary, city, companyName, siteName, url; Element elUrlAndTitle, elSalary, elCity, elCompanyName; List<Vacancy> vacancies = new ArrayList<>(); List<Element> elements; Vacancy vacancy; while (true) { Document document = new Document("localhost"); try{ document = getDocument(searchString, page++); }catch (IOException e){} elements = document.getElementsByClass("job"); if (elements.size() == 0) break; for (Element element : elements){ vacancy = new Vacancy(); elUrlAndTitle = element.getElementsByClass("Title").first().getElementsByAttribute("href").first(); if (elUrlAndTitle != null) { url = elUrlAndTitle.attr("abs:href"); title = elUrlAndTitle.html(); } else { url = ""; title = ""; } elSalary = element.getElementsByClass("count").first(); if (elSalary != null) salary = elSalary.text(); else salary = ""; elCompanyName = element.getElementsByClass("company_name").first(); if (elCompanyName != null) companyName = elCompanyName.text(); else companyName = ""; elCity = element.getElementsByClass("location").first(); if (elCity != null) city = elCity.text(); else city = ""; if (document.title() != null || !document.title().equals("")) siteName = document.title(); else siteName = ""; vacancy.setTitle(title); vacancy.setSalary(salary); vacancy.setCity(city); vacancy.setCompanyName(companyName); vacancy.setSiteName(siteName); vacancy.setUrl(url); vacancies.add(vacancy); } } return vacancies; } protected Document getDocument(String searchString, int page) throws IOException { return Jsoup.connect(String.format(URL_FORMAT, page, 
searchString)).userAgent(USER_AGENT).referrer(REFERRER).get(); } } <file_sep>/src/com/javarush/test/level18/lesson10/bonus03/Solution.java package com.javarush.test.level18.lesson10.bonus03; /* Прайсы 2 CrUD для таблицы внутри файла Считать с консоли имя файла для операций CrUD Программа запускается с одним из следующих наборов параметров: -u id productName price quantity -d id Значения параметров: где id - 8 символов productName - название товара, 30 chars (60 bytes) price - цена, 8 символов quantity - количество, 4 символа -u - обновляет данные товара с заданным id -d - производит физическое удаление товара с заданным id (все данные, которые относятся к переданному id) В файле данные хранятся в следующей последовательности (без разделяющих пробелов): id productName price quantity Данные дополнены пробелами до их длины Пример: 19846 Шорты пляжные синие 159.00 12 198478 Шорты пляжные черные с рисунко173.00 17 19847983Куртка для сноубордистов, разм10173.991234 */ import java.io.*; import java.util.ArrayList; public class Solution { public static void main(String[] args) { String sId, str; int id, idMax = 0, count = -1; int[] len = {2, 8, 30, 8, 4}; ArrayList<String> list = new ArrayList<>(); if (!"-u".equals(args[0]) && !"-d".equals(args[0]) && args.length < 2) return; try { BufferedReader reader = new BufferedReader(new InputStreamReader(System.in)); String fileName = reader.readLine(); reader.close(); BufferedReader reader2 = new BufferedReader(new FileReader(fileName)); while (reader2.ready()) { str = sId = reader2.readLine(); count++; list.add(sId); if (sId.equals("")) sId = "0 "; sId = sId.substring(0, 8); if (sId.contains(" ")) sId = sId.substring(0, sId.indexOf(" ")); if (args[1].equals(sId)) { if ("-d".equals(args[0])) list.remove(count); else if ("-u".equals(args[0])) { for (int i = 1; i < 5; i++) args[i] = cutOrFillToSize(args[i], len[i]); list.remove(count); list.add(count, args[1] + args[2] + args[3] + args[4]); } } } reader2.close(); FileWriter writer = 
new FileWriter(fileName); for (String st :list) writer.write(st + "\n"); writer.flush(); writer.close(); } catch (IOException exc) {exc.printStackTrace();} } static String cutOrFillToSize (String str, int len) { if (str.length() > len) str = str.substring(0, len); for (int i = str.length(); i < len; i++) str += " "; return str; } } <file_sep>/src/com/javarush/test/level33/lesson15/big01/strategies/FileStorageStrategy.java package com.javarush.test.level33.lesson15.big01.strategies; public class FileStorageStrategy implements StorageStrategy { private FileBucket[] table = new FileBucket[DEFAULT_INITIAL_CAPACITY]; private long bucketSizeLimit = 10000; private static final int DEFAULT_INITIAL_CAPACITY = 16; public FileStorageStrategy() { for (int i = 0; i < table.length; i++) table[i] = new FileBucket(); } public long getBucketSizeLimit() { return bucketSizeLimit; } public void setBucketSizeLimit(long bucketSizeLimit) { this.bucketSizeLimit = bucketSizeLimit; } private int hash(Long k) { return k.hashCode(); } private int indexFor(int hash, int length) { return hash & (length - 1); } private Entry getEntry(Long key) { int hash = (key == null) ? 
0 : hash(key); for (Entry e = table[indexFor(hash, table.length)].getEntry(); e != null; e = e.next) { Long k; if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) return e; } return null; } private void resize(int newCapacity) { FileBucket[] oldTable = table; int oldCapacity = oldTable.length; if (oldCapacity == 1 << 30) { return; } FileBucket[] newTable = new FileBucket[newCapacity]; transfer(newTable); for (FileBucket bucket : table) bucket.remove(); table = newTable; } private void transfer(FileBucket[] newTable) { FileBucket[] src = table; int newCapacity = newTable.length; for (int j = 0; j < src.length; j++) { Entry e = src[j].getEntry(); if (e != null) { src[j] = null; do { Entry next = e.next; int i = indexFor(e.hash, newCapacity); e.next = newTable[i].getEntry(); newTable[i].putEntry(e); e = next; } while (e != null); } } } private void addEntry(int hash, Long key, String value, int bucketIndex) { if (table[bucketIndex] == null) table[bucketIndex] = new FileBucket(); Entry e = table[bucketIndex].getEntry(); table[bucketIndex].putEntry(new Entry(hash, key, value, e)); if (table[bucketIndex].getFileSize() > bucketSizeLimit) resize(2 * table.length); } private void createEntry(int hash, Long key, String value, int bucketIndex) { if (table[bucketIndex] == null) table[bucketIndex] = new FileBucket(); Entry e = table[bucketIndex].getEntry(); table[bucketIndex].putEntry(new Entry(hash, key, value, e)); } @Override public boolean containsKey(Long key) { return getEntry(key) != null; } @Override public boolean containsValue(String value) { if (value == null) return false; FileBucket[] tab = table; for (int i = 0; i < tab.length; i++) { if (tab[i] != null) for (Entry e = tab[i].getEntry(); e != null; e = e.next) if (value.equals(e.value)) return true; } return false; } @Override public void put(Long key, String value) { if (key == null) return; int hash = hash(key); int i = indexFor(hash, table.length); addEntry(hash, key, value, i); } 
@Override public Long getKey(String value) { if (value == null) return 0L; for (FileBucket bucket : table) { while (bucket != null && bucket.getEntry() != null) { if (value.equals(bucket.getEntry().getValue())) return bucket.getEntry().key; bucket.putEntry(bucket.getEntry().next); } } return null; } @Override public String getValue(Long key) { if (key == null) return null; int hash = hash(key); if (table[indexFor(hash, table.length)] == null) return null; for (Entry e = table[indexFor(hash, table.length)].getEntry(); e != null; e = e.next) { Long k; if (e.hash == hash && ((k = e.key) == key || key.equals(k))) return e.value; } return null; } } <file_sep>/src/com/javarush/test/level33/lesson10/bonus01/TestClass.java package com.javarush.test.level33.lesson10.bonus01; import javax.xml.bind.annotation.XmlRootElement; import java.util.ArrayList; @XmlRootElement public class TestClass { public ArrayList<String> tagString = new ArrayList<>(); }<file_sep>/src/com/javarush/test/level26/lesson15/big01/CurrencyManipulatorFactory.java package com.javarush.test.level26.lesson15.big01; import java.util.Collection; import java.util.HashMap; public class CurrencyManipulatorFactory { private CurrencyManipulatorFactory(){} public static HashMap<String, CurrencyManipulator> hmap = new HashMap(); public static CurrencyManipulator getManipulatorByCurrencyCode(String currencyCode){ if (!hmap.containsKey(currencyCode)) hmap.put(currencyCode, new CurrencyManipulator(currencyCode)); return hmap.get(currencyCode); } public static Collection<CurrencyManipulator> getAllCurrencyManipulators() {return hmap.values();} } <file_sep>/src/com/javarush/test/level33/lesson10/bonus01/Solution.java package com.javarush.test.level33.lesson10.bonus01; import org.w3c.dom.Comment; import org.w3c.dom.Document; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Marshaller; import 
javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import javax.xml.transform.OutputKeys; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerException; import javax.xml.transform.TransformerFactory; import javax.xml.transform.dom.DOMSource; import javax.xml.transform.stream.StreamResult; import java.io.StringWriter; import java.util.Queue; import java.util.concurrent.LinkedBlockingDeque; /* Комментарий внутри xml Реализовать метод toXmlWithComment, который должен возвращать строку - xml представление объекта obj. В строке перед каждым тэгом tagName должен быть вставлен комментарий comment. Сериализация obj в xml может содержать CDATA с искомым тегом. Перед ним вставлять комментарий не нужно. Пример вызова: toXmlWithComment(firstSecondObject, "second", "it's a comment") Пример результата: <?xml version="1.0" encoding="UTF-8" standalone="no"?> <first> <!--it's a comment--> <second>some string</second> <!--it's a comment--> <second>some string</second> <!--it's a comment--> <second><![CDATA[need CDATA because of < and >]]></second> <!--it's a comment--> <second/> </first> */ public class Solution { public static String toXmlWithComment(Object obj, String tagName, String comment) { StringWriter writer = new StringWriter(); try { JAXBContext context = JAXBContext.newInstance(obj.getClass()); Marshaller marshaller = context.createMarshaller(); marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE); Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument(); marshaller.marshal(obj, doc); NodeList nodes = doc.getElementsByTagName("*"); for (int i = 0; i < nodes.getLength(); i++) { Comment comm = doc.createComment(comment); if (nodes.item(i).getNodeName().equals(tagName)) nodes.item(i).getParentNode().insertBefore(comm, nodes.item(i)); } nodes = doc.getElementsByTagName("*"); Queue<Node> nodeQueue = new LinkedBlockingDeque<>(); nodeQueue.offer(nodes.item(0)); 
while (!nodeQueue.isEmpty()) { Node subNode = nodeQueue.poll(); if (subNode.getNodeType() == 3 && subNode.getTextContent().matches(".*[\"'&<>/]+.*")) { Node cdataSection = doc.createCDATASection(subNode.getTextContent()); subNode.getParentNode().replaceChild(cdataSection, subNode); } if (subNode.hasChildNodes()) { NodeList subNodes = subNode.getChildNodes(); for (int i = 0; i < subNodes.getLength(); i++) nodeQueue.offer(subNodes.item(i)); } } Transformer transformer = TransformerFactory.newInstance().newTransformer(); transformer.setOutputProperty(OutputKeys.INDENT, "yes"); transformer.transform(new DOMSource(doc), new StreamResult(writer)); } catch (JAXBException | ParserConfigurationException | TransformerException e) { e.printStackTrace(); } return writer.toString(); } public static void main(String[] args) { TestClass testClass = new TestClass(); testClass.tagString.add("one"); testClass.tagString.add("two"); testClass.tagString.add("three"); testClass.tagString.add("here need a CDATA tag: <tagString> some text </tagString>"); testClass.tagString.add("four"); testClass.tagString.add("five"); String res = toXmlWithComment(testClass, "tagString", "Some comment"); System.out.println(); System.out.println(res); } } <file_sep>/src/com/javarush/test/level09/lesson11/home08/Solution.java package com.javarush.test.level09.lesson11.home08; import java.util.ArrayList; /* Список из массивов чисел Создать список, элементами которого будут массивы чисел. Добавить в список пять объектов–массивов длиной 5, 2, 4, 7, 0 соответственно. Заполнить массивы любыми данными и вывести их на экран. 
*/ public class Solution { public static void main(String[] args) { ArrayList<int[]> list = createList(); printList(list); } public static ArrayList<int[]> createList() { //напишите тут ваш код ArrayList<int[]> in = new ArrayList<int[]>(); int[] i1 = {1, 2, 3, 4 ,5}, i2 = {6, 7}, i3 = {8, 9, 0, 1}, i4 = {2, 3, 4 ,5 ,6 ,7 ,8}, i5 = {}; in.add(i1); in.add(i2); in.add(i3); in.add(i4); in.add(i5); return in; } public static void printList(ArrayList<int[]> list) { for (int[] array: list ) { for (int x: array) { System.out.println(x); } } } } <file_sep>/src/com/javarush/test/level08/lesson11/home06/Solution.java package com.javarush.test.level08.lesson11.home06; /* Вся семья в сборе 1. Создай класс Human с полями имя (String), пол (boolean), возраст (int), дети (ArrayList<Human>). 2. Создай объекты и заполни их так, чтобы получилось: два дедушки, две бабушки, отец, мать, трое детей. 3. Вывести все объекты Human на экран. */ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; public class Solution { public static void main(String[] args) { //напишите тут ваш код Human child1 = new Human("ik,mjuyh", true, 22, new ArrayList()); Human child2 = new Human("k,opijjoij", false, 20, new ArrayList()); Human child3 = new Human("pkjmjijjrd", true, 10, new ArrayList()); ArrayList<Human> child = new ArrayList<Human>(); Collections.addAll(child, child1, child2, child3); Human father = new Human("piooi", true, 44, child); Human mother = new Human("hybyb", true, 33, child); ArrayList<Human> parents = new ArrayList<Human>(); Collections.addAll(parents, father, mother); Human gF1 = new Human("qwed", true, 90, parents); Human gF2 = new Human("fgr", true, 80, parents); Human gM1 = new Human("vb", false, 88, parents); Human gM2 = new Human("lk", false, 77, parents); System.out.println(gF1); System.out.println(gF2); System.out.println(gM1); System.out.println(gM2); System.out.println(father); System.out.println(mother); System.out.println(child1); 
System.out.println(child2); System.out.println(child3); } public static class Human { //напишите тут ваш код String name = ""; boolean sex = true; int age = 0; ArrayList<Human> children; Human(String name, boolean sex, int age, ArrayList children) { this.name = name; this.sex = sex; this.age = age; this.children = children; } public String toString() { String text = ""; text += "Имя: " + this.name; text += ", пол: " + (this.sex ? "мужской" : "женский"); text += ", возраст: " + this.age; int childCount = this.children.size(); if (childCount > 0) { text += ", дети: "+this.children.get(0).name; for (int i = 1; i < childCount; i++) { Human child = this.children.get(i); text += ", "+child.name; } } return text; } } } <file_sep>/src/com/javarush/test/level30/lesson15/big01/MessageType.java package com.javarush.test.level30.lesson15.big01; public enum MessageType { NAME_REQUEST, USER_NAME, NAME_ACCEPTED, TEXT, USER_ADDED, USER_REMOVED } <file_sep>/src/com/javarush/test/level22/lesson09/task01/Solution.java package com.javarush.test.level22.lesson09.task01; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import java.util.Scanner; /* Обращенные слова В методе main с консоли считать имя файла, который содержит слова, разделенные пробелами. Найти в тексте все пары слов, которые являются обращением друг друга. Добавить их в result. Порядок слов first/second не влияет на тестирование. Использовать StringBuilder. 
Пример содержимого файла рот тор торт о о тот тот тот Вывод: рот тор о о тот тот */ public class Solution { public static List<Pair> result = new LinkedList<>(); public static void main(String[] args) { boolean contains = false; String first = "", second = ""; ArrayList<String> arr = new ArrayList<>(); try { Scanner scanner = new Scanner(new FileInputStream(new BufferedReader(new InputStreamReader(System.in)).readLine())); while (scanner.hasNext()) arr.add(scanner.next()); scanner.close(); }catch (IOException exc){exc.getMessage();} for (int i = 0; i < arr.size();){ for (int j = i + 1; j < arr.size();){ contains = false; first = arr.get(i); second = arr.get(j); StringBuilder sb = new StringBuilder(second).reverse(); if (first.equals(sb.toString())){ Pair p = new Pair(); p.first = first; p.second = second; for (Pair pr : result) if (pr.first.equals(first) || pr.second.equals(first)) contains = true; if (!contains) result.add(p); } j++; } i++; } for (Pair p : result) System.out.println(p); } public static class Pair { String first; String second; @Override public String toString() { return first == null && second == null ? "" : first == null && second != null ? second : second == null && first != null ? first : first.compareTo(second) < 0 ? first + " " + second : second + " " + first; } } } <file_sep>/src/com/javarush/test/level22/lesson05/home01/TooShortStringSecondThreadException.java package com.javarush.test.level22.lesson05.home01; public class TooShortStringSecondThreadException extends RuntimeException { public TooShortStringSecondThreadException(Throwable t){super(t);} } <file_sep>/src/com/javarush/test/level08/lesson11/home03/Solution.java package com.javarush.test.level08.lesson11.home03; import java.util.HashMap; import java.util.Map; /* Люди с одинаковыми именами и/или фамилиями 1. Создать словарь Map (<String, String>) и добавить туда 10 человек в виде «Фамилия»-«Имя». 2. Пусть среди этих 10 человек есть люди с одинаковыми именами. 3. 
Пусть среди этих 10 человек есть люди с одинаковыми фамилиями. 4. Вывести содержимое Map на экран. */ public class Solution { public static void main(String[] args) { Map<String, String> map = createPeopleList(); printPeopleList(map); } public static Map<String, String> createPeopleList() { //напишите тут ваш код Map<String, String> map = new HashMap<String, String>(); map.put("абыр", "кагадыр"); map.put("валг", "бабарг"); map.put("кабдыщ", "бум"); map.put("йокар", "бум"); map.put("валг", "гыча"); map.put("траляля", "молоко"); map.put("труляля", ")))"); map.put("абыр", "бум"); map.put("ляляля", "ыва"); map.put("гыыгыг", "ывафыва"); return map; } public static void printPeopleList(Map<String, String> map) { for (Map.Entry<String, String> s : map.entrySet()) { System.out.println(s.getKey() + " " + s.getValue()); } } } <file_sep>/src/com/javarush/test/level18/lesson05/task03/Solution.java package com.javarush.test.level18.lesson05.task03; /* Разделение файла Считать с консоли три имени файла: файл1, файл2, файл3. Разделить файл1 по следующему критерию: Первую половину байт записать в файл2, вторую половину байт записать в файл3. Если в файл1 количество байт нечетное, то файл2 должен содержать бОльшую часть. Закрыть потоки. 
Не использовать try-with-resources */ import java.io.*; public class Solution { public static void main(String[] args) { String[] fileName = new String[3]; int count = 1, fileByte; try { BufferedReader reader = new BufferedReader(new InputStreamReader(System.in)); fileName[0] = reader.readLine(); fileName[1] = reader.readLine(); fileName[2] = reader.readLine(); FileInputStream fis = new FileInputStream(fileName[0]); FileOutputStream fos2 = new FileOutputStream(fileName[1]); FileOutputStream fos3 = new FileOutputStream(fileName[2]); int mid = (int)(Math.ceil(fis.available()/2.0)); while (count <= mid) { fileByte = fis.read(); fos2.write(fileByte); count++; } while (fis.available() > 0) { fileByte = fis.read(); fos3.write(fileByte); } reader.close(); fis.close(); fos2.close(); fos3.close(); } catch (IOException exc) { exc.printStackTrace();} } } <file_sep>/src/com/javarush/test/level30/lesson15/big01/client/BotClient.java package com.javarush.test.level30.lesson15.big01.client; import java.io.IOException; import java.text.SimpleDateFormat; import java.util.Calendar; import java.util.Date; public class BotClient extends Client { private static int botNumber = 0; public class BotSocketThread extends SocketThread{ @Override protected void clientMainLoop() throws IOException, ClassNotFoundException { sendTextMessage("Привет чатику. Я бот. 
Понимаю команды: дата, день, месяц, год, время, час, минуты, секунды."); super.clientMainLoop(); } @Override protected void processIncomingMessage(String message) { super.processIncomingMessage(message); if (!message.matches(".+: .+")) return; String name = message.substring(0, message.indexOf(": ")); String text = message.substring(message.indexOf(": ") + 2); SimpleDateFormat format = null; switch (text){ case "дата" : format = new SimpleDateFormat("d.MM.YYYY"); break; case "день" : format = new SimpleDateFormat("d"); break; case "месяц" : format = new SimpleDateFormat("MMMM"); break; case "год" : format = new SimpleDateFormat("YYYY"); break; case "время" : format = new SimpleDateFormat("H:mm:ss"); break; case "час" : format = new SimpleDateFormat("H"); break; case "минуты" : format = new SimpleDateFormat("m"); break; case "секунды" : format = new SimpleDateFormat("s"); break; } if (format != null){ Date date = Calendar.getInstance().getTime(); sendTextMessage("Информация для " + name + ": " + format.format(date)); } } } @Override protected SocketThread getSocketThread() { return new BotSocketThread(); } @Override protected boolean shouldSentTextFromConsole() { return false; } @Override protected String getUserName() { String botName = "date_bot_"; if (botNumber == 100) botNumber = -1; return botName + botNumber++; } public static void main(String[] args) { new BotClient().run(); } } <file_sep>/src/com/javarush/test/level19/lesson10/bonus01/Solution.java package com.javarush.test.level19.lesson10.bonus01; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.List; /* Отслеживаем изменения Считать в консоли 2 имени файла - file1, file2. Файлы содержат строки, file2 является обновленной версией file1, часть строк совпадают. 
Нужно создать объединенную версию строк, записать их в список lines Операции ADDED и REMOVED не могут идти подряд, они всегда разделены SAME Пример: оригинальный редактированный общий file1: file2: результат:(lines) строка1 строка1 SAME строка1 строка2 REMOVED строка2 строка3 строка3 SAME строка3 строка4 REMOVED строка4 строка5 строка5 SAME строка5 строка0 ADDED строка0 строка1 строка1 SAME строка1 строка2 REMOVED строка2 строка3 строка3 SAME строка3 строка5 ADDED строка5 строка4 строка4 SAME строка4 строка5 REMOVED строка5 */ public class Solution { public static List<LineItem> lines = new ArrayList<LineItem>(); public static void main(String[] args) { ArrayList<String> f1List = new ArrayList<>(); ArrayList<String> f2List = new ArrayList<>(); try{ BufferedReader reader = new BufferedReader(new InputStreamReader(System.in)); BufferedReader f1reader = new BufferedReader(new InputStreamReader(new FileInputStream(reader.readLine()))); BufferedReader f2reader = new BufferedReader(new InputStreamReader(new FileInputStream(reader.readLine()))); while (f1reader.ready()) f1List.add(f1reader.readLine()); while (f2reader.ready()) f2List.add(f2reader.readLine()); while ((f1List.size() > 0) && f2List.size() > 0) { if (f1List.get(0).equals(f2List.get(0))){ lines.add(new LineItem(Type.SAME, f1List.get(0))); f1List.remove(0); f2List.remove(0); }else if(f2List.size() > 1 && f1List.get(0).equals(f2List.get(1))){ lines.add(new LineItem(Type.ADDED, f2List.get(0))); f2List.remove(0); }else{ lines.add(new LineItem(Type.REMOVED, f1List.get(0))); f1List.remove(0); } } while (f1List.size() > 0){ lines.add(new LineItem(Type.REMOVED, f1List.get(0))); f1List.remove(0); } while (f2List.size() > 0){ lines.add(new LineItem(Type.ADDED, f2List.get(0))); f2List.remove(0); } reader.close(); f1reader.close(); f2reader.close(); } catch (Exception exc) {exc.printStackTrace();} } public static enum Type { ADDED, //добавлена новая строка REMOVED, //удалена строка SAME //без изменений } public static 
class LineItem { public Type type; public String line; public LineItem(Type type, String line) { this.type = type; this.line = line; } } } <file_sep>/src/com/javarush/test/level14/lesson08/bonus03/Singleton.java package com.javarush.test.level14.lesson08.bonus03; public class Singleton { static Singleton singleton; private Singleton() { } public static Singleton getInstance() { if (singleton == null) singleton = new Singleton(); return singleton; } } <file_sep>/src/com/javarush/test/level18/lesson10/home03/Solution.java package com.javarush.test.level18.lesson10.home03; /* Два в одном Считать с консоли 3 имени файла Записать в первый файл содержимого второго файла, а потом дописать в первый файл содержимое третьего файла Закрыть потоки. Не использовать try-with-resources */ import java.io.*; public class Solution { public static void main(String[] args) { try { BufferedReader reader = new BufferedReader(new InputStreamReader(System.in)); String file1Name = reader.readLine(); String file2Name = reader.readLine(); String file3Name = reader.readLine(); FileOutputStream fos1 = new FileOutputStream(file1Name, true); FileInputStream fis2 = new FileInputStream(file2Name); FileInputStream fis3 = new FileInputStream(file3Name); while (fis2.available() > 0) fos1.write(fis2.read()); while (fis3.available() > 0) fos1.write(fis3.read()); reader.close(); fos1.close(); fis2.close(); fis3.close(); } catch (IOException exc) {exc.printStackTrace();} } } <file_sep>/src/com/javarush/test/level22/lesson13/task01/Solution.java package com.javarush.test.level22.lesson13.task01; import java.util.ArrayList; import java.util.StringTokenizer; /* StringTokenizer Используя StringTokenizer разделить query на части по разделителю delimiter. 
Пример, getTokens("level22.lesson13.task01", ".") == {"level22", "lesson13", "task01"} */ public class Solution { public static String [] getTokens(String query, String delimiter){ ArrayList<String> arr = new ArrayList<>(); StringTokenizer st = new StringTokenizer(query, delimiter); while (st.hasMoreTokens()) arr.add(st.nextToken()); String[] str = new String[arr.size()]; return arr.toArray(str); } } <file_sep>/src/com/javarush/test/level22/lesson05/home01/TooShortStringFirstThreadException.java package com.javarush.test.level22.lesson05.home01; public class TooShortStringFirstThreadException extends RuntimeException { public TooShortStringFirstThreadException(Throwable t){super(t);} } <file_sep>/src/com/javarush/test/level06/lesson11/bonus03/Solution.java package com.javarush.test.level06.lesson11.bonus03; import java.io.BufferedReader; import java.io.InputStreamReader; /* Задача по алгоритмам Задача: Написать программу, которая вводит с клавиатуры 5 чисел и выводит их в возрастающем порядке. 
Пример ввода: 3 2 15 6 17 Пример вывода: 2 3 6 15 17 */ public class Solution { public static void main(String[] args) throws Exception { BufferedReader reader = new BufferedReader(new InputStreamReader(System.in)); //напишите тут ваш код int temp, i, j; int array[] = new int[5]; for(i = 1; i <= 5; i++) array[i-1] = Integer.parseInt(reader.readLine()); for (j = 1; j < array.length; j++) for(i = array.length-1; i >= j; i--) if (array[i] < array[i-1]) { temp = array[i]; array[i] = array[i-1]; array[i-1] = temp; } for(i = 1; i <= array.length; i++) System.out.println(array[i-1]); } } <file_sep>/src/com/javarush/test/level28/lesson15/big01/view/HtmlView.java package com.javarush.test.level28.lesson15.big01.view; import com.javarush.test.level28.lesson15.big01.Controller; import com.javarush.test.level28.lesson15.big01.vo.Vacancy; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.util.List; public class HtmlView implements View { private Controller controller; private final String filePath = "./src/" + this.getClass().getPackage().getName().replace(".", "/") + "/vacancies.html"; @Override public void update(List<Vacancy> vacancies) { updateFile(getUpdatedFileContent(vacancies)); } @Override public void setController(Controller controller) { this.controller = controller; } public void userCitySelectEmulationMethod() { controller.onCitySelect("Odessa"); } private String getUpdatedFileContent(List<Vacancy> vacancies) { Document document = null; try { document = getDocument(); Element element = document.select(".template").first(); if (element == null) return ""; Element copyTemplate = element.clone(); copyTemplate.removeClass("template"); copyTemplate.removeAttr("style"); for (Element remove : document.select(".vacancy")) if (!remove.hasClass("template")) remove.remove(); for (Vacancy vacancy : vacancies) { Element vacancyTemplate = 
copyTemplate.clone(); vacancyTemplate.getElementsByClass("city").first().appendText(vacancy.getCity()); vacancyTemplate.getElementsByClass("companyName").first().appendText(vacancy.getCompanyName()); vacancyTemplate.getElementsByClass("salary").first().appendText(vacancy.getSalary()); vacancyTemplate.getElementsByTag("a").first().attr("href", vacancy.getUrl()).text(vacancy.getTitle()); element.before(vacancyTemplate.outerHtml()); } } catch (IOException e){ e.printStackTrace(); System.out.println("Some exception occurred"); } return document.toString(); } private void updateFile(String string) { try (FileWriter writer = new FileWriter(filePath)) { writer.write(string); writer.close(); } catch (IOException e) { e.printStackTrace(); } } protected Document getDocument() throws IOException { return Jsoup.parse(new File(filePath), "UTF-8"); } } <file_sep>/src/com/javarush/test/level19/lesson05/task03/Solution.java package com.javarush.test.level19.lesson05.task03; /* Выделяем числа Считать с консоли 2 имени файла. Вывести во второй файл все числа, которые есть в первом файле. Числа выводить через пробел. Закрыть потоки. 
Не использовать try-with-resources Пример тела файла: 12 text var2 14 8v 1 Результат: 12 14 1 */ import java.io.*; import java.util.Scanner; public class Solution { public static void main(String[] args) { String text = null; try { BufferedReader reader = new BufferedReader(new InputStreamReader(System.in)); String file1Name = reader.readLine(); String file2Name = reader.readLine(); reader.close(); FileWriter fw = new FileWriter(file2Name); Scanner scanner = new Scanner(new FileInputStream(file1Name)); while (scanner.hasNext()) { text = scanner.next(); if (text.matches("^[ 0-9]+$")) fw.write(text + " "); } scanner.close(); fw.close(); } catch (IOException exc) {exc.printStackTrace();} } } <file_sep>/src/com/javarush/test/level20/lesson10/bonus03/Solution.java package com.javarush.test.level20.lesson10.bonus03; import java.util.ArrayList; import java.util.List; /* Кроссворд 1. Дан двумерный массив, который содержит буквы английского алфавита в нижнем регистре. 2. Метод detectAllWords должен найти все слова из words в массиве crossword. 3. Элемент(startX, startY) должен соответствовать первой букве слова, элемент(endX, endY) - последней. text - это само слово, располагается между начальным и конечным элементами 4. Все слова есть в массиве. 5. Слова могут быть расположены горизонтально, вертикально и по диагонали как в нормальном, так и в обратном порядке. 6. Метод main не участвует в тестировании */ public class Solution { public static void main(String[] args) { int[][] crossword = new int[][]{ {'f', 'd', 'e', 'r', 'l', 'k'}, {'u', 's', 'a', 'm', 'e', 'o'}, {'l', 'n', 'g', 'r', 'o', 'v'}, {'m', 'l', 'p', 'r', 'r', 'h'}, {'p', 'o', 'e', 'e', 'j', 'j'} }; List<Word> wordsInCross = new ArrayList<>(); wordsInCross = detectAllWords(crossword, "home", "same", "rr"); for (Word w : wordsInCross) System.out.println(w); /* Ожидаемый результат home - (5, 3) - (2, 0) same - (1, 1) - (4, 1) */ } public static List<Word> detectAllWords(int[][] crossword, String... 
words) { ArrayList<Word> wordsInCross = new ArrayList<>(); int numWords = words.length, crossX = crossword[0].length + 2, crossY = crossword.length + 2, startX = 0, startY = 0, endX = 0, endY = 0, storeX = 0, storeY = 0; int [][] cross = new int[crossY][crossX], delta = {{0, -1}, {-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1}, {1, 0}, {1, -1}}; boolean br = false; for (int y = 0; y < crossY-2; y++) for (int x = 0; x < crossX-2; x++) cross[y+1][x+1] = crossword[y][x]; for (int iWord = 0; iWord < numWords; iWord++){ for (int y = 1; y < crossY-1; y++) for (int x = 1; x < crossX-1; x++) if (cross[y][x] == words[iWord].charAt(0)){ startX = x; startY = y; for (int d = 0; d < delta.length; d++) if (cross[y + delta[d][0]][x + delta[d][1]] == words[iWord].charAt(1)){ storeX = x; storeY = y; br = false; for (int ch = 1; ch < words[iWord].length(); ch++) if (cross[y + delta[d][0]][x + delta[d][1]] == words[iWord].charAt(ch) && !br){ x = x + delta[d][1]; y = y + delta[d][0]; if (ch == words[iWord].length() - 1){ endX = x; endY = y; Word w = new Word(words[iWord]); w.setStartPoint(startX-1, startY-1); w.setEndPoint(endX-1, endY-1); wordsInCross.add(w); x = storeX; y = storeY; } }else{ x = storeX; y = storeY; br = true; break; } } } } return wordsInCross; } public static class Word { private String text; private int startX; private int startY; private int endX; private int endY; public Word(String text) { this.text = text; } public void setStartPoint(int i, int j) { startX = i; startY = j; } public void setEndPoint(int i, int j) { endX = i; endY = j; } @Override public String toString() { return String.format("%s - (%d, %d) - (%d, %d)", text, startX, startY, endX, endY); } } } <file_sep>/src/com/javarush/test/level31/lesson06/home01/Solution.java package com.javarush.test.level31.lesson06.home01; import java.io.*; import java.nio.file.Path; import java.nio.file.Paths; import java.util.HashMap; import java.util.Map; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; 
import java.util.zip.ZipOutputStream; /* Добавление файла в архив В метод main приходит список аргументов. Первый аргумент - полный путь к файлу fileName. Второй аргумент - путь к zip-архиву. Добавить файл (fileName) внутрь архива в директорию 'new'. Если в архиве есть файл с таким именем, то заменить его. Пример входных данных: C:/result.mp3 C:/pathToTest/test.zip Файлы внутри test.zip: a.txt b.txt После запуска Solution.main архив test.zip должен иметь такое содержимое: new/result.mp3 a.txt b.txt Подсказка: нужно сначала куда-то сохранить содержимое всех энтри, а потом записать в архив все энтри вместе с добавленным файлом. Пользоваться файловой системой нельзя. КУСОК ГОВНА! ВАЛИДАТОР ПРИНИМАЕТ ТОЛЬКО ЕСЛИ ЗАПИСЫВАТЬ ФАЙЛ В АРХИВ ЕСЛИ ЕГО НЕТ! */ public class Solution { public static Map<ZipEntry, byte[]> entryMap = new HashMap<>(); public static void main(String[] args) throws IOException { Path inputFile = Paths.get(args[0]); Path archive = Paths.get(args[1]); readZipFile(archive); writeZipFile(archive, inputFile); } public static void readZipFile(Path archive) throws IOException{ ZipInputStream zis = new ZipInputStream(new FileInputStream(archive.toFile())); ZipEntry entry; while ((entry = zis.getNextEntry()) != null){ ByteArrayOutputStream baos = new ByteArrayOutputStream(); int count; byte[] buffer = new byte[1024]; while ((count = zis.read(buffer)) != -1) baos.write(buffer, 0, count); byte[] entryBytes = baos.toByteArray(); entryMap.put(entry, entryBytes); } zis.close(); } public static void writeZipFile(Path archive, Path inputFile) throws IOException{ String name = inputFile.getFileName().toString(); boolean isExist = false; ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(archive.toFile())); for (Map.Entry<ZipEntry, byte[]> entry : entryMap.entrySet()){ if (entry.getKey().getName().equals(name)){ isExist = true; continue; } zos.putNextEntry(new ZipEntry(entry.getKey().getName())); zos.write(entry.getValue()); zos.closeEntry(); } if 
(isExist){ name = "new/" + name; zos.putNextEntry(new ZipEntry(name)); FileInputStream fis = new FileInputStream(inputFile.toFile()); byte[] buffer = new byte[fis.available()]; fis.read(buffer); zos.write(buffer); zos.closeEntry(); } zos.close(); } } <file_sep>/src/com/javarush/test/level19/lesson05/task01/Solution.java package com.javarush.test.level19.lesson05.task01; /* Четные байты Считать с консоли 2 имени файла. Вывести во второй файл все байты с четным индексом. Пример: второй байт, четвертый байт, шестой байт и т.д. Закрыть потоки ввода-вывода. */ import java.io.*; public class Solution { public static void main(String[] args) { try { BufferedReader reader = new BufferedReader(new InputStreamReader(System.in)); String file1Name = reader.readLine(); String file2Name = reader.readLine(); reader.close(); FileInputStream fis = new FileInputStream(file1Name); FileOutputStream fos = new FileOutputStream(file2Name); while (fis.available() > 1) { fis.read(); fos.write(fis.read()); } fis.close(); fos.close(); } catch (IOException exc) {exc.printStackTrace();} } } <file_sep>/src/com/javarush/test/level03/lesson06/task05/Solution.java package com.javarush.test.level03.lesson06.task05; /* Изучаем японский Выведи на экран 日本語 */ public class Solution { public static void main(String[] args) { /* char c1 = '日'; char c2 = '本'; char c3 = '語'; int i1 = c1; int i2 = c2; int i3 = c3; System.out.println(i1 + " " + i2 + " " + i3); */ char c1 = 26085; char c2 = 26412; char c3 = 35486; System.out.print(c1); System.out.print(c2); System.out.print(c3); //напишите тут ваш код } }<file_sep>/src/com/javarush/test/level32/lesson06/task01/Solution.java package com.javarush.test.level32.lesson06.task01; import java.io.*; import java.util.concurrent.ThreadLocalRandom; /* Генератор паролей Реализуйте логику метода getPassword, который должен возвращать ByteArrayOutputStream, в котором будут байты пароля. 
Требования к паролю: 1) 8 символов 2) только цифры и латинские буквы разного регистра 3) обязательно должны присутствовать цифры, и буквы разного регистра Все сгенерированные пароли должны быть уникальные. Пример правильного пароля: wMh7SmNu */ public class Solution { public static void main(String[] args) { for (int i = 0; i < 100; i++) { ByteArrayOutputStream password = getPassword(); System.out.println(password.toString()); } } public static ByteArrayOutputStream getPassword() { byte[] pass = new byte[8]; int countInts = 0; int countUpper = 0; int countLower = 0; ByteArrayOutputStream baos = new ByteArrayOutputStream(); for (int i = 0; i < 8; i++){ int choise = ThreadLocalRandom.current().nextInt(1,3); if (i > 4) if (countInts == 0) choise = 1; else if (countUpper == 0) choise = 2; else if (countLower == 0) choise = 3; switch (choise) { case 1 : pass[i] = (byte)ThreadLocalRandom.current().nextInt(48, 58); countInts++; break; case 2 : pass[i] = (byte)ThreadLocalRandom.current().nextInt(65, 91); countUpper++; break; case 3 : pass[i] = (byte)ThreadLocalRandom.current().nextInt(97, 123); countLower++; break; } } try (ByteArrayInputStream bais = new ByteArrayInputStream(pass)) { byte[] buffer = new byte[bais.available()]; bais.read(buffer); baos.write(buffer); } catch (IOException e){} return baos; } } <file_sep>/src/com/javarush/test/level08/lesson08/task01/Solution.java package com.javarush.test.level08.lesson08.task01; import java.util.Collections; import java.util.HashSet; import java.util.Set; /* 20 слов на букву «Л» Создать множество строк (Set<String>), занести в него 20 слов на букву «Л». 
*/ public class Solution { public static HashSet<String> createSet() { //напишите тут ваш код HashSet<String> sstr = new HashSet<String>(); Collections.addAll(sstr, "Лыба", "Лак", "Лолбаса", "Ладио", "Лабота", "Лавиатура", "Лышка", "Лонитор", "Лелевизор", "Лиск", "Лесня", "Лечь", "Лтол", "Лтул", "Локно", "Лветок", "Ллово", "Литор", "Лакароны", "Ляблоко"); return sstr; } } <file_sep>/src/com/javarush/test/level25/lesson09/task01/UncaughtExceptionHandler.java package com.javarush.test.level25.lesson09.task01; public class UncaughtExceptionHandler implements Thread.UncaughtExceptionHandler { @Override public void uncaughtException(Thread t, Throwable e) { Throwable thr = e; if (thr instanceof Error) System.out.println("Нельзя дальше работать"); else if (thr instanceof Exception) System.out.println("Надо обработать"); else System.out.println("ХЗ"); } } <file_sep>/src/com/javarush/test/level22/lesson09/task03/Solution.java package com.javarush.test.level22.lesson09.task03; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Scanner; /* Составить цепочку слов В методе main считайте с консоли имя файла, который содержит слова, разделенные пробелом. В методе getLine используя StringBuilder расставить все слова в таком порядке, чтобы последняя буква данного слова совпадала с первой буквой следующего не учитывая регистр. Каждое слово должно участвовать 1 раз. Метод getLine должен возвращать любой вариант. Слова разделять пробелом. В файле не обязательно будет много слов. Пример тела входного файла: <NAME>-Йорк <NAME> Результат: <NAME>-Йорк <NAME> */ public class Solution { public static void main(String[] args) { //... 
ArrayList<String> arr = new ArrayList<>(); try(Scanner scanner = new Scanner(new FileInputStream(new BufferedReader(new InputStreamReader(System.in)).readLine()))){ while (scanner.hasNext()) arr.add(scanner.next()); }catch (IOException exc){exc.printStackTrace();} String[] str = new String[arr.size()]; arr.toArray(str); StringBuilder result = getLine(str); System.out.println(result.toString()); } public static StringBuilder getLine(String... words) { if (words.length == 0 || words == null) return new StringBuilder(); if (words.length == 1) return new StringBuilder(words[0]); boolean is = true; ArrayList<String> arr = new ArrayList(Arrays.asList(words)); Collections.shuffle(arr); StringBuilder sb = new StringBuilder(arr.get(0)); arr.set(0, ""); while (is){ is = false; for (int i = 0; i < arr.size(); i++){ if (!arr.get(i).equals("") && (sb.toString().substring(sb.length() - 1)).toLowerCase().equals(arr.get(i).substring(0, 1).toLowerCase())){ sb.append(" " + arr.get(i)); arr.set(i, ""); i = 0; is = true; Collections.shuffle(arr); } } } is = true; while (is){ is = false; for (int i = 0; i < arr.size(); i++){ if (!arr.get(i).equals("") && (sb.toString().substring(0, 1).toLowerCase().equals(arr.get(i).substring(arr.get(i).length() - 1).toLowerCase()))){ sb.insert(0, arr.get(i) + " "); arr.set(i, ""); i = 0; is = true; Collections.shuffle(arr); } } } return sb; } }
0a6d7c44dd5f3c6e466a291f57c17c2bbbca03ff
[ "Java" ]
39
Java
FolkGAS/JavaRushHomeWork
da4ef81ed7210cba3f016fb5ab9028712a84172d
cbe3b95311dcaeebe66541632b67fe328f93e559
refs/heads/master
<repo_name>joprice/playlists<file_sep>/app/playlist-draft.service.ts import { Injectable } from '@angular/core'; import { Headers, Http, URLSearchParams } from '@angular/http'; import 'rxjs/add/operator/toPromise'; import {Observable} from "rxjs/Observable"; import {List} from 'immutable'; import {BehaviorSubject} from "rxjs/Rx"; import {Playlist, PlaylistTrack} from './models'; import {TrackSearchService} from "./track-search.service"; import {ReTokenService} from "./re-token.service"; declare var erinyesConfig:any; @Injectable() export class PlaylistDraftService { protected imageBucket = erinyesConfig.imageBucket; protected playlistBucket = erinyesConfig.playlistBucket; protected objectDbUrl = erinyesConfig.radioeditApiURL + '/api/rpc'; protected storageUrl = erinyesConfig.radioeditApiURL + '/storage'; protected publicImageURIPrefix = erinyesConfig.uploadImageURIPrefix; protected ampPlaylistUrl = 'http://qa-ampinternal.ihrcloud.net/api/v3/collection/user/283087033/collection'; protected ampProfileId = '283087033'; protected ampSessionId = 'lr6QMyPPx/leYJEJ0iTG5A=='; protected token = ''; private _playlists = new BehaviorSubject<List<Playlist>>(List<Playlist>()); public playlists = this._playlists.asObservable(); constructor(protected http: Http, protected trackSearchService:TrackSearchService, protected reTokenService:ReTokenService) { this.reTokenService.cacheToken.subscribe(token => { console.log("playlist token", token); if (token) { this.token = token; this.loadPlaylists(); } }); } protected loadPlaylists() { if (this._playlists.getValue().count() > 0) { return; } console.log("cache token", this.token); this.http .post(this.objectDbUrl, JSON.stringify({ "id": "1", "jsonrpc": "2.0", "method": "query", "params": { "bucket": this.playlistBucket, "expression": { "version": null } } }), {headers: new Headers({ 'Content-Type': 'application/json', 'Authorization': 'Bearer ' + this.token })}) // TODO HTTP error handling. 
//.catch(this.handleError) .subscribe(res => { let out = res.json(); console.log(out); let ps:Playlist[] = []; for (let i:number=0; i < out.result.results.length; i++) { // TODO Should I just store images in a different bucket? Probably. if (!out.result.results[i]._id.startsWith("/images/") && !out.result.results[i]._id.startsWith("/genre/")) { ps.push(this.buildPlaylistFromResource(out.result.results[i])); } } this._playlists.next(List(ps)); }) } protected buildPlaylistFromResource(resource:any):Playlist { let p = new Playlist(); p.id = resource._id; p.title = resource.object.title; p.description = resource.object.description; p.header = resource.object.header; p.author = resource.object.author; p.hasChanges = resource.object.hasChanges; p.isPublished = resource.object.isPublished; p.publishedPlaylistId = resource.object.publishedPlaylistId; p.publishedUserId = resource.object.publishedUserId; p.urlMobile = resource.object.urlMobile; p.urlWeb = resource.object.urlWeb; p.urlImage = resource.object.urlImage; p.imageType = resource.object.imageType; p.customImagePath = resource.object.customImagePath; p.tracks = []; p.dateCreated = new Date(resource.ctime as number); p.dateUpdated = new Date(resource.mtime as number); for (let j:number=0; j < resource.object.tracks.length; j++) { let track:PlaylistTrack = new PlaylistTrack(); track.productId = resource.object.tracks[j]; p.tracks.push(track); } return p; } fillInPlaylist(playlist:Playlist) { if (playlist.tracks.length > 0) { // Refresh the details. 
let trackIds: number[] = []; for (let j=0; j < playlist.tracks.length; j++) { trackIds.push(playlist.tracks[j].productId); } this.trackSearchService.getDetails(trackIds) .then(trackDetails => { let pt: PlaylistTrack[] = []; for (let k:number=0; k < trackDetails.length; k++) { let track:PlaylistTrack = new PlaylistTrack(); track.productId = trackDetails[k].id; track.title = trackDetails[k].title; track.album = trackDetails[k].albumName; track.artist = trackDetails[k].artistName; pt.push(track); } playlist.tracks = pt; }); } } publish(playlist:Playlist) { // TODO Actually publish. playlist.hasChanges = false; playlist.isPublished = true; let tracks:number[] = []; for (let i=0; i < playlist.tracks.length; i++) { tracks.push(playlist.tracks[i].productId); } let body = { "name": playlist.title, "tracks": tracks, "author": playlist.author ? playlist.author : "_", "description": playlist.description ? playlist.description : "_", "curated": true, "shareable": true, "includeIds": false }; if (playlist.imageType == "upload") { body["imageUrl"] = this.publicImageURIPrefix + playlist.customImagePath; } else { //body["imageUrl"] = ""; } let runner; if (playlist.publishedPlaylistId) { runner = this.http.put(this.ampPlaylistUrl + "/" + playlist.publishedPlaylistId, JSON.stringify(body), { headers: new Headers({ 'Content-Type': 'application/json', 'X-IHR-Profile-ID': this.ampProfileId, 'X-IHR-Session-ID': this.ampSessionId }) }); } else { runner = this.http.post(this.ampPlaylistUrl, JSON.stringify(body), { headers: new Headers({ 'Content-Type': 'application/json', 'X-IHR-Profile-ID': this.ampProfileId, 'X-IHR-Session-ID': this.ampSessionId }) }); } runner.subscribe(res => { console.log("amp save", res.json()); let data = res.json().data; let ps = this._playlists.getValue(); let index = ps.findIndex((p: Playlist) => p.id === playlist.id); if (index === -1) { // Do nothing, something bad happened. alert("There was a significant error with the save to AMP. 
Please send the following information to the administrator: " + data); } else { if (playlist.author != data.author) { alert("Warning: Author did not match on save to AMP.") } else if (playlist.description != data.description) { alert("Warning: Description did not match on save to AMP.") } else if (playlist.title != data.name) { alert("Warning: Title did not match on save to AMP.") } playlist.urlWeb = data.urls.web; playlist.urlMobile = data.urls.goto; playlist.urlImage = data.urls.image; playlist.publishedPlaylistId = data.id; playlist.publishedUserId = data.userId; this._playlists.next(ps.set(index, playlist)); this.save(playlist); // Actually publish, since this is now a separate operation. // TODO Separate these behaviors better. this.http.post(this.ampPlaylistUrl + "/" + playlist.publishedPlaylistId + "/publish", "{}", { headers: new Headers({ 'Content-Type': 'application/json', 'X-IHR-Profile-ID': this.ampProfileId, 'X-IHR-Session-ID': this.ampSessionId }) }).subscribe(res => { console.log("res on publish", res); }); } }); this.save(playlist); } unpublish(playlist:Playlist, saveAfter:boolean=false) { if (!playlist.publishedPlaylistId) { console.log("no published playlist ID."); return; } return this.http .delete(this.ampPlaylistUrl + "/" + playlist.publishedPlaylistId, { body: "", headers: new Headers({ 'Content-Type': 'application/json', 'X-IHR-Profile-ID': this.ampProfileId, 'X-IHR-Session-ID': this.ampSessionId }) }) .subscribe(res => { let ps = this._playlists.getValue(); let index = ps.findIndex((p: Playlist) => p.id === playlist.id); if (index === -1) { // Do nothing, something bad happened. 
} else { playlist.isPublished = false; playlist.hasChanges = true; playlist.publishedUserId = ""; playlist.publishedPlaylistId = ""; playlist.urlWeb = ""; playlist.urlMobile = ""; this._playlists.next(ps.set(index, playlist)); if (saveAfter) { this.save(playlist); } } }); } delete(playlist:Playlist) { console.log("playlist to delete", playlist); this.unpublish(playlist); return this.http.post(this.objectDbUrl, JSON.stringify({ "id": "1", "jsonrpc": "2.0", "method": "delete", "params": { "_id": playlist.id, "bucket": this.playlistBucket, "immediate": true } }), {headers: new Headers({ 'Content-Type': 'application/json', 'Authorization': 'Bearer ' + this.token })}) .subscribe(res => { console.log("deleted data", res.json()); let ps = this._playlists.getValue(); let index = ps.findIndex((p: Playlist) => p.id === playlist.id); if (index === -1) { } else { this._playlists.next(ps.delete(index)); } }) } save(playlist:Playlist) { console.trace("playlist to save", playlist); let tracks:number[] = []; for (let i=0; i < playlist.tracks.length; i++) { tracks.push(playlist.tracks[i].productId); } // TODO This should probably happen sometime before here. 
if (playlist.title) { playlist.title = playlist.title.trim(); } if (playlist.description) { playlist.description = playlist.description.trim(); } if (playlist.header) { playlist.header = playlist.header.trim(); } if (playlist.author) { playlist.author = playlist.author.trim(); } return this.http .post(this.objectDbUrl, JSON.stringify({ "id": "1", "jsonrpc": "2.0", "method": "save", "params": { "_id": playlist.id, "bucket": this.playlistBucket, "properties": { "title": playlist.title, "description": playlist.description, "header": playlist.header, "tracks": tracks, "author": playlist.author, "hasChanges": playlist.hasChanges, "isPublished": playlist.isPublished, "publishedPlaylistId": playlist.publishedPlaylistId, "publishedUserId": playlist.publishedUserId, "urlWeb": playlist.urlWeb, "urlMobile": playlist.urlMobile, "urlImage": playlist.urlImage, "imageType": playlist.imageType, "customImagePath": playlist.customImagePath, } } }), {headers: new Headers({ 'Content-Type': 'application/json', 'Authorization': 'Bearer ' + this.token })}) .subscribe(res => { console.log("saved data", res.json()); let updatedPlaylist = this.buildPlaylistFromResource(res.json().result); let ps = this._playlists.getValue(); let index = ps.findIndex((playlist: Playlist) => playlist.id === updatedPlaylist.id); if (index === -1) { this._playlists.next(ps.push(updatedPlaylist)); } else { playlist.dateCreated = updatedPlaylist.dateCreated; playlist.dateUpdated = updatedPlaylist.dateUpdated; this._playlists.next(ps.set(index, playlist)); } }) } uploadPlaylistImage(playlist:Playlist, file:File, bucketPath:string, filename:string) { this.http .put(this.storageUrl + "/" + this.imageBucket + bucketPath + filename, file, {headers: new Headers({ 'Authorization': 'Bearer ' + this.token, 'Content-Type': 'image' })}) // TODO HTTP error handling. 
//.catch(this.handleError) .subscribe(res => { console.log("upload done"); playlist.imageType = "upload"; playlist.customImagePath = bucketPath + filename; playlist.hasChanges = true; this.save(playlist); this.http.post("/image_ingest" + bucketPath + filename, {}) .subscribe(ingestResponse => { console.log("ingest done."); }); }) } protected handleError(error: any): Promise<any> { console.error('An error occurred', error); // for demo purposes only return Promise.reject(error.message || error); } }<file_sep>/app/metadata-modal.component.ts import { Component, ElementRef, Input, ViewChild } from '@angular/core'; import { Playlist } from './models'; import {PlaylistDraftService} from "./playlist-draft.service"; import {Http, Headers} from "@angular/http"; import {ReTokenService} from "./re-token.service"; import {Router} from "@angular/router"; declare var Base64:any; declare var jQuery: any; declare var erinyesConfig:any; @Component({ selector: 'metadata-modal', templateUrl: 'app/metadata-modal.component.html', styleUrls: ['app/metadata-modal.component.css'] }) export class PlaylistMetadataModalComponent { @Input() playlist: Playlist = new Playlist(); @ViewChild("metadata") metadataModal: ElementRef; @ViewChild("collectionImage") collectionImage: ElementRef; protected authorMissing:boolean = false; protected titleMissing:boolean = false; protected descriptionMissing:boolean = false; protected token:string = ""; protected radioeditApiUrl:string = erinyesConfig.radioeditApiURL; protected imageBucket:string = erinyesConfig.imageBucket; protected mediaserverURIPrefix:string = erinyesConfig.mediaserverURIPrefix; protected allowCancel:boolean = false; protected didCancel:boolean = false; protected successCallback:any = null; constructor(protected draftService: PlaylistDraftService, protected reTokenService: ReTokenService, protected router: Router) { console.log("erinyes", erinyesConfig); console.log("api url", this.radioeditApiUrl); 
this.reTokenService.cacheToken.subscribe(token => { console.log("metadata getting token", token); if (token) { this.token = token; } }); } cancel() { this.didCancel = true; this.router.navigate(['dashboard']); this.hide(); } clearImage() { this.playlist.imageType = "2x2"; this.playlist.customImagePath = ""; this.draftService.save(this.playlist); } done() { if (this.didCancel) { // Short-circuit if we cancelled. return; } if (!this.playlist.author) { this.authorMissing = true; } else { this.authorMissing = false; } if (!this.playlist.title) { this.titleMissing = true; } else { this.titleMissing = false; } if (!this.playlist.description) { this.descriptionMissing = true; } else { this.descriptionMissing = false; } if (this.authorMissing || this.titleMissing || this.descriptionMissing) { return false; } if (this.playlist.hasChanges) { this.draftService.save(this.playlist); } if (this.successCallback) { this.successCallback(); return; } return true; } onChange() { this.playlist.hasChanges = true; } onFilesChange(files:FileList) { console.log("files", files); if (files.length > 0) { let name:string = files[0].name; let namePieces:string[] = name.split("."); let ext:string = namePieces[namePieces.length - 1].toLowerCase(); if (ext != "jpg" && ext != "png") { alert("Image must be a JPG or PNG."); this.collectionImage.nativeElement.value = ""; return; } // TODO This logic should probably be somewhere more model-y, not on the metadata panel. let bucketPath:string = "/genre/"; let filename:string = this.playlist.id.substr(1) + "-" + new Date().getTime().toString() + "." 
+ ext; this.draftService.uploadPlaylistImage(this.playlist, files[0], bucketPath, filename); this.collectionImage.nativeElement.value = ""; } } onHide($element) { return this.done(); } show(data?: {}) { if (!data) { data = {}; } data["closable"] = false; if (!data["onHide"]) { data["onHide"] = this.onHide.bind(this); } if (data["allowCancel"]) { this.allowCancel = true; } else { this.allowCancel = false; } if (data["onHideSuccessfulCallback"]) { this.successCallback = data["onHideSuccessfulCallback"]; } else { this.successCallback = null; } this.didCancel = false; jQuery(this.metadataModal.nativeElement) .modal(data || {}) .modal("toggle"); } hide() { console.log("HIDING") jQuery(this.metadataModal.nativeElement) .modal("hide"); } makeMediaserverURL(baseImage:string): string { return this.mediaserverURIPrefix + "/v3/url/" + Base64.encodeURI(baseImage) + "?ops=fit(250,250)"; } }<file_sep>/app/track-search.service.ts import { Injectable } from '@angular/core'; import { Headers, Http, URLSearchParams } from '@angular/http'; import 'rxjs/add/operator/toPromise'; import { AmpTrack } from './models'; class AmpTrackResults { tracks: AmpTrack[]; } class AmpTrackResultsContainer { results: AmpTrackResults; } @Injectable() export class TrackSearchService { protected headers = new Headers({'Content-Type': 'application/json'}); // http://qa-ampinternal.ihrcloud.net/internal/docs#!/catalog/get_api_v3_catalog_tracks_ids protected ampSearchUrl = 'http://qa-ampinternal.ihrcloud.net/api/v3/search/all'; protected ampTrackDetailsUrl = 'http://qa-ampinternal.ihrcloud.net/api/v3/catalog/tracks/'; constructor(protected http: Http) { } getTracks(trackTitle: string): Promise<AmpTrack[]> { let params = new URLSearchParams(); params.set('keywords', trackTitle); params.set('startIndex', '0'); params.set('maxRows', '250'); params.set('countryCode', '"US"'); params.set('personalize', 'false'); params.set('track', 'true'); params.set('bundle', 'true'); params.set('artist', 'true'); 
params.set('station', 'true'); params.set('featuredStation', 'false'); params.set('talkShow', 'false'); params.set('talkTheme', 'false'); params.set('keyword', 'false'); params.set('playlist', 'false'); return this.http.get(this.ampSearchUrl, { search: params }) .toPromise() .then(response => { let container:AmpTrackResultsContainer = response.json(); return container.results.tracks; }) .catch(this.handleError); } // getHero(id: number): Promise<Hero> { // return this.getHeroes() // .then(heroes => heroes.find(hero => hero.id === id)); // } getDetails(trackIds: number[]): Promise<AmpTrack[]> { let trackSearch = trackIds.join(","); return this.http.get(this.ampTrackDetailsUrl + trackSearch) .toPromise() .then(response => { let container:AmpTrackResults = response.json(); return container.tracks; }) .catch(this.handleError); } protected handleError(error: any): Promise<any> { console.error('An error occurred', error); // for demo purposes only return Promise.reject(error.message || error); } }<file_sep>/app/playlist.component.ts import { Component, EventEmitter, Output, ViewChild } from '@angular/core'; import { Router, ActivatedRoute } from '@angular/router'; import { Playlist, PlaylistTrack } from './models'; import {PlaylistMetadataModalComponent} from "./metadata-modal.component"; import {PlaylistDraftService} from "./playlist-draft.service"; import {TrackSearchComponent} from "./track-search.component"; import {SelectorService} from "./selector.service"; import {Subscription} from "rxjs"; import {List} from "immutable"; @Component({ selector: 'playlist', templateUrl: 'app/playlist.component.html', styleUrls: ['app/playlist.component.css'] }) export class PlaylistComponent { @ViewChild("metadata") metadataModal:PlaylistMetadataModalComponent; @ViewChild("search") search:TrackSearchComponent; @Output() playlistChange = new EventEmitter(); playlist: Playlist; isReadyToShow: boolean = false; playlistSubscription: Subscription = null; knownPlaylists:List<Playlist> = 
List<Playlist>(); upcomingPlaylistID: string = ""; constructor(protected route: ActivatedRoute, protected router: Router, protected draftService: PlaylistDraftService, protected selectorService: SelectorService) { } ngOnInit() { this.route.params.subscribe(params => { this.paramsChanged(params['id']); }); this.playlistSubscription = this.draftService.playlists.subscribe( ps => { this.knownPlaylists = ps; this.attemptToLoadPlaylist(); } ) } ngOnDestroy() { if (this.playlistSubscription) { console.log("unsubscribing"); this.playlistSubscription.unsubscribe(); } } ngAfterViewInit() { } attemptToLoadPlaylist():void { if (!this.upcomingPlaylistID) { return; } for (let i:number=0; i < this.knownPlaylists.count(); i++) { if (this.knownPlaylists.get(i).id == this.upcomingPlaylistID) { this.playlist = this.knownPlaylists.get(i); this.draftService.fillInPlaylist(this.playlist); this.selectorService.update(this.playlist.id); this.upcomingPlaylistID = ""; return; } } } displayModalForNewPlaylist():void { this.metadataModal.show({ onHideSuccessfulCallback: this.attemptNewPlaylist.bind(this), allowCancel: true }); } attemptNewPlaylist():void { this.draftService.playlists.subscribe(res => this.router.navigate(['/playlist', this.playlist.id]) ); this.draftService.save(this.playlist); } paramsChanged(id) { console.log("paramsChanged again", id); if (id == -1) { this.playlist = new Playlist(); this.displayModalForNewPlaylist(); } else { this.upcomingPlaylistID = id; this.attemptToLoadPlaylist(); } } deletePlaylist() { if (confirm("Are you sure you wish to delete this playlist? You will not be able to recover it.")) { this.draftService.delete(this.playlist); this.router.navigate(['/']) } } publish() { // TODO Blocking UI? this.draftService.publish(this.playlist); } unpublish() { console.log("unpublish?"); // TODO Blocking UI? 
this.draftService.unpublish(this.playlist, true); } removeTrack(idx) { this.playlist.tracks.splice(idx, 1); this.playlist.hasChanges = true; this.draftService.save(this.playlist); } showModal() { this.metadataModal.show(); } showSearch() { } upTop(idx) { if (idx <= 0) { return; } let track = this.playlist.tracks[idx]; this.playlist.tracks.splice(idx, 1); this.playlist.tracks.splice(0, 0, track); this.playlist.hasChanges = true; this.draftService.save(this.playlist); } upOne(idx) { if (idx <= 0) { return; } let track = this.playlist.tracks[idx]; this.playlist.tracks.splice(idx, 1); this.playlist.tracks.splice(idx - 1, 0, track); this.playlist.hasChanges = true; this.draftService.save(this.playlist); } downOne(idx) { if (idx >= this.playlist.tracks.length - 1) { return; } let track = this.playlist.tracks[idx]; this.playlist.tracks.splice(idx, 1); this.playlist.tracks.splice(idx + 1, 0, track); this.playlist.hasChanges = true; this.draftService.save(this.playlist); } downLow(idx) { if (idx >= this.playlist.tracks.length - 1) { return; } let track = this.playlist.tracks[idx]; this.playlist.tracks.splice(idx, 1); this.playlist.tracks.splice(this.playlist.tracks.length, 0, track); this.playlist.hasChanges = true; this.draftService.save(this.playlist); } }
43b423e124578ae6d81104734d1b040bfcf60575
[ "TypeScript" ]
4
TypeScript
joprice/playlists
3db8886816b7308e59a43f23707d861b5da2b4b2
fd009fbc0bb76d1ecca616b92595e06872adad45
refs/heads/master
<repo_name>osundiranay/IntroDataSci-5000<file_sep>/materials/lecture-notes/17-text_dates.Rmd # Text and Dates In this chapter we briefly discuss common patterns to handle text and date data and point to useful resources. ## Text Frequently, data scraped or ingested will contain text that will need to be processed to either extract data, correct errors or resolve duplicate records. In this section we will look at a few common patterns: 1) tools for string operations, 2) tools using regular expressions, and 3) deriving attributes from text. For further reading consult: http://r4ds.had.co.nz/strings.html ### String operations The `stringr` package contains a number of useful, commonly used string manipulation operations. ```{r} library(tidyverse) library(stringr) short_string <- "I love Spring" long_string <- "There's is nothing I love more than 320 in the Spring" ``` Here are a few common ones: - string length: `str_len` ```{r} str_length(c(short_string, long_string)) ``` - combining strings: `str_c` ```{r} str_c(short_string, long_string, sep=". ") ``` - subsetting strings: `str_sub` ```{r} str_sub(c(short_string, long_string), 2, 5) ``` - trim strings: `str_trim` ```{r} str_trim(" I am padded ", side="both") ``` ### Regular expressions By far, the most powerful tools for extracting and cleaning text data are regular expressions. The `stringr` package provides a great number of tools based on regular expression matching. 
First, some basics ```{r} strs <- c("apple", "banana", "pear") str_view(strs, "an") ``` - Match any character: `.` - Match the 'dot' character: `\\.` ```{r} str_view(strs, ".a.") str_view(c(strs, "a.c"), "a\\.c") ``` - Anchor start (`^`), end (`$`) ```{r} str_view(strs, "^a") str_view(strs, "a$") str_view(c("apple pie", "apple", "apple cake"), "apple") str_view(c("apple pie", "apple", "apple cake"), "^apple$") ``` - Character classes and alternatives * `\d`: match any digit * `\s`: match any whitespace (e.g., space, tab, newline) * `[abc]`: match set of characters (e.g, `a`, `b`, or `c`) * `[^abc]`: match anything except this set of characters * `|`: match any of one or more patterns Match vowels or digits ```{r} str_view(c("t867nine", "gray9"), "[aeiou]|[0-9]") ``` - Repetition * `?`: zero or one * `+`: one or more * `*`: zero or more ```{r} str_view(c("color", "colour"), "colou?r") ``` - Grouping and backreferences Parentheses define groups, which can be referenced using `\1`, `\2`, etc. 
```{r} fruit <- c("banana", "coconut", "cucumber", "jujube", "papaya", "salal berry") str_view(fruit, "(..)\\1") ``` ### Tools using regular expressions - Determine which strings match a pattern: `str_detect`: given a vector of strings, return `TRUE` for those that match a regular expression, `FALSE` otherwise ```{r} data(words) print(head(words)) data_frame(word=words, result=str_detect(words, "^[aeiou]")) %>% sample_n(30) ``` Similarly, `str_count` returns the number of matches in a string instead of just `TRUE` or `FALSE` - Filter string vectors to include only those that match a regular expression ```{r} data(sentences) print(head(sentences)) colors <- c("red", "orange", "yellow", "green", "blue", "purple") colors_re <- str_c(colors, collapse="|") print(colors_re) sentences_with_color <- str_subset(sentences, colors_re) %>% head(10) str_view_all(sentences_with_color, colors_re) ``` - Extracting matches: `str_extract`, `str_extract_all` ```{r} str_extract(sentences_with_color, colors_re) ``` - Grouped matches: `str_match` ```{r} noun_re <- "(a|the) ([^ ]+)" noun_matches <- sentences %>% str_subset(noun_re) %>% str_match(noun_re) %>% head(10) noun_matches ``` The result is a string matrix, with one row for each string in the input vector. The first column includes the complete match to the regular expression (just like `str_extract`), the remaining columns has the matches for the groups defined in the pattern. To extract the first group matches one would index one of the columns. For example, the matches for the second group are ```{r} noun_matches[,3] ``` - Splitting strings: `str_split` split strings in a vector based on a match. For instance, to split sentences into words: ```{r} sentences %>% head(5) %>% str_split(" ") ``` ### Extracting attributes from text Handling free text in data pipelines and or statistical models is tricky. Frequently we extract attributes from text in order to perform analysis. 
We draw from https://www.tidytextmining.com/tidytext.html for this discussion. We usually think of text datasets (called a text _corpus_) in terms of - _documents_: the instances of free text in our dataset, and - _terms_ the specific, e.g., words, they contain. In terms of the representation models we have used so far, we can think of _documents as entities_, described by attributes based on words, or _words as entitites_, described by attributes based on documents. To _tidy_ text data, we tend to create **one-token-per-row** data frames that list the instances of _terms_ in _documents_ in a dataset Here's a simple example using Jane Austen text ```{r} library(janeaustenr) library(tidyverse) original_books <- austen_books() %>% group_by(book) %>% mutate(linenumber = row_number(), chapter = cumsum(str_detect(text, regex("^chapter [\\divxlc]", ignore_case=TRUE)))) %>% ungroup() original_books ``` Let's re-structure it as a **one-token-per-row** column using the `unnest_tokens` function in the `tidytext` package ```{r} library(tidytext) tidy_books <- original_books %>% unnest_tokens(word, text) tidy_books ``` Let's remove stop words from the data frame ```{r} data(stop_words) tidy_books <- tidy_books %>% anti_join(stop_words, by="word") tidy_books ``` Now, we can use this dataset to compute attributes for entities of interest. For instance, let's create a data frame with _words_ as entities, with an attribute containing the number of times the word appears in this corpus ```{r} frequent_words <- tidy_books %>% count(word, sort=TRUE) %>% filter(n > 600) ``` Which can then use like other data frames as we have used previously. For example to plot most frequent words: ```{r} frequent_words %>% mutate(word = reorder(word, n)) %>% ggplot(aes(x=word, y=n)) + geom_col() + theme_bw() + labs(x=NULL, y="frequency") + coord_flip() ``` ## Handling dates The `lubridate` package provides common operations for parsing and operating on dates and times. 
See http://r4ds.had.co.nz/dates-and-times.html for more information. A number of functions for parsing dates in a variety of formats are provided, along with functions to extract specific components from parsed date objects ```{r} library(lubridate) datetime <- ymd_hms("2016-07-08 12:34:56") year(datetime) month(datetime) day(datetime) mday(datetime) yday(datetime) wday(datetime) ``` They can also return month and day of the week names, abbreviated, as ordered factors ```{r} month(datetime, label=TRUE) ``` We can also create attributes of type `datetime` from string attributes. Here's an example using the flights dataset ```{r} flights_with_dt <- flights %>% mutate(dep_dt=make_datetime(year, month, day, dep_time %/% 100, dep_time %% 100)) %>% dplyr::select(year, month, day, dep_time, dep_dt) flights_with_dt ``` With this attribute in place we can extract day of the week and plot the number of flights per day of the week ```{r} flights_with_dt %>% mutate(wday=wday(dep_dt, label=TRUE)) %>% ggplot(aes(x=wday)) + geom_bar() ``` <file_sep>/content/lecture-note/linear_models/index.md --- date: 2016-10-26T06:12:26-04:00 title: Linear models for regression and classification --- Basic models for regression and classification [Lecture Notes: Simple linear regression](linear_regression/) [Lecture Notes: Multiple linear regression](multiple_linear_regression/) [Lecture Notes: Linear models for classification](linear_classification/) ## Resources - ISLR, Ch. 3 and 4 - Zumel & Mount Ch. 7 <file_sep>/content/datasci_corner/index.md --- title: "Data Science Corner" --- Lecturely droplets of Data Science interesting stuff... <file_sep>/materials/homeworks/midtermI_review.md --- title: Midterm I Material author: CMSC 320 geometry: margin=1in fontfamily: utopia --- ## Preliminaries - Data Analysis Cycle: acquisition -> preparation -> modeling -> communication ### References - Lecture Notes Ch. 
2-4 ## Measurement types - categorical - ordered categorical (ordinal) - discrete numerical - continuous numerical - text, datetime - the importance of units ### References - Lecture Notes Ch. 5 - HW 1 ## Data Manipulation Operations - single table operations (subsetting attributes, subsetting entitites) - more single table operations (sorting, creating new attributes, summarization, grouping entities _group by_) - operation pipelines - the multiple types of joins ### References - Lecture Notes, Ch. 6,7,13 - HW 1, 2 ## Basic plotting ### Level 1 - The data/mapping/geometry definition of data visualizations - Frequently used plots: scatterplot, bar graph, histogram, boxplot ### References - Lecture Notes, Ch. 8 ## Best practices - the importance of reproducibility - tools to improve reproducibility (debugging data science) - data science ethics and responsible conduct of research (informed consent, privacy and anonimity) ### References - Guest lecture by <NAME>, posted on calendar ## Tidy Data and Data Models - Components of a Data Model - Basics of the Entity-Relationship and Relational Data Models - The components of an ER diagram - The relationship between tidy data, the ER and the Relational models - Keys/Foreign Keys in the Entity-Relationship data model - How an ER diagram is converted into a set of Relations (data tables) - Integrity and consistency: uniqueness constraints, relationship multiplicity constraints, referential constraints ### References - Lecture Notes, Ch. 11, Lecture slides ## SQL and Database Systems - the difference between declarative and procedural representation of data operations - the Select-From-Where SQL query - Joins in SQL - Database query optimization principles - JSON ### References - Lecture Notes, Ch. 
12, 15 - HW 2 ## Data scraping - The hierarchical structure of HTML documents - Basic CSS selector syntax: type, class, id, attribute ### References - Lecture Notes 16.2, Lecture slides ## Data cleaning - Common problems in data tidying - The gather and spread data tidying operations (data values as headers) - Normalizing data tables (More than one entity in a table) - Regular expression basics ### References - Lecture Notes Ch. 17, 18 ## Entity Resolution - The Entity Resolution problem - Calculating similarity between categorical attribute values - Calculating similarity between numeric attribute values - Calculating similarity between entities - Solving the one-to-many resolution problem ### References - Lecture Notes Ch. 19 ## Network Data - Using graphs (nodes, edges) to represent data (entities, relationships) - Derived attributes from graphs (degree, betweeness) ### References - Lecture Slides # Midterm Structure The midterm will consist of three sections: ~8-10 multiple choice questions, ~5-7 short questions, and 1 or 2 longer questions. Multiple choice will test concepts and definitions along with problems similar to written exercises in class. Short questions will be similar to written problems done in homework, along with concept questions where longer written answers are required. Longer questions are for problem solving (e.g., design a data pipeline or SQL queries to carry out a specific task). 
**You can bring 1 double sided 8.5x11in sheet of notes to the exam.** <file_sep>/materials/classroom-scripts/cmsc320_class-script_20190221.Rmd --- title: "R Notebook" output: html_notebook editor_options: chunk_output_type: inline --- ```{r} db <- DBI::dbConnect(RSQLite::SQLite(), "data/lahman2016.sqlite") ``` ```{sql, connection=db} with good_players(playerID, total_hits, total_hrs) as ( select playerID, sum(H) as total_hits, sum(HR) as total_hrs from Batting group by playerID having total_hits >= 3000 or total_hrs >= 500 ), players_in_hof(playerID) as ( select distinct playerID from HallOfFame where inducted="Y" ) select m.nameFirst, m.nameLast, total_hits, total_hrs from good_players as g join Master as m on g.playerID = m.playerID where g.playerID not in players_in_hof ``` # players with AB >= 100 from CA, what is their maximum batting average # declarative ```{sql, connection=db} select max(1.0 * b.H / b.AB) as best_ba from Batting as b join Master as m on b.playerId = m.playerId where b.AB >= 100 and m.birthState = "CA" ``` # procedural representation ```{r} library(Lahman) Batting %>% inner_join(Master, by="playerID") %>% # |Batting| x |Master| filter(AB >= 100, birthState == "CA") %>% # |Batting| mutate(BA = 1.0 * H / AB) %>% # |B1| summarize(max(BA)) # |B1| ``` ```{r} Batting %>% filter(AB >= 100) %>% # |Batting| inner_join(Master %>% filter(birthState == "CA"), by="playerID") %>% # |Master| + |B1| x |M1| mutate(BA = 1.0 * H / AB) %>% # |B1| summarize(max(BA)) # |B1| ``` <file_sep>/static/misc/hw1_datatypes_wrangling.Rmd --- title: 'HW: Datatypes and Wrangling' author: "<Your Name Here>" date: "`r Sys.Date()`" output: pdf_document: default html_notebook: default editor_options: chunk_output_type: inline --- ```{r knitr_setup, echo=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` ## Data types _1) Provide a URL to the dataset._ _2) Explain why you chose this dataset._ _3) What are the entities in this dataset? 
How many are there?_ _4) How many attributes are there in this dataset? _5) What is the datatype of each attribute (categorical -ordered or unordered-, numeric -discrete or continuous-, datetime, geolocation, other)? Write a short sentence stating how you determined the type of each attribute. Do this for at least 5 attributes, if your dataset contains more than 10 attributes, choose 10 of them to describe._ | Num | Name | Type | Description | |-----|------|------|-------------| | 1 | <name> | <type> | <descr> | _6) Write R code that loads the dataset using function `read_csv`. Were you able to load the data successfully? If no, why not?_ ```{r load_data, message=FALSE} library(tidyverse) # loading code goes here ``` ## Wrangling 1) My pipeline computes... ```{r pipeline} # pipeline goes here ``` ## Plotting 1) This plot shows... ```{r plot} # plot goes here ```<file_sep>/materials/lecture-notes/31-model_evaluation.Rmd # Model Selection and Evaluation ## Classifier evaluation So far we have seen how we can use the _error rate_ of a classifier to measure it's generalization performance. How do we determine how well classifiers are performing? One way is to compute the _error rate_ of the classifier, the percent of mistakes it makes when predicting class ```{r} library(MASS) logis_fit <- glm(default ~ balance, data=Default, family="binomial") logis_pred_prob <- predict(logis_fit, type="response") logis_pred <- ifelse(logis_pred_prob > 0.5, "Yes", "No") print(table(predicted=logis_pred, observed=Default$default)) # error rate mean(Default$default != logis_pred) * 100 # dummy error rate mean(Default$default != "No") * 100 ``` In this case, it would seem that logistic regression performs well. But in fact, we can get similar error rate by always predicting "no default". We can see from this table that these errors are not symmetric. It's most common error is that _it misses true defaults_. 
We need a more precise language to describe classification mistakes: | | True Class + | True Class - | Total | |------------------:|:--------------------|---------------------|-------| | Predicted Class + | True Positive (TP) | False Positive (FP) | P* | | Predicted Class - | False Negative (FN) | True Negative (TN) | N* | | Total | P | N | | Using these we can define statistics that describe classifier performance | Name | Definition | Synonyms | |--------------------------------:|:-----------|---------------------------------------------------| | False Positive Rate (FPR) | FP / N | Type-I error, 1-Specificity | | True Positive Rate (TPR) | TP / P | 1 - Type-II error, power, sensitivity, **recall** | | Positive Predictive Value (PPV) | TP / P* | **precision**, 1-false discovery proportion | | Negative Predicitve Value (NPV) | FN / N* | | In the credit default case we may want to increase **TPR** (recall, make sure we catch all defaults) at the expense of **FPR** (1-Specificity, clients we lose because we think they will default) This leads to a natural question: Can we adjust our classifiers TPR and FPR? Remember we are classifying `Yes` if $$ \log \frac{P(Y=\mathtt{Yes}|X)}{P(Y=\mathtt{No}|X)} > 0 \Rightarrow \\ P(Y=\mathtt{Yes}|X) > 0.5 $$ What would happen if we use $P(Y=\mathtt{Yes}|X) > 0.2$? ```{r, fig.width=12, message=FALSE} library(ROCR) pred <- prediction(logis_pred_prob, Default$default) layout(cbind(1,2)) plot(performance(pred, "tpr")) plot(performance(pred, "fpr")) ``` A way of describing the TPR and FPR tradeoff is by using the **ROC curve** (Receiver Operating Characteristic) and the **AUROC** (area under the ROC) ```{r, fig.width=12} auc <- unlist(performance(pred, "auc")@y.values) plot(performance(pred, "tpr", "fpr"), main=paste("LDA AUROC=", round(auc, 2)), lwd=1.4, cex.lab=1.7, cex.main=1.5) ``` Consider comparing a logistic regression model using all predictors in the dataset, including an interaction term between balance and student. 
```{r} full_logis <- glm(default~balance*student + income, data=Default, family="binomial") full_logis_probs <- predict(full_logis, Default, type="response") full_logis_preds <- ifelse(full_logis_probs > 0.5, "Yes", "No") print(table(logis_pred, full_logis_preds, Default$default)) pred_list <- list( balance_logis = logis_pred_prob, full_logis = full_logis_probs, dummy = rep(0, nrow(Default))) pred_objs <- lapply(pred_list, prediction, Default$default) aucs <- sapply(pred_objs, function(x) unlist( performance(x, "auc")@y.values)) roc_objs <- lapply(pred_objs, performance, "tpr", "fpr") ``` ```{r, echo=FALSE} library(RColorBrewer) palette(brewer.pal(8,"Dark2")) ``` ```{r} for (i in seq(along=roc_objs)) { plot(roc_objs[[i]], add = i != 1, col=i, lwd=3, cex.lab=1.5) } legend("bottomright", legend=paste(gsub("_", " ", names(pred_list)), "AUROC=",round(aucs, 2)), col=1:3, lwd=3, cex=2) ``` Another metric that is frequently used to understand classification errors and tradeoffs is the precision-recall curve: ```{r, fig.width=10, fig.height=9} library(caTools) pr_objs <- lapply(pred_objs[1:2], performance, "prec", "rec") for (i in seq(along=pr_objs)) { plot(pr_objs[[i]], add = i != 1, col=i, lwd=3, cex.lab=1.5) } auprs <- sapply(pr_objs, function(pr_obj) { pr_x <- unlist(pr_obj@x.values) pr_y <- unlist(pr_obj@y.values) keep <- is.finite(pr_x) & is.finite(pr_y) pracma::trapz(pr_x[keep], pr_y[keep]) } ) legend("bottomleft", legend=paste(gsub("_", " ", names(pr_objs)), "AUPR=",round(auprs, 2)), col=1:3, lwd=3, cex=1.3) ``` Here we see a marginal difference between the two models, with the full model showing a slightly higher precision at the same recall values and slightly higher area under the precision-recall curve. This is commonly found in datasets where there is a skewed distribution of classes (e.g., there are many more "No" than "Yes" in this dataset). The area under the PR curve tends to distinguish classifier performance than area under the ROC curve in these cases. 
## Model selection Our discussion on regression and classification has been centered on fitting models by minizing error or maximizing likelihood given a dataset (also referred to as training data). This is usually fine when we want to use our model for _explanatory_ or _inferential_ tasks. Or when we use relatively inflexible models, like linear regression or logistic regression. However, as our interests shift to _prediction_ and more complex models, like non-linear regression, Tree-based methods or Support Vector Machines, this is usually not sufficient. In these cases, our goal is to avoid building models that are too _specific_ for the dataset we have on hand. Complex models can easily **overfit** our training data, in which case we don't learn much about the _population_ from which we obtain the training data and instead learn only about the training data itself. We say that we want to learn, or train models that **generalize** beyond the training data to other, unseen, data from the same population. This leads to a bit of an issue. How do we measure our models ability to predict unseen data, when we only have access to training data? ### Cross Validation The most common method to evaluate model **generalization** performance is _cross-validation_. It is used in two essential data analysis phases: _Model Selection_ and _Model Assessment_. In _Model Selection_, we decide how complex of a model we should fit. Consider a linear regression example: I will fit a linear regression model, what predictors should be included?, interactions?, data transformations? Another example is what classification tree depth to use. In _Model Assessment_, we determine how well does our selected model performs as a **general** model. Example: I've built a linear regression models, with specific predictors. How well will it perform on unseen data? The same question can be asked of a classification tree (of specific depth). 
Cross-validation is a _resampling_ method to obtain estimates of **test error rate** (or any other performance measure on unseen data). In some instances, you will have a large predefined test dataset **that you should never use when training**. In the absence of access to this kind of dataset, cross validation can be used. ### Validation Set The simplest option to use cross-validation is to create a _validation_ set, where our dataset is **randomly** divided into _training_ and _validation_ sets. Then the _validation_ is set aside, and not used at until until we are ready to compute **test error rate** (once, don't go back and check if you can improve it). ![](img/validation.png) Let's look at our running example using automobile data, where we want to build a regression model capable of predicting miles per gallon given other auto attributes. We saw in previous lectures that a linear regression model was not appropriate for this dataset. So instead we will use _polynomial_ regression as an illustrative example. ```{r, echo=TRUE, message=FALSE} library(ggplot2) library(ISLR) data(Auto) ggplot(Auto, aes(x=horsepower, y=mpg)) + geom_point() + geom_smooth() ``` In this case our regression model (for a single predictor $x$) is given as a $d$ degree polynomial. $$ \mathbb{E}[Y|X=x] = \beta_0 + \beta_1 x + \beta_2 x^2 + \cdots + \beta_d x^d $$ In the _Model Selection_ case, we want to decide what degree $d$ we should use to model this data. Using the _validation set_ method, we split our data into a training set, fit the regression model with different polynomial degrees $d$ on the training set, and measure test error on the validation set. 
```{r, echo=TRUE} set.seed(1234) in_validation <- sample(nrow(Auto), nrow(Auto)/2) validation_set <- Auto[in_validation,] training_set <- Auto[-in_validation,] library(broom) library(dplyr) degrees <- seq(1, 10) error_rates <- sapply(degrees, function(deg) { fit <- lm(mpg~poly(horsepower, degree=deg), data=training_set) predicted <- predict(fit, newdata=validation_set) mean((validation_set$mpg - predicted)^2) }) plot(degrees, error_rates, type="b", xlab="Polynomial Degree", ylab="Mean Squared Error", pch=19, lwd=1.4, cex=1.4) ``` ### Resampled validation set This approach can be prone to sampling issues. It can be highly variable as error rate is a random quantity and depends on observations in training and validation sets. We can improve our estimate of _test error_ by averaging multiple measurements of it (remember the law of large numbers). We can do so by replicating our validation resampling 10 times (with different validation and training sets) and averaging the resulting test errors. ```{r, echo=FALSE, warning=FALSE, message=FALSE} set.seed(1234) library(RColorBrewer) palette(brewer.pal(10, "Dark2")) degrees <- seq(1, 10) error_rates <- replicate(10, { in_validation <- sample(nrow(Auto), nrow(Auto)/2) validation_set <- Auto[in_validation,] training_set <- Auto[-in_validation,] sapply(degrees, function(deg) { fit <- lm(mpg~poly(horsepower, degree=deg), data=training_set) predicted <- predict(fit, newdata=validation_set) mean((validation_set$mpg - predicted)^2) }) }) matplot(degrees, error_rates, type="b", pch=19, xlab="Polynomial Degree", ylab="Mean Squared Error", lwd=1.4, cex=1.4) ``` ### Leave-one-out Cross-Validation This approach still has some issues. Each of the training sets in our validation approach only uses 50% of data to train, which leads to models that may not perform as well as models trained with the full dataset and thus we can overestimate error. To alleviate this situation, we can extend our approach to the extreme. 
Make each single training point its own validation set.
For each of the $k$ groups of observations: - Train model on observations in the other $k-1$ folds - Estimate test-set error (e.g., Mean Squared Error) Compute average error across $k$ folds ![](img/kfoldcv.png) $$ CV_{(k)} = \frac{1}{k} \sum_i MSE_i $$ where $MSE_i$ is mean squared error estimated on the $i$-th fold In this case, we have fewer models to fit (only $k$ of them), and there is less variance in each of the computed test error estimates in each fold. It can be shown that there is a slight bias (over estimating usually) in error estimate obtained from this procedure. ```{r, echo=FALSE} set.seed(1234) k <- 10 n <- nrow(Auto) fold_size <- ceiling(n/k) permuted_indices <- rep(NA, k * fold_size) permuted_indices[1:n] <- sample(n) fold_indices <- matrix(permuted_indices, nc=k) cv10_error_rates <- sapply(seq(1,k), function(fold_index) { test_indices <- na.omit(fold_indices[,fold_index]) train_set <- Auto[-test_indices,] test_set <- Auto[test_indices,] res <- sapply(degrees, function(deg) { fit <- lm(mpg~poly(horsepower, degree=deg), data=train_set) mean((Auto$mpg[test_indices] - predict(fit, newdata=test_set))^2) }) res }) ``` ```{r, echo=FALSE} matplot(degrees, cv10_error_rates, pch=19, type="b", lwd=1.4, cex=1.4, xlab="Polynomial Degrees", ylab="10-fold CV Error Rate") ``` ### Cross-Validation in Classification Each of these procedures can be used for classification as well. In this case we would substitute MSE with performance metric of choice. E.g., error rate, accuracy, TPR, FPR, AUROC. Note however that not all of these work with LOOCV (e.g. AUROC since it can't be defined over single data points). ### Comparing models statistically using cross-validation Suppose you want to compare two classification models (logistic regression vs. a decision tree) on the `Default` dataset. We can use Cross-Validation to determine if one model is better than the other, using hypothesis testing with a paired $t$-test for example. 
```{r, echo=TRUE, message=FALSE} library(ISLR) library(cvTools) library(tree) data(Default) fold_indices <- cvFolds(n=nrow(Default), K=10) error_rates <- sapply(1:10, function(fold_index) { test_indices <- which(fold_indices$which == fold_index) test_set <- Default[test_indices,] train_set <- Default[-test_indices,] logis_fit <- glm(default~., data=train_set, family="binomial") logis_pred <- ifelse(predict(logis_fit, newdata=test_set, type="response") > 0.5, "Yes", "No") logis_error <- mean(test_set$default != logis_pred) tree_fit <- tree(default~., data=train_set) pruned_tree <- prune.tree(tree_fit, best=3) tree_pred <- predict(pruned_tree, newdata=test_set, type="class") tree_error <- mean(test_set$default != tree_pred) c(logis_error, tree_error) }) rownames(error_rates) <- c("logis", "tree") error_rates <- as.data.frame(t(error_rates)) library(tidyr) library(dplyr) error_rates <- error_rates %>% mutate(fold=1:n()) %>% gather(method,error,-fold) error_rates %>% head() %>% knitr::kable("html") ``` ```{r, echo=TRUE, message=FALSE} dotplot(error~method, data=error_rates, ylab="Mean Prediction Error") ``` ```{r, echo=TRUE} lm(error~method, data=error_rates) %>% tidy() %>% knitr::kable() ``` In this case, we do not observe any significant difference between these methods. ## Summary Error and accuracy statistics are not enough to understand classifier performance. Classifications can be done using probability cutoffs to trade, e.g., TPR-FPR (ROC curve), or precision-recall (PR curve). Area under ROC or PR curve summarize classifier performance across different cutoffs. Model selection and assessment are critical steps of data analysis. Resampling methods are general tools used for this purpose. 
<file_sep>/materials/lecture-notes/10-data_models.Rmd # Tidy Data I: The ER Model ```{r setup10, include=FALSE} knitr::opts_chunk$set(echo = TRUE, cache=TRUE) ``` Some of this material is based on <NAME>'s material: [https://github.com/umddb/datascience-fall14/blob/master/lecture-notes/models.md](https://github.com/umddb/datascience-fall14/blob/master/lecture-notes/models.md) ## Overview In this section we will discuss principles of preparing and organizing data in a way that is amenable for analysis, both in modeling and visualization. We think of a _data model_ as a collection of concepts that describes how data is represented and accessed. Thinking abstractly of data structure, beyond a specific implementation, makes it easier to share data across programs and systems, and integrate data from different sources. Once we have thought about structure, we can then think about _semantics_: what does data represent? - **Structure**: We have assumed that data is organized in rectangular data structures (tables with rows and columns) - **Semantics**: We have discussed the notion of _values_, _attributes_, and _entities_. So far, we have used the following _data semantics_: a dataset is a collection of _values_, numeric or categorical, organized into _entities_ (_observations_) and _attributes_ (_variables_). Each _attribute_ contains values of a specific measurement across _entities_, and _entities_ collect all measurements across _attributes_. In the database literature, we call this exercise of defining structure and semantics as _data modeling_. In this course we use the term _data representational modeling_, to distinguish from _data statistical modeling_. The context should be sufficient to distinguish the two uses of the term _data modeling_. 
Data representational modeling is the process of representing/capturing structure in data based on defining: - **Data model**: A collection of concepts that describes how data is represented and accessed - **Schema**: A description of a specific collection of data, using a given data model The purpose of defining abstract data representation models is that it allows us to know the structure of the data/information (to some extent) and thus be able to write general purpose code. Lack of a data model makes it difficult to share data across programs, organizations, systems that need to be able to integrate information from multiple sources. We can also design algorithms and code that can significantly increase efficiency if we can assume general data structure. For instance, we can preprocess data to make access efficient (e.g., building a B-Tree on a field). A data model typically consists of: - Modeling Constructs: A collection of concepts used to represent the structure in the data. Typically we need to represent types of *entities*, their *attributes*, types of *relationships* between *entities*, and *relationship attributes* - Integrity Constraints: Constraints to ensure data integrity (i.e., avoid errors) - Manipulation Languages: Constructs for manipulating the data We desire that models are sufficiently _expressive_ so they can capture real-world data well, _easy to use_, and lend themselves to defining computational methods that have good performance. Some examples of data models are - Relational, Entity-relationship model, XML... - Object-oriented, Object-relational, RDF... - Current favorites in the industry: JSON, Protocol Buffers, [Avro](http://avro.apache.org/docs/current/), Thrift, Property Graph Why have so many models been defined? There is an inherent tension between descriptive power and ease of use/efficiency. More powerful, expressive, models can be applied to represent more datasets but also tend to be harder to use and query efficiently. 
Typically there are multiple levels of modeling. _Physical modeling_ concerns itself with how the data is physically stored. _Logical or Conceptual modeling_ concerns itself with type of information stored, the different entities, their attributes, and the relationships among those. There may be several layers of logical/conceptual models to restrict the information flow (for security and/or ease-of-use): - **Data independence:** The idea that you can change the representation of data w/o changing programs that operate on it. - **Physical data independence:** I can change the layout of data on disk and my programs won't change - index the data - partition/distribute/replicate the data - compress the data - sort the data ## The Entity-Relationship and Relational Models The fundamental objects in this formalism are _entities_ and their _attributes_, as we have seen before, and _relationships_ and _relationship attributes_. Entities are objects represented in a dataset: people, places, things, etc. Relationships model just that, relationships between entities. ![](img/er.png) Here, rectangles are _entitites_, diamonds and edges indicate _relationships_. Circles describe either entity or relationship _attributes_. Arrows are used indicate multiplicity of relationships (one-to-one, many-to-one, one-to-many, many-to-many): ![](img/relationships.png) Relationships are defined over _pairs_ of entities. As such, relationship $R$ over sets of entities $E_1$ and $E_2$ is defined over the _cartesian product_ $E_1 \times E_2$. For example, if $e_1 \in E_1$ and $e_2 \in E_2$, then $(e_1, e_2) \in R$. Arrows specify how entities participate in relationships. In particular, an arrow pointing from an entity set $E_1$ (square) into a relationship over $E_1$ and $E_2$ (diamond) specifies that entities in $E_1$ appear in _only one_ relationship pair. That is, there is a single entity $e_2 \in E_2$ such that $(e_1,e_2) \in R$. Think about what relationships are shown in this diagram? 
![](img/er2.png) In databases and general datasets we work on, both Entities and Relationships are represented as _Relations_ (tables) such that a unique entity/relationship is represented by a single tuple (the list of attribute values that represent an entity or relationship). This leads to the natural question of how are unique entities determined or defined. Here is where the concept of a _key_ comes in. This is an essential aspect of the Entity-Relationship and Relational models. ### Formal introduction to keys - Attribute set $K$ is a **superkey** of relation $R$ if values for $K$ are sufficient to identify a unique tuple of each possible relation $r(R)$ - Example: `{ID}` and `{ID,name}` are both superkeys of *instructor* - Superkey $K$ is a **candidate key** if $K$ is minimal - Example: `{ID}` is a candidate key for Instructor - One of the candidate keys is selected to be the **primary key** - Typically one that is small and immutable (doesn’t change often) - Primary key typically highlighted - **Foreign key**: Primary key of a relation that appears in another relation - `{ID}` from *student* appears in *takes, advisor* - *student* called referenced relation - *takes* is the referencing relation - Typically shown by an arrow from referencing to referenced - **Foreign key constraint**: the tuple corresponding to that primary key must exist - Imagine: - Tuple: `('student101', 'CMSC302') `in *takes* - But no tuple corresponding to 'student101' in *student* - Also called referential integrity constraint #### Keys: Examples - Married(person1-ssn, person2-ssn, date-married, date-divorced) - Account(cust-ssn, account-number, cust-name, balance, cust-address) - RA(student-id, project-id, superviser-id, appt-time, appt-start-date, appt-end-date) - Person(Name, DOB, Born, Education, Religion, ...) 
- Information typically found on Wikipedia Pages - President(name, start-date, end-date, vice-president, preceded-by, succeeded-by) - Info listed on Wikipedia page summary - Rider(Name, Born, Team-name, Coach, Sponsor, Year) - Tour de France: Historical Rider Participation Information ## Tidy Data Later in the course we will use the term Tidy Data to refer to datasets that are represented in a form that is amenable for manipulation and statistical modeling. It is very closely related to the concept of _normal forms_ in the ER model and the process of _normalization_ in the database literature. Here we assume we are working in the ER data model represented as _relations_: rectangular data structures where 1. Each attribute (or variable) forms a column 2. Each entity (or observation) forms a row 3. Each type of entity (observational unit) forms a table Here is an example of a tidy dataset: ```{r} library(nycflights13) head(flights) ``` it has one entity per row, a single attribute per column. Notice only information about flights are included here (e.g., no airport or airline information other than the name) in these observations. <file_sep>/content/lecture-note/introduction/index.md --- date: 2016-08-29T08:59:01-04:00 title: "Lecture Notes: Introduction" --- What is Data Science? [Slides](Intro.pdf) ## Readings - [Zumel & Mount: pp. xix-17]({{< elmurl >}}/files/44629545) - [Loukides, What is data science?](http://radar.oreilly.com/2010/06/what-is-data-science.html) - [Silver, What the fox knows](http://fivethirtyeight.com/features/what-the-fox-knows/) <file_sep>/materials/lecture-notes/08-rmarkdown_intro.Rmd # Brief Introduction to Rmarkdown Rstudio has created an impressive eco-system for publishing with R. 
It has concentrated around two systems: `Rmarkdown` for creating documents that include both text and data analysis code, publishable in multiple formats, and `shiny`, a framework for creating interactive html applications that allow users to explore data analyses. Rstudio has provided substantial tutorials and documentation for Rmarkdown: http://rmarkdown.rstudio.com/ The tutorial included in that website is quite good: http://rmarkdown.rstudio.com/lesson-1.html. In class we will go over some of the key points using the same Rmarkdown file as an example: http://rmarkdown.rstudio.com/demos/1-example.Rmd <file_sep>/materials/notebooks/sql_baseball.Rmd --- title: "SQL Examples with Baseball" output: html_notebook: df_print: paged html_document: df_print: paged editor_options: chunk_output_type: inline --- Here we show SQL constructs using the Lahman baseball dataset (downloaded from https://github.com/jknecht/baseball-archive-sqlite). We also show how to use a SQL database inside Rmarkdown. First, we create a connection to the database. In this case, we are using a `SQLite` database. A good system to prototype database designs. To make the most of a database system, one would use some of the more powerful products: Oracle, Microsoft SQLServer, MySQL (MariaDB), PostgreSQL or other. In all cases, the way to create a connection to the server from Rmarkdown is the same. ```{r setup} library(tidyverse) ``` ```{r setup_db} library(DBI) db <- dbConnect(RSQLite::SQLite(), dbname="../lecture-notes/data/lahman2016.sqlite") ``` ### Select-From-Where First, we write a query to get batting statistics for Washington Nationals in 2010: note the *rename*. 
Note that the chunk in the markdown file is declared as <code>{sql, connection=db}</code> in the Rmarkdown file: ```{sql, connection=db} SELECT b.playerId, b.yearId, b.H, b.AB FROM BATTING AS b WHERE teamID = 'WAS' AND yearID = 2010 ``` ### Expressions The **select** clause can contain expressions (this is paralleled by the `mutate` operation we saw previously) ```{sql, connection=db} SELECT b.playerId, b.yearId, b.AB, 1.0 * b.H / b.AB AS BP FROM BATTING AS b ``` ### WHERE predicates The **where** clause support a large number of different predicates and combinations thereof (this is parallel to the `filter` operation) ```{sql, connection=db} SELECT b.playerId, b.yearID, b. teamId, b.AB, 1.0 * b.H / b.AB AS BP FROM BATTING AS b WHERE b.AB > 0 AND b.yearID > 2000 AND b.yearID < 2010 AND b.teamID LIKE 'NY%' ``` ### ORDERING We can include ordering (parallel to `arrange`) ```{sql, connection=db} SELECT b.playerId, b.yearID, b. teamId, b.AB, 1.0 * b.H / b.AB AS BP FROM BATTING AS b WHERE b.AB > 0 AND b.yearID > 2000 AND b.yearID < 2010 AND b.teamID LIKE 'NY%' ORDER BY b.AB DESC, BP DESC; ``` ### Group_by and Summarize - What it does: Partition the tuples by the group attributes (*teamId* and *yearId* in this case), and do something (*compute avg* in this case) for each group - Number of resulting tuples == Number of groups ```{sql, connection=db} SELECT b.teamId, b.yearId, avg(1.0 * b.H / b.AB) AS AVE_BP FROM BATTING AS b WHERE b.AB > 0 AND b.yearID > 2000 AND b.yearID < 2010 GROUP BY b.teamId, b.yearId ORDER BY AVE_BP DESC ``` For reference, this is how we would do this using the `dplyr` operations we learned about previously. ```{r, eval=FALSE} Batting %>% filter(AB > 0, ...) 
%>% group_by(teamId, yearId) %>% summarize(ave_bp = mean(1.0 * H / AB)) ``` ### Subqueries Sometimes it's easier to nest queries like the one above into query and subquery ```{sql, connection=db} SELECT teamID, yearID, avg(BP) AS AVG_BP FROM (SELECT b.teamId, b.yearId, 1.0 * b.H / b.AB AS BP FROM BATTING AS b WHERE b.AB > 0 AND b.yearID > 2000 AND b.yearID < 2010) GROUP BY teamID, yearID ORDER BY AVG_BP DESC; ``` ## Joins List all players from California, playing in 2015 ```{sql, connection=db} select b.playerId, b.teamId, m.birthState from Batting as b join master as m on b.playerId == m.playerId where yearId = "2015" and m.birthState = "CA" ``` Finally, we close the connection to the database: ```{r} dbDisconnect(db) ```<file_sep>/materials/slides/linear-regression/linear-regression.Rmd --- title: "Linear Regression" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Linear Regression] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] ```{r setup1, include=FALSE, message=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) library(tidyverse) library(broom) ``` --- layout: true ## Linear Regression --- Linear regression is a very elegant, simple, powerful and commonly used technique for data analysis. We use it extensively in exploratory data analysis and in statistical analyses --- ### Simple Regression The goal here is to analyze the relationship between a _continuous numerical_ attribute $Y$ and another (_numerical_ or _categorical_) variable $X$. 
We assume that in the population, the relationship between the two is given by a linear function: $$ Y = \beta_0 + \beta_1 X $$ --- Here is (simulated) data from an advertising campaign measuring sales and the amount spent in advertising. $$ \mathtt{sales} \approx \beta_0 + \beta_1 \times \mathtt{TV} $$ --- .center.image-60[![](img/regression_example.png)] --- We would say that we _regress_ `sales` on `TV` when we perform this regression analysis. As before, given data we would like to estimate what this relationship is in the _population_ (what is the population in this case?). What do we need to estimate in this case? Values for $\beta_0$ and $\beta_1$. What is the criteria that we use to estimate them? --- We are stating mathematically: $$ \mathbb{E}[Y|X=x] = \beta_0 + \beta_1 x $$ --- Given a dataset, the problem is then to find the values of $\beta_0$ and $\beta_1$ that minimize deviation between data and expectation Like the estimation of central trend (mean) we use squared devation to do this. --- **The linear regression problem** Given data $(x_1, y_1), (x_2, y_2), \ldots, (x_n, y_n)$, find values $\beta_0$ and $\beta_1$ that minimize _objective_ or _loss_ function RSS (residual sum of squares): $$ \arg \min_{\beta_0,\beta_1} RSS = \frac{1}{2} \sum_i (y_i - (\beta_0 + \beta_1 x_i))^2 $$ --- .center.image-70[![](img/minimizing.png)] --- Like derivation of the mean as a measure of central tendency we can derive the values of minimizers $\hat{\beta}_0$ and $\hat{\beta}_1$. We use the same principle, compute derivatives (partial this time) of the objective function RSS, set to zero and solve. 
--- $$ \begin{aligned} \hat{\beta}_1 & = \frac{\sum_{i=1}^n (y_i - \overline{y})(x_i - \overline{x})}{\sum_{i=1}^n (x_i - \overline{x})^2} \\ {} & = \frac{\mathrm{cov}(y,x)}{\mathrm{var}(x)} \\ \hat{\beta}_0 & = \overline{y} - \hat{\beta}_1 \overline{x} \end{aligned} $$ --- ```{r, warning=FALSE, message=FALSE, echo=FALSE} library(ISLR) library(tidyverse) data(Auto) Auto %>% ggplot(aes(x=weight, y=mpg)) + geom_point() + geom_smooth(method=lm) + theme_minimal() ``` --- In R, linear models are built using the `lm` function ```{r} auto_fit <- lm(mpg~weight, data=Auto) auto_fit ``` --- This states that for this dataset $\hat{\beta}_0 = `r auto_fit$coef[1]`$ $\hat{\beta}_1 = `r auto_fit$coef[2]`$. What's the interpretation? --- According to this model, a weightless car `weight=0` would run $\approx `r round(auto_fit$coef[1], 2)`$ _miles per gallon_ on average, and, on average, a car would run $\approx `r -round(auto_fit$coef[2],2)`$ _miles per gallon_ fewer for every extra _pound_ of weight. Units of the outcome $Y$ and the predictor $X$ matter for the interpretation of these values. --- ### Inference Now that we have an estimate, we want to know its precision. The main point is to understand that like the sample mean, the regression line we learn from a specific dataset is an estimate. A different sample from the same population would give us a different estimate (regression line). --- The Central Limit Theorem tells us on average, we are close to population regression line (I.e., close to $\beta_0$ and $\beta_1$), the spread around $\beta_0$ and $\beta_1$ is well approximated by a normal distribution and the spread goes to zero as the sample size increases. --- .image.center-60[![](img/population_line.png)] --- ### Confidence Interval We can construct a confidence interval to say how precise we think our estimates are. We want to see how precise our estimate of $\beta_1$ is, since that captures the relationship between the two variables. 
First, we calculate a standard error estimate for $\beta_1$: $$ \mathrm{se}(\hat{\beta}_1)^2 = \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (x_i - \overline{x})^2} $$
It is based on the $t$-statistic $$ \frac{\hat{\beta}_1}{\mathrm{se}(\hat{\beta}_1)} $$ --- You can think of this as a _signal-to-noise_ ratio, or a standardizing transformation on the estimated parameter. --- In our example, we get a $t$ statistic and p-value as follows: ```{r} auto_fit_stats <- auto_fit %>% tidy() auto_fit_stats ``` --- We would say: "We found a statistically significant relationship between weight and miles per gallon. On average, a car runs $_{`r confidence_interval[1]`} `r confidence_interval[2]`_{`r confidence_interval[3]`}$ _miles per gallon_ fewer per pound of weight ( $t$=`r round(auto_fit_stats$statistic[2],2)`, $p<$ `r format(auto_fit_stats$p.value[2], digits=3)` )." --- ### Global Fit We can make _predictions_ based on our conditional expectation, that prediction should be better than a prediction of the outcome with a simple average. We can use this comparison as a measure of how good of a job we are doing using our model to fit this data: how much of the variance of $Y$ can we _explain_ with our model. --- To do this we can calculate _total sum of squares_: $$ TSS = \sum_i (y_i - \overline{y})^2 $$ (this is the squared error of a prediction using the sample mean of $Y$) --- and the _residual sum of squares_: $$ RSS = \sum_i (y_i - \hat{y}_i)^2 $$ (which is the squared error of a prediction using the linear model we learned) --- The commonly used $R^2$ measure compares these two quantities: $$ R^2 = \frac{\mathrm{TSS}-\mathrm{RSS}}{\mathrm{TSS}} = 1 - \frac{\mathrm{RSS}}{\mathrm{TSS}} $$ --- These types of global statistics for the linear model can be obtained using the `glance` function in the `broom` package. In our example ```{r} auto_fit %>% glance() %>% select(r.squared, sigma, statistic, df, p.value) ``` --- ### Some important technicalities We mentioned above that predictor $X$ could be _numeric_ or _categorical_. However, this is not precisely true. We use a transformation to represent _categorical_ variables. 
Here is a simple example: Suppose we have a categorical attribute `sex`.
.center.image-60[![](img/residual_variance.png)] --- layout: true ## Multiple linear regression --- In this case, we use models of conditional expectation represented as linear functions of multiple variables: $$ \mathbb{E}[Y|X_1=x_1,X_2=x_2,\ldots,X_p=x_p] = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \cdots + \beta_p x_p $$ --- In the case of our advertising example, this would be a model: $$ \mathtt{sales} = \beta_0 + \beta_1 \times \mathtt{TV} + \beta_2 \times \mathtt{newspaper} + \beta_3 \times \mathtt{facebook} $$ --- These models let us make statements of the type: "holding everything else constant, sales increased on average by 1000 per dollar spent on Facebook advertising" (this would be given by parameter $\beta_3$ in the example model). --- ### Estimation in multivariate regression Generalizing simple regression, we estimate $\beta$'s by minimizing an objective function that represents the difference between observed data and our expectation based on the linear model: $$ \begin{aligned} RSS & = \frac{1}{2} \sum_{i=1}^n (y_i - \hat{y}_i)^2 \\ {} & = \frac{1}{2} \sum_{i=1}^n (y_i - (\beta_0 + \beta_1 x_{i1} + \cdots + \beta_p x_{ip}))^2 \end{aligned} $$ --- .center.image-70[![](img/multiple_rss.png)] --- The minimizer is found using numerical algorithms to solve this type of _least squares_ problems. Later in the course we will look at _stochastic gradient descent_, a simple algorithm that scales to very large datasets. --- ### Example (cont'd) ```{r, eval=FALSE} auto_fit <- lm(mpg~1+weight+cylinders+horsepower+displacement+year, data=Auto) auto_fit ``` --- ```{r, echo=FALSE} auto_fit <- lm(mpg~1+weight+cylinders+horsepower+displacement+year, data=Auto) auto_fit ``` --- From this model we can make the statement: "Holding everything else constant, cars run 0.76 miles per gallon more each year on average".
--- ### Statistical statements (cont'd) Like simple linear regression, we can construct confidence intervals, and test a null hypothesis of no relationship ( $\beta_j=0$ ) for the parameter corresponding to each predictor. --- This is again nicely managed by the `broom` package: ```{r} auto_fit_stats <- auto_fit %>% tidy() auto_fit_stats ``` --- ```{r, echo=FALSE} print_confint <- function(fit_df, term, digits=2) { i <- match(term, fit_df$term) confint_offset <- 1.96 * fit_df$std.error[i] confint <- round(c(fit_df$estimate[i] - confint_offset, fit_df$estimate[i], fit_df$estimate[i] + confint_offset), digits) paste0("{}_{", confint[1], "} ", confint[2], "_{", confint[3], "}") } print_pval <- function(fit_df, term) { i <- match(term, fit_df$term) pval <- fit_df$p.value[i] out <- ifelse(pval<1e-16, "<1e-16", paste0("=", pval)) paste0("P", out) } ``` In this case we would reject the null hypothesis of no relationship only for predictors `weight` and `year`. We would write the statement for year as follows: "Holding everything else constant, cars run $`r auto_fit_stats %>% print_confint("year")`$ miles per gallon more each year on average (`r auto_fit_stats %>% print_pval("year")`)". --- ### The F-test We can make additional statements for multivariate regression: "is there a relationship between _any_ of the predictors and the response?". Mathematically, we write this as $\beta_1 = \beta_2 = \cdots = \beta_p = 0$. --- As before, we can compare total outcome variance to the residual sum of squared error $RSS$ using the $F$ statistic: $$ \frac{(\mathrm{TSS}-\mathrm{RSS})/p}{\mathrm{RSS}/(n-p-1)} $$ --- Back to our example, we use the `glance` function to compute this type of summary: ```{r} auto_fit %>% glance() %>% select(r.squared, sigma, statistic, df, p.value) %>% knitr::kable("html") ``` --- In comparison with the linear model only using `weight`, this multivariate model explains _more of the variance_ of `mpg`, but using more predictors.
This is where the notion of _degrees of freedom_ comes in: we now have a model with expanded _representational_ ability. --- The bigger the model, the more we are conditioning: given a fixed dataset, we have fewer data points to estimate the conditional expectation for each value of the predictors, so the estimated conditional expectation is less _precise_. --- To capture this phenomenon, we want statistics that trade off how well the model fits the data, and the "complexity" of the model. --- Now, we can look at the full output of the `glance` function: ```{r} auto_fit %>% glance() %>% knitr::kable("html") ``` --- Columns `AIC` and `BIC` display statistics that penalize model fit with model size. The smaller this value, the better. Let's now compare a model only using `weight`, a model only using `weight` and `year` and the full multiple regression model we saw before. --- ```{r} lm(mpg~weight, data=Auto) %>% glance() %>% knitr::kable("html") ``` --- ```{r} lm(mpg~weight+year, data=Auto) %>% glance() %>% knitr::kable("html") ``` --- In this case, using more predictors beyond `weight` and `year` doesn't help. --- ### Categorical predictors (cont'd) We saw transformations for categorical predictors with only two values. In our example we have the `origin` predictor, corresponding to where the car was manufactured, which has multiple values ```{r} Auto <- Auto %>% mutate(origin=factor(origin)) levels(Auto$origin) ``` As before, we can only use numerical predictors in linear regression models. We can encode these values using variables $x_1$ and $x_2$ $$ x_1 = \left\{ \begin{aligned} 1 & \textrm{ if origin=1} \\ 0 & \textrm{ o.w.} \end{aligned} \right. $$ $$ x_2 = \left\{ \begin{aligned} 1 & \textrm{ if origin=2} \\ 0 & \textrm{ o.w.} \end{aligned} \right. $$ --- The `lm` function in R does this transformation by default when a variable has class `factor`.
We can see what the underlying numerical predictors look like by using the `model_matrix` function and passing it the model formula we build: ```{r, echo=FALSE} extended_df <- model.matrix(~origin, data=Auto) %>% as.data.frame() %>% mutate(origin = Auto$origin) extended_df %>% filter(origin == "1") %>% head() ``` --- ```{r, echo=FALSE} extended_df %>% filter(origin == "2") %>% head() ``` --- ```{r, echo=FALSE} extended_df %>% filter(origin == "3") %>% head() ``` --- layout: true ## Interactions in linear models --- The linear models so far include _additive_ terms for a single predictor. That lets us make statements of the type "holding everything else constant...". But what if we think that a pair of predictors _together_ have a relationship with the outcome? --- We can add these _interaction_ terms to our linear models as products $$\mathbb{E}[Y|X_1=x_1,X_2=x_2] = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \beta_{12} x_1 x_2$$ --- Consider the advertising example: $$ \mathtt{sales} = \beta_0 + \beta_1 \times \mathtt{TV} + \beta_2 \times \mathtt{facebook} + \beta_3 \times (\mathtt{TV} \times \mathtt{facebook}) $$ If $\beta_3$ is positive, then the effect of increasing TV advertising money is increased if facebook advertising is also increased. --- When using categorical variables, interactions have an elegant interpretation. Consider our car example, and suppose we build a model with an interaction between `weight` and `origin`. --- Let's look at what the numerical predictors look like: ```{r, echo=FALSE} extended_df <- model.matrix(~weight+origin+weight:origin, data=Auto) %>% as.data.frame() %>% mutate(origin = Auto$origin) extended_df %>% filter(origin == "1") %>% head() ``` --- ```{r, echo=FALSE} extended_df %>% filter(origin == "2") %>% head() ``` --- ```{r, echo=FALSE} extended_df %>% filter(origin == "3") %>% head() ``` --- So what is the expected miles per gallon for a car with `origin == 1` as a function of weight?
$$ \mathtt{mpg} = \beta_0 + \beta_1 \times \mathtt{weight} $$ --- Now how about a car with `origin == 2`? $$ \mathtt{mpg} = \beta_0 + \beta_1 \times \mathtt{weight} + \beta_2 + \beta_4 \times \mathtt{weight} $$ --- Now think of the graphical representation of these lines. For `origin == 1` the intercept of the regression line is $\beta_0$ and its slope is $\beta_1$. For `origin == 2` the intercept of the regression line is $\beta_0 + \beta_2$ and its slope is $\beta_1+\beta_4$. --- class: split-50 `ggplot` does this when we map a factor variable to a aesthetic, say color, and use the `geom_smooth` method: .column[ ```r Auto %>% ggplot(aes(x=weight, y=mpg, color=origin)) + geom_point() + geom_smooth(method=lm) ``` ] .column[ ```{r, echo=FALSE, fig.width=6, fig.align="center", message=FALSE, fig.height=5} Auto %>% ggplot(aes(x=weight, y=mpg, color=origin)) + geom_point() + geom_smooth(method=lm) ``` ] --- The intercept of the three lines seem to be different, but the slope of `origin == 3` looks different (decreases faster) than the slopes of `origin == 1` and `origin == 2` that look very similar to each other. --- Let's fit the model and see how much statistical confidence we can give to those observations: ```{r, echo=FALSE} auto_fit <- lm(mpg~weight*origin, data=Auto) auto_fit_stats <- auto_fit %>% tidy() auto_fit_stats ``` --- There is still an issue here because this could be the result of a poor fit from a linear model, it seems none of these lines do a very good job of modeling the data we have. --- We can again check this for this model: ```{r, echo=FALSE} auto_fit %>% augment() %>% ggplot(aes(x=.fitted, y=.resid)) + geom_point() ``` --- layout: true ## Additional issues with linear regression --- Multiple linear regression introduces an additional issue that is extremely important to consider when interpreting the results of these analyses: collinearity. 
--- .center.image-70[![](img/collinearity.png)] --- In that case, the set of $\beta$'s that minimize RSS may not be unique, and therefore our interpretation is invalid. You can identify this potential problem by regressing predictors onto each other. The usual solution is to fit models only including one of the colinear variables. --- layout: false ## Summary Flexible, but highly biased method for modeling relationships between variables and deriving predictions for continuous attributes. We have seen how it is used in the context of EDA and statistical inference. Saw important caveats to their application. <file_sep>/materials/lectures/IntroStatLearn/testing_exercise.R # I hypothesize that <NAME> is polling at greater than 50% of the vote # in the Wisconsin senate race. According to the latest Marquette poll (http://www.realclearpolitics.com/epolls/2016/senate/wi/wisconsin_senate_johnson_vs_feingold-3740.html) # he got .48 of the vote out of 878 likely voters. # Q1: let's construct a confidence interval for the population proportion based # on the estimate that \hat{p}=.48 phat <- .48 n <- 878 standard_error <- sqrt(phat * (1-phat)) / sqrt(n) half_margin <- -qnorm(.05/2, sd=standard_error) phat_low <- phat - half_margin phat_high <- phat + half_margin # Q2: let's test the hypothesis that p>.50 # null: p <= .5 # alternative: p > .5 # what's the probability of observing a value p >= .48 _under the null hypothesis_ <file_sep>/materials/lectures/DataTypes/DataTypes.Rmd --- title: "Measurements and data types" author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` In this section we discuss different data types we will run across in different datasets. Let's use our arrest dataset for since we've become familiar with it by now. 
As a reminder, this is how you can obtain that dataset: ```{r} if (!file.exists("BPD_Arrests.csv")) { download.file("http://www.hcbravo.org/IntroDataSci/misc/BPD_Arrests.csv", destfile="BPD_Arrests.csv") } arrest_tab <- read.csv("BPD_Arrests.csv", stringsAsFactors=FALSE) # remember there's a naming issue in this dataset we need to fix colnames(arrest_tab)[3:4] <- c("race", "sex") ``` ## Entities and attributes As a reminder, we are using the term _entities_ to refer to the objects to which data in a dataset refers to. For instance, in our example dataset, each arrest is an _entity_. In a rectangular dataset, as we've seen before, this usually corresponds to rows in a table. We then say that a dataset contains _attributes_ for each entity. For instance, attributes of each arrest would be the person's _age_, the type of offense, the location, etc. In a rectangular dataset, this corresponds to the columns in a table. This language of _entities_ and _attributes_ is commonly used in the database literature. In statistics you may see _experimental units_ or _samples_ for _entities_ and _covariates_ for _attributes_. In other instances _observations_ for _entities_ and _variables_ for _attributes_. In Machine Learning you may see _example_ for _entities_ and _features_ for _attributes_. For the most part, all of these are exchangable. This unit is concerned with the types of data we may encounter as _attributes_ in data analyses. ## Categorical data A categorical attribute for a given entity can take only one of a finite set of examples. For example, the `sex` variable can only have value `M`, `F`, or `` (we'll talk about missing data later in the semester). ```{r} table(arrest_tab$sex) ``` The result of a coin flip is categorical: `heads` or `tails`. The outcome of rolling an 8-sided die is categorical: `one`, `two`, ..., `eight`. Can you think of other examples? ### Factors in R We said that R is designed for data analysis. 
My favorite example of how that manifests itself is the `factor` datatype. If you look at your dataset now, `arrest_tab$sex` is a vector of strings: ```{r} class(arrest_tab$sex) summary(arrest_tab$sex) ``` However, as a measurement, or attribute, it should only take one of two values (or three depending on how you record missing, unknown or unspecified). So, in R, that categorical data type is called a _factor_. Notice what the `summary` function does after turning the `sex` attribute into a _factor_: ```{r} arrest_tab$sex <- factor(arrest_tab$sex) summary(arrest_tab$sex) ``` This distinction shows up in many other places where functions have very different behavior when called on a vector of strings and when called on a factor (e.g., functions that make plots, or functions that learn statistical models). The possible values a _factor_ can take are called _levels_: ```{r} levels(arrest_tab$sex) ``` Exercise: you should transform the `race` attribute into a factor as well. How many levels does it have? Another note, when we used `read.csv` above to read our dataset from a text file, we used the argument `stringsAsFactors`. See if you can understand what that argument is used for. ## Discrete numeric data These are attributes that can take specific values from elements of ordered, discrete (possibly infinite) sets. The most common set in this case would be the non-negative positive integers. This data is commonly the result of counting processes. In our example dataset, age, measured in years, is a discrete attribute. Frequently, we obtain datasets as the result of summarizing, or aggregating other underlying data. In our case, we could construct a new dataset containing the number of arrests per neighborhood: ```{r} library(dplyr) arrest_tab %>% group_by(neighborhood) %>% summarize(number_of_arrests=n()) %>% head() ``` In this new dataset, the _entities_ are each neighborhood, the `number_of_arrests` attribute is a _discrete_ attribute. 
Other examples: the number of students in a class is discrete, the number of friends for a specific Facebook user. Can you think of other datasets? Distinctions between categorical and discrete numerical data is that categorical data is not ordered and finite, discrete numeric data is (possibly) infite and ordered. ## Continuous numeric data These are attributes that can take any value in a continuous set. For example, a person's height, in say inches, can take any number (within the range of human heights). In our example dataset we do not have this type of data. However, here is another dataset we can use to look at this datatype: ```{r} data(cars) head(cars) plot(cars$speed, cars$dist, pch=19, xlab="speed (mph)", ylab="stopping distance (ft)") ``` The distinction between continuous and discrete is a bit tricky since measurements that have finite precision are, in a sense, discrete. Remember, however, that continuity is not a property of the specific dataset you have in hand, but rather of the process you are measuring. The number of arrests in a neighborhood cannot, in principle, be fractional, regardless of the precision at which we measure this. If we had the appropriate tool, we could measure a person's height with infinite precision. This distinction is very important when we build statistical models of datasets for analysis. For now, think of discrete data as the result of counting, and continuous data the result of some physical measurement. ## Other fun examples Consider a dataset of images like the super-famous [MNIST dataset of handwritten digits](https://www.kaggle.com/c/digit-recognizer). This dataset contains images of handwritten digits. So each image is an _entity_. Each image has a _label_ attribute which states which of the digits 0,1,...9 is represented by the image. What type of data is this (categorical, continuous numeric, or discrete numeric)? Now, each image is represented by grayscale values in a 28x28 grid. 
That's 784 attributes, one for each square in the grid, containing a grayscale value. Now what type of data are these other 784 attributes? ## Other important datatypes The three datatypes we saw above encompass a fairly large swath of data you will come across. Our arrest dataset contains other important datatypes that we will run across frequently: - Datetime: Date and time of some event or observation (e.g., `arrestDate`, `arrestTime`) - Geolocation: Latitude and Longitude of some event or observation (e.g., `Location.`) ## Units Something that we tend to forget but is **extremely** important for the modeling and interpretation of data is that attributes are for the most part _measurements_ and that they have _units_. For example, age of a person can be measured in different units: _years_, _months_, etc. These can be converted to one another, but nonetheless in a given dataset, that _attribute_ or measurement will be recorded in some specific units. Similar arguments go for distances and times, for example. In other cases, we may have unitless measurements (we will see later an example of this when we do _dimensionality reduction_). In these cases, it is worth thinking about _why_ your measurements are unit-less. When performing analyses that try to summarize the effect of some measurement or attribute on another, units matter a lot! We will see the importance of this in our _regression_ section. For now, make sure you make a mental note of units for each measurement you come across. This will force you to think about where and how your data was obtained, which will become very important when modeling and interpreting the results of these models. 
<file_sep>/materials/slides/basic_plotting/plotting.Rmd --- title: "Plotting" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: chakra: libs/remark-0.14.0.min.js lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Basic Plotting] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] --- ## Data Visualization ```{r setup_cache_07, echo=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` ```{r setup_07, echo=FALSE, message=FALSE} library(tidyverse) arrest_tab <- read_csv("data/BPD_Arrests.csv") ``` We will spend a good amount of time in the course discussing data visualization. It serves many important roles in data analysis. We use it to gain understanding of dataset characteristics throughout analyses and it is a key element of communicating insights we have derived from data analyses with our target audience. --- layout: true ## Grammar of Graphics (ggplot) --- In this section, we will introduce basic functionality of the `ggplot` package (available in both R and python) to start our discussion of visualization throughout the course. The `ggplot` package is designed around the Entity-Attribute data model. Also, it can be included as part of data frame operation pipelines. 
--- class: split-40 .column[ Let's create a _dot plot_ of the number of arrests per district in our dataset: ```{r, eval=FALSE} arrest_tab %>% group_by(district) %>% summarize(num_arrests=n()) %>% ggplot(mapping=aes(y=district, x=num_arrests)) + geom_point() ``` ] .column[ ```{r district_dotplot, eval=TRUE, echo=FALSE} arrest_tab %>% group_by(district) %>% summarize(num_arrests=n()) %>% ggplot(mapping=aes(y=district, x=num_arrests)) + geom_point() ``` ] --- The `ggplot` design is very elegant, takes some thinking to get used to, but is extremely powerful. The central premise is to characterize the building pieces behind `ggplot` plots as follows: 1. The **data** that goes into a plot, a data frame of entities and attributes 2. The **mapping** between data attributes and graphical (aesthetic) characteristics 3. The *geometric* representation of these graphical characteristics --- So in our example we can fill in these three parts as follows: 1) **Data**: We pass a data frame to the `ggplot` function with the `%>%` operator at the end of the group_by-summarize pipeline. 2) **Mapping**: Here we map the `num_arrests` attribute to the `x` position in the plot and the `district` attribute to the `y` position in the plot. Every `ggplot` will contain one or more `aes` calls. 3) **Geometry**: Here we choose points as the _geometric_ representations of our chosen graphical characteristics using the `geom_point` function. 
--- In general, the `ggplot` call will have the following structure: ```{r ggplot_schema, eval=FALSE} <data_frame> %>% ggplot(mapping=aes(<graphical_characteristic>=<attribute>)) + geom_<representation>() ``` --- layout: true ## Plot Construction Details --- ### Mappings | Argument | Definition | |----------|------------| | `x` | position along x axis | | `y` | position along y axis | | `color` | color | | `shape` | shape (applicable to e.g., points) | `size` | size | | `label` | string used as label (applicable to text) --- ### Representations | Function | Representation | |----------|----------------| | `geom_point` | points | | `geom_bar` | rectangles | | `geom_text` | strings | | `geom_smooth` | smoothed line (advanced) | | `geom_hex` | hexagonal binning | --- We can include multiple geometric representations in a single plot, for example points and text, by adding (`+`) multiple `geom_<representation>` functions. Also, we can include mappings inside a `geom_` call to map characteristics to attributes strictly for that specific representation. For example `geom_point(mapping=aes(color=<attribute>))` maps color to some attribute only for the point representation specified by that call. Mappings given in the `ggplot` call apply to _all_ representations added to the plot. --- layout: true ## Frequently Used Plots --- We will look comprehensively at data visualization in more detail later in the course, but for now will list a few common plots we use in data analysis and how they are created using `ggplot`. Let's switch data frame to the `mpg` dataset for our examples: ```{r load_mpg} mpg ``` --- layout: false class: split-50 ## Scatter plot Used to visualize the relationship between two attributes. 
.column[ ```{r, eval=FALSE} mpg %>% ggplot(mapping=aes(x=displ, y=hwy)) + geom_point(mapping=aes(color=cyl)) ``` ] .column[ ```{r scatter_example, echo=FALSE, fig.height=5, fig.align='center', fig.width=6} mpg %>% ggplot(mapping=aes(x=displ, y=hwy)) + geom_point(mapping=aes(color=cyl)) ``` ] --- class: split-50 ## Bar graph Used to visualize the relationship between a continuous variable to a categorical (or discrete) attribute .column[ ```{r, eval=FALSE} mpg %>% group_by(cyl) %>% summarize(mean_mpg=mean(hwy)) %>% ggplot(mapping=aes(x=cyl, y=mean_mpg)) + geom_bar(stat="identity") ``` ] .column[ ```{r bargraph_mpg, echo=FALSE, fig.height=5, fig.width=5} mpg %>% group_by(cyl) %>% summarize(mean_mpg=mean(hwy)) %>% ggplot(mapping=aes(x=cyl, y=mean_mpg)) + geom_bar(stat="identity") ``` ] --- class: split-50 ## Histogram Used to visualize the distribution of the values of a numeric attribute .column[ ```{r, eval=FALSE, message=FALSE} mpg %>% ggplot(mapping=aes(x=hwy)) + geom_histogram() ``` ] .column[ ```{r hist_mpg, echo=FALSE, message=FALSE, fig.height=5, fig.width=5} mpg %>% ggplot(mapping=aes(x=hwy)) + geom_histogram() ``` ] --- class: split-50 ## Boxplot Used to visualize the distribution of a numeric attribute based on a categorical attribute .column[ ```{r, eval=FALSE} mpg %>% ggplot(mapping=aes(x=class, y=hwy)) + geom_boxplot() ``` ] .column[ ```{r hist_dot, echo=FALSE, fig.height=5, fig.width=5} mpg %>% ggplot(mapping=aes(x=class, y=hwy)) + geom_boxplot() ``` ] <file_sep>/materials/lectures/Wrangling/missing.md Missing Data ======================================= author: <NAME> date: CADi 2015 Missing Data ======================================== When to remove? When to *impute*? - missing at random vs. missing systematically Missing Data ======================================== When to remove? When to *impute*? 
- data that is missing systematically can significantly bias an analysis - For example: - Want to predict how sick someone is from test result - if doctors do not carry out the test because a patient is too sick - then the fact test is missing is a great predictor of how sick patient is Missing Data ===================================== **First step**: understand *why* and *how* data may be missing I.e., talk to collaborator **Second step**: if a relatively small fraction of observations contain have missing values, then remove observations. Dealing with data missing at random ===================================== - Categorical: encode missing as value ```r is.missing <- is.na(tb2$iso2) tb2 %>% mutate(iso2_fixed=factor( ifelse(!is.missing, tb2$iso2, "missing"))) ``` Dealing with data missing at random ======================================= - Numeric: impute - Simple method: replace missing values for a variable with mean of non-missing values ```r is.missing <- is.na(flights$dep_delay) flights %>% mutate(dep_delay_fixed = ifelse(!is.missing, dep_delay, mean(dep_delay, na.rm=TRUE))) ``` Dealing with data missing at random ======================================= - Numeric: impute - More complex method: replace missing values for a variable predicting from other variables when variables are related. ```r is.missing <- is.na(flights$dep_delay) # use average delay condition on origin airport fit <- flights %>% lm(dep_delay~origin, data=.) 
flights %>% mutate(dep_delay_fixed = ifelse(!is.missing, dep_delay, predict(fit, newdata=flights))) ``` Dealing with data missing at random ======================================= - Numeric: impute - In either case, a common approach is to add an additional indicator variable stating if a continuous missing measurement was imputed ```r flights %>% mutate(dep_delay_missing = is.na(dep_delay)) ``` <file_sep>/materials/lectures/Unsupervised/Unsupervised.Rmd --- title: "Unsupervised Methods" author: CMSC320 date: "`r Sys.Date()`" --- ```{r, echo=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` ### Introduction So far we have seen "Supervised Methods" where interest is in analyzing a _response_ (or outcome) based on various _predictors_. In many cases, especially for Exploratory Data Analysis, we want methods to extract patterns on variables without analyzing a specific _response_. Methods for the latter case are called "Unsupervised Methods". Examples are _Principal Component Analysis_ and _Clustering_. Interpretation of these methods is much more _subjective_ than in Supervised Learning. For example: if we want to know if a given _predictor_ is related to _response_, we can perform statistical inference using hypothesis testing. In another example, if we want to know which predictors are useful for prediction: use cross-validation to do model selection. Finally, if we want to see how well we can predict a specific response, we can use cross-validation to report on test error. In unsupervised methods, there is no similarly clean evaluation methodology. Nonetheless, they can be very useful methods to understand data at hand. ### Motivating Example We will use an application in genetics as a motivating example for dimensionality reduction methods. Recent technological advances have allowed identification of locations in human genome (DNA) that vary frequently across human populations (Single Nucleotide Polymorphisms, or SNPs).
This permits biomedical researchers to identify changes in DNA that are associated with specific traits, e.g., susceptibility to disease, or protection from disease, which could greatly improve effectiveness and quality of care for certain diseases. For our example, we will look at a dataset of 4,929 SNPs for 1,093 individuals from populations across the globe. This data is available in the class docker image at `/home/ids_materials/geno_data.rda`. ```{r, message=FALSE} library(dplyr) load("geno_data.rda") print(dim(filtered_geno_data)) table(filtered_geno_data$super_population) ``` The encoding of the origin of each subject in the sample is given by _AFR: Africa, AMR: America, EAS: East Asia, EUR: Europe, OPT: A mystery_ Let's take a look at a few of the variables included in this dataset: ```{r} filtered_geno_data %>% select(1:6) %>% head(10) %>% knitr::kable() ``` Each of the `rsXXXX` columns corresponds to a SNP (location in the human genome that varies across populations). These are fairly well annotated, e.g, [http://snpedia.com/index.php/rs1799971](http://snpedia.com/index.php/rs1799971). For this specific SNP, the vast majority (about 80%) of people in the world inherited an `A` from both mother and father. We say they have the `A/A` allele. Other people have a different allele, `A/G` or `G/G`, meaning they inherited a mutation (`G` instead of `A`) from either mother or father (in the first case) or both (in the second case). These two rare alleles have been associated with increased susceptibility to alcoholism. Let's see what the subjects in the dataset look like with respect to this SNP: ```{r} table(filtered_geno_data$rs1799971) ``` In this dataset 733 individuals have the `A/A` allele (coded as `0`), 305 have the `A/G` allele (coded as `1`) and 55 have the `G/G` allele (coded as `2`). 
In this dataset all SNPs are coded the same way: - `0`: they have the most frequent allele - `1`: they inherited one copy of the mutation - `2`: they inherited two copies of the mutation In exploratory data analysis we would like to visualize this data for 4k variables. We could make scatter plots for every pair of variables but that is obviously not feasible. This is where unsupervised methods for dimensionality reduction are helpful. ### Principal Component Analysis Principal Component Analysis (PCA) is a dimensionality reduction method. The goal is to _embed data in high dimensional space (e.g., observations with a large number of variables), onto a small number of dimensions_. Note that its most frequent use is in EDA and visualization, but it can also be helpful in regression (linear or logistic) where we can transform input variables into a smaller number of predictors for modeling. The PCA problem is then: Given: - Data set $\{\mathbf{x}_1, \mathbf{x}_2, \ldots, \mathbf{x}_n\}$, where $\mathbf{x}_i$ is the vector of $p$ variable values for the $i$-th observation. Return: - Matrix $\left[ \phi_1, \phi_2, \ldots, \phi_p \right]$ of _linear transformations_ that retain _maximal variance_. You can think of the first vector $\phi_1$ as a linear transformation that embeds observations into 1 dimension: $$ Z_1 = \phi_{11}X_1 + \phi_{21} X_2 + \cdots + \phi_{p1} X_p $$ where $\phi_1$ is selected so that the resulting dataset $\{ z_1, \ldots, z_n\}$ has _maximum variance_. In order for this to make sense mathematically data has to be centered (we saw this in our transformations unit), i.e., each $X_j$ has mean equal to zero and transformation vector $\phi_1$ has to be normalized, i.e., $\sum_{j=1}^p \phi_{j1}^2=1$. We can find $\phi_1$ by (surprise!) 
solving an optimization problem: $$ \max_{\phi_{11},\phi_{21},\ldots,\phi_{p1}} \frac{1}{n} \sum_{i=1}^n \left( \sum_{j=1}^p \phi_{j1} x_{ij} \right)^2 \\ \mathrm{s.t.} \; \sum_{j=1}^p \phi_{j1}^2 = 1 $$ Conceptually this optimization problem says _maximize variance_ but _subject to normalization constraint_. The second transformation $\phi_2$ is obtained next by solving a similar problem with the added constraint that $\phi_2$ **is orthogonal** to $\phi_1$. Taken together $\left[ \phi_1, \phi_2 \right]$ define a pair of linear transformations of the data into 2 dimensional space. $$ Z_{n\times 2} = X_{n \times p} \left[ \phi_1, \phi_2 \right]_{p \times 2} $$ ### Motivating example continued Let's apply PCA to our genotype data. ```{r} library(dplyr) load("geno_data.rda") # do principal component analysis # genotypes pr_out <- filtered_geno_data %>% select(contains("rs")) %>% prcomp(scale=TRUE) # get the embedded samples and add additional sample information embedded_samples <- pr_out$x[,1:3] %>% as.data.frame() %>% bind_cols(select(filtered_geno_data, sample_name, population, super_population)) embedded_samples %>% head() %>% knitr::kable() ``` Each of the columns of the $Z$ matrix is called a _Principal Component_. The units of the PCs are _meaningless_. In particular, comparing numbers _across_ PCs doesn't make mathematical sense. In this case we also used a scaling transformation on the variables $X_j$ to have unit variance (argument `scale` to the `prcomp` function). I would not have done that with this dataset, but we'll see why I did it shortly. In general, if variables $X_j$ are measured in different units (e.g., miles vs. liters vs. dollars), variables should be scaled to have unit variance. Conversely, if they are all measured in the same units (as in this example), you should not scale them.
Let's make a scatter plot of the embedded dataset resulting from PCA: ```{r} library(ggplot2) embedded_samples %>% ggplot(aes(x=PC1, y=PC2, color=super_population)) + geom_point() + scale_color_brewer(palette="Dark2") ``` We see that the first PC roughly corresponds to African population and the second component roughly corresponds to Eastern Asian population. The American population, however, is not as well defined as the other populations. Remember that we did not use these labels to embed the data into two dimensions; we are using them afterwards to aid interpretation. We can also look at the $\phi$ vectors that define the transformations (aka _loadings_) to see how much weight each of the original variables in the dataset is assigned to each PC. This analysis only makes sense if all variables are on the same scale (thus, the reason for a scaling transformation). For example, we can find the SNPs with highest weight for the first principal component: ```{r} o <- order(-abs(pr_out$rotation[,1])) snp_indices <- o[1:10] snp_ids <- rownames(pr_out$rotation)[snp_indices] snp_tab <- data.frame(SNP=sprintf("[%s](http://snpedia.com/index.php/%s)", snp_ids, snp_ids), PC1=round(pr_out$rotation[snp_indices,1],4), PC2=round(pr_out$rotation[snp_indices,2],4)) %>% magrittr::set_rownames(NULL) knitr::kable(snp_tab) ``` A type of plot commonly used in PCA analysis is the `biplot` which combines visualization of embedded data and loadings. **I don't like it!** ```{r, echo=FALSE, fig.width=7, fig.height=7} library(png) library(grid) img <- readPNG("10_1.png") grid.raster(img) ``` ### Other PCA practicalities A natural question that arises is: How many PCs should we consider in post-hoc analysis? One result of PCA is a measure of the variance corresponding to each PC relative to the total variance of the dataset.
From that we can calculate the _percentage of variance explained_ for the $m$-th PC: $$ PVE_m=\frac{\sum_{i=1}^n z_{im}^2}{\sum_{j=1}^p \sum_{i=1}^n x_{ij}^2} $$ We can use this measure to choose the number of PCs in an ad-hoc manner. In our case, using more than 3 PCs does not add information. ```{r} pr_vars <- pr_out$sdev^2 pve <- pr_vars / sum(pr_vars) plot(pve[1:10]*100, type="b", xlab="PC", ylab="PVE", lwd=2) ``` A useful _rule of thumb_: - If no apparent patterns in first couple of PCs, stop! - Otherwise, look at other PCs using PVE as guide. Still, this is very much ad-hoc, and there is no commonly agreed upon method for choosing the number of PCs in practice. ### Our motivating example explained A final caveat: PCA is **notoriously easy** to over-interpret... Let's reveal the source of our motivating data. This came from a [blog post](https://liorpachter.wordpress.com/2014/12/02/the-perfect-human-is-puerto-rican/) by [<NAME>](https://math.berkeley.edu/~lpachter/), a mathematician and computational biologist at the University of California, Berkeley. He used real genotype data from the [1000 genomes project](http://www.1000genomes.org/), but created, _in silico_, a hypothetical human being where each SNP was set optimally. That is, if it's a protective mutation according to the genetics literature, then the mutation was applied to this fake subject; if it's a deleterious mutation, then the mutation was not applied to this fake subject.
This hypothetical human being is the `OPT` population ```{r} embedded_samples %>% ggplot(aes(x=PC1, y=PC2, color=super_population)) + geom_point() + scale_color_brewer(palette="Dark2") + annotate("text", x=10, y=14, label="Perfect Human", size=7) + geom_segment(x=2,y=14,xend=-1,yend=14,arrow=arrow(length=unit(0.3, "cm")),color="black") ``` Which from PC3 you can see it is indeed hypothetical ```{r} embedded_samples %>% ggplot(aes(x=PC1, y=PC3, color=super_population)) + geom_point() + scale_color_brewer(palette="Dark2") + annotate("text", x=10, y=-180, label="Perfect Human", size=7) + geom_segment(x=2,y=-180,xend=-1,yend=-190,arrow=arrow(length=unit(0.3, "cm")),color="black") ``` Now, things got interesting when he reported that the nearest individual in the _embedded_ dataset to this perfect human was a Puerto Rican woman: ```{r, results="asis"} # get the two-dimensional embedding of fake subject perfect_sample <- as.numeric(select(embedded_samples[1,], PC1, PC2)) # calculate the distance between fake subject and real subjects in # the 2 dimensional space using Euclidean distance dist <- embedded_samples %>% select(PC1, PC2) %>% as.matrix() %>% # these next few lines compute Euclidean distance using # piped operators sweep(., MARGIN=2, STATS=perfect_sample, FUN="-") %>% magrittr::raise_to_power(2) %>% rowSums() %>% sqrt() # now find the nearest real subject o <- order(dist) sample_name <- embedded_samples$sample_name[o[2]] sprintf("[%s](https://catalog.coriell.org/0/sections/Search/Sample_Detail.aspx?Ref=%s&PgId=166)", sample_name, sample_name) ``` The over-interpretation of PCA strikes: [Medical Daily](http://www.medicaldaily.com/biologist-says-puerto-rican-women-possess-ideal-genotype-perfect-human-dna-ancestry-313956) [HuffPost](http://www.huffingtonpost.com/julio-pabon/the-closet-perfect-human-_b_6304366.html) [El Nuevo Dia (Puerto Rican Newspaper)](http://www.elnuevodia.com/ciencia/ciencia/nota/serhumanoperfectoseriapuertorriqueno-1903858/) [Google 
Translate](https://translate.google.com/translate?sl=es&tl=en&js=y&prev=_t&hl=en&ie=UTF-8&u=http%3A%2F%2Fwww.elnuevodia.com%2Fciencia%2Fciencia%2Fnota%2Fserhumanoperfectoseriapuertorriqueno-1903858%2F&edit-text=&act=url) [Latin Times](http://www.latintimes.com/new-study-reveals-perfect-human-genetically-speaking-caribbean-island-280363) [The Backlash...](http://globalvoicesonline.org/2014/12/17/the-perfect-human-doesnt-live-in-puerto-rico-or-any-other-country/) [_and a lot more_](https://www.google.com/?gws_rd=ssl#q=perfect+human+pachter) In fact, <NAME> published this analysis precisely due to the common over-interpretation of simple data analyses like these. This exercise was a great example of how easily over-interpretation of data analysis (PCA in this case), and our simplistic models of very complex systems (the genotype-phenotype relationship) can lead to conclusions that without thoughtful and ethical thinking can be easily over-hyped. ### Summary Principal Component Analysis is a conceptually simple but powerful EDA tool. It is very useful at many stages of analyses. Its interpretation can be very ad-hoc, however. It is part of large set of unsupervised methods based on _matrix decompositions_ <file_sep>/materials/lectures/eda/missing_data.Rmd --- title: "Handling missing data" author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- ```{r, echo=FALSE, message=FALSE} knitr::opts_chunk$set(cache=TRUE) library(png) library(grid) library(tidyr) library(dplyr) library(readr) ``` We can now move on to a very important aspect of data preparation and transformation: how to deal with missing data? By missing data we mean values that are unrecorded, unknown or unspecified in a dataset. We saw an example of this when we looked at the tidy unit. 
Here is the tidy weather dataset again: ```{r, eval=FALSE} data_dir <- "/home/ids_materials/tidy_unit" weather <- read_csv(file.path(data_dir, "weather.csv")) weather ``` ```{r, echo=FALSE} data_dir <- "~/Teaching/CMSC320_Fall2016/materials/lectures/DataModels/tidyr/vignettes" weather <- read_csv(file.path(data_dir, "weather.csv")) weather ``` And the result of tidying this dataset: ```{r} tidy_weather <- weather %>% gather(day, temp, d1:d31) %>% spread(element, temp) tidy_weather ``` In this dataset, temperature observations coded as `NA` are considered _missing_. Now, we can imagine that either the measurement failed in a specific day for a specific weather station, or that certain stations only measure temperatures on certain days of the month. Knowing which of these applies can change how we approach this missing data. As you can see, how to treat missing data depends highly on how the data was obtained, and the more you know about a dataset, the better decision you can make. In general, the central question with missing data is: Should we remove observations with missing values, or should we *impute* missing values? This also relates to the difference between values that are missing _at random_ vs. values that are missing _systematically_. In the weather example above, the first case (of failed measurements) could be thought of as missing _at random_, and the second case as missing _systematically_. Data that is missing systematically can significantly bias an analysis. For example: Suppose we want to predict how sick someone is from test result. If doctors do not carry out the test because a patient is too sick, then the fact test is missing is a great predictor of how sick the patient is. So in general, the **first step** when dealing with missing data is to understand *why* and *how* data may be missing. I.e., talk to collaborator, or person who created the dataset. 
Once you know that, if a relatively small fraction of observations have missing values, then it may be safe to remove those observations. ```{r} tidy_weather_nomissing <- tidy_weather %>% mutate(missing = is.na(tmax) | is.na(tmin)) %>% filter(!missing) %>% select(-missing) tidy_weather_nomissing ``` ### Dealing with data missing at random In the case of categorical variables, a useful approach is to encode missing as a new category and include that in subsequent modeling. ```{r, eval=FALSE} is.missing <- is.na(tb2$iso2) tb2 %>% mutate(iso2_fixed=factor( ifelse(!is.missing, tb2$iso2, "missing"))) ``` In the case of numeric values, we can use a simple method for imputation where we replace missing values for a variable with, for instance, the mean of non-missing values: ```{r, eval=FALSE} is.missing <- is.na(flights$dep_delay) flights %>% mutate(dep_delay_fixed = ifelse(!is.missing, dep_delay, mean(dep_delay, na.rm=TRUE))) ``` A more complex method is to replace missing values for a variable by predicting them from other variables when variables are related (we will see linear regression using the `lm` and `predict` functions later on): ```{r, eval=FALSE} is.missing <- is.na(flights$dep_delay) # use average delay conditioned on origin airport fit <- flights %>% lm(dep_delay~origin, data=.)
flights %>% mutate(dep_delay_fixed = ifelse(!is.missing, dep_delay, predict(fit, newdata=flights))) ``` In either case, a common approach is to add an additional indicator variable stating if numeric missing value was imputed ```{r, eval=FALSE} flights %>% mutate(dep_delay_missing = is.na(dep_delay)) ``` <file_sep>/content/lecture-note/wrangling/index.md --- date: 2016-09-18T15:00:57-04:00 title: Data Wrangling --- Data management and manipulation [Data wrangling with `dplyr`](wrangling_dplyr/) [Data wrangling with SQL](wrangling_sql/) ## Additional Resources - Introduction to `dplyr`: [http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html](http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html) - `dplyr` Cheatsheet: [http://www.rstudio.com/wp-content/uploads/2015/02/data-wrangling-cheatsheet.pdf](http://www.rstudio.com/wp-content/uploads/2015/02/data-wrangling-cheatsheet.pdf) <file_sep>/materials/slides/representation_models/representation-models.Rmd --- title: "Data Representation Models" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: chakra: "libs/remark-0.14.0.min.js" lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Data Representation Models] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] --- layout: true ## Overview --- ```{r setup1, include=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) library(tidyverse) ``` Principles of preparing and organizing data in a way that is amenable for analysis. -- **Data representation model**: collection of concepts that describes how data is represented and accessed. 
-- Thinking abstractly of data structure, beyond a specific implementation, makes it easier to share data across programs and systems, and integrate data from different sources. --- - **Structure**: We have assumed that data is organized in rectangular data structures (tables with rows and columns) - **Semantics**: We have discussed the notion of _values_, _attributes_, and _entities_. -- So far, _data semantics_: a dataset is a collection of _values_, numeric or categorical, organized into _entities_ (_observations_) and _attributes_ (_variables_). -- Each _attribute_ contains values of a specific measurement across _entities_, and _entities_ collect all measurements across _attributes_. --- In the database literature, we call this exercise of defining structure and semantics as _data modeling_. -- In this course we use the term _data representational modeling_, to distinguish from _data statistical modeling_. --- layout: true ## Data representational modeling --- - **Data model**: A collection of concepts that describes how data is represented and accessed - **Schema**: A description of a specific collection of data, using a given data model --- - Modeling Constructs: A collection of concepts used to represent the structure in the data. Typically we need to represent types of *entities*, their *attributes*, types of *relationships* between *entities*, and *relationship attributes* --- - Integrity Constraints: Constraints to ensure data integrity (i.e., avoid errors) -- - Manipulation Languages: Constructs for manipulating the data --- We desire that models are: - sufficiently _expressive_ so they can capture real-world data well, - _easy to use_, - lend themselves to defining computational methods that have good performance. --- Some examples of data models are - Relational, Entity-relationship model, XML... - Object-oriented, Object-relational, RDF... 
- Current favorites in the industry: JSON, Protocol Buffers, [Avro](http://avro.apache.org/docs/current/), Thrift, Property Graph --- - **Data independence:** The idea that you can change the representation of data w/o changing programs that operate on it. - **Physical data independence:** I can change the layout of data on disk and my programs won't change - index the data - partition/distribute/replicate the data - compress the data - sort the data --- layout: true ## The Entity-Relationship and Relational Models --- class: split-50 .column[ Modeling constructs: - _entities_ and their _attributes_ - _relationships_ and _relationship attributes_. Entities are objects represented in a dataset: people, places, things, etc. Relationships model just that, relationships between entities. ] .column[ .center.middle.image-50[![](img/er2.png)] ] --- class: split-50 .column[ Diagrams: - rectangles are _entitites_ - diamonds and edges indicate _relationships_ - Circles describe either entity or relationship _attributes_. ] .column[ .center.middle.image-50[![](img/er2.png)] ] --- class: split-50 .column[ Arrows are used indicate multiplicity of relationships ] .column[ .center.middle.image-50[![](img/relationships.png)] ] --- Relationships are defined over _pairs_ of entities. Relationship $R$ over sets of entities $E_1$ and $E_2$ is defined over the _cartesian product_ $E_1 \times E_2$. For example: if $e_1 \in E_1$ and $e_2 \in E_2$, then $(e_1, e_2) \in R$. --- Arrows specify how entities participate in relationships. For example: this diagram specifies that entities in $E_1$ appear in _only one_ relationship pair. .center.image-50[![](img/rel_example.png)] That is, if $e_i \in E_1$, $e_j \in E_2$ and $(e_i, e_j) \in R$, then there is no other pair $(e_i, e_k) \in R$. --- In databases and general datasets we work on, both Entities and Relationships are represented as _Relations_ (tables). 
-- Such that a _unique_ entity/relationship is represented by a single tuple (the list of attribute values that represent an entity or relationship). -- How can we ensure _uniqueness_ of entities? -- _keys_ are an essential ingredient to uniquely identify entities and relationships in tables. --- layout: true ## Formal introduction to keys --- - Attribute set $K$ is a **superkey** of relation $R$ if values for $K$ are sufficient to identify a unique tuple of each possible relation $r(R)$ - Example: `{SSN}` and `{SSN,name}` are both superkeys of *person* -- - Superkey $K$ is a **candidate key** if $K$ is minimal - Example: `{SSN}` is a candidate key for *person* -- - One of the candidate keys is selected to be the **primary key** - Typically one that is small and immutable (doesn’t change often) - Primary key typically highlighted in ER diagram --- - **Foreign key**: Primary key of a relation that appears in another relation - `{SSN}` from *person* appears in *employs* - *person* called referenced relation - *employs* is the referencing relation -- - **Foreign key constraint**: the tuple corresponding to that primary key must exist - Imagine: - Tuple: `('123-45-6789', 'Apple') `in *employs* - But no tuple corresponding to '123-45-6789' in *person* - Also called referential integrity constraint --- layout: true ## Tidy Data --- We use the term _Tidy Data_ to refer to datasets that are represented in a form that is amenable for manipulation and statistical modeling. It is very closely related to the concept of _normal forms_ in the ER model and the process of _normalization_ in the database literature. --- Here we assume we are working in the ER data model represented as _relations_: rectangular data structures where 1. Each attribute (or variable) forms a column 2. Each entity (or observation) forms a row 3. 
Each type of entity (observational unit) forms a table --- class: split-50 Here is an example of a tidy dataset: One entity per row, a single attribute per column. Only information about flights included. ```{r, echo=FALSE} library(nycflights13) head(flights) %>% knitr::kable("html") ``` --- layout: true ## Structured Query Language --- The Structured-Query-Language (SQL) is the predominant language used in database systems. It is tailored to the Relational data representation model. SQL is a declarative language: we don't write a _procedure_ to compute a relation, we _declare_ what the relation we want to compute looks like. --- The basic construct in SQL is the so-called `SFW` construct: _select-from-where_ which specifies: - _select_: which attributes you want the answer to have - _from_: which relation (table) you want the answer to be computed from - _where_: what conditions you want to be satisfied by the rows (tuples) of the answer --- E.g.: movies produced by Disney in 1990: note the *rename* ```sql select m.title, m.year from movie m where m.studioname = 'disney' and m.year = 1990 ``` --- The **select** clause can contain expressions (this is paralleled by the `mutate` operation we saw previously) - `select title || ' (' || to_char(year) || ')' as titleyear` - `select 2014 - year` --- The **where** clause supports a large number of different predicates and combinations thereof (this is parallel to the `filter` operation) - `year between 1990 and 1995` - `title like 'star wars%'` or `title like 'star wars _'` --- We can include ordering, e.g., find distinct movies sorted by title ```sql select distinct title from movie where studioname = 'disney' and year = 1990 order by title; ``` --- ### Group-by and summarize SQL has an idiom for grouping and summarizing. E.g., compute the average movie length by year ```sql select year, avg(length) from movie group by year ``` --- layout: true ## Two-table operations --- So far we have looked at data operations defined over
single tables and data frames. In this section we look at efficient methods to combine data from multiple tables. The fundamental operation here is the `join`, which is a workhorse of database system design and implementation. --- ### The `join` operation: Combines rows from two tables to create a new single table Based on matching criteria specified over attributes of each of the two tables. --- Consider a database of `flights` and `airlines`: ```{r, include=FALSE} library(nycflights13) data(flights) data(airlines) ``` ```{r} flights ``` --- ```{r} airlines ``` --- Here, we want to add airline information to each flight. Join the attributes of the respective airline from the `airlines` table with the `flights` table based on the values of attributes `flights$carrier` and `airlines$carrier`. --- Every row of `flights` with a specific value for `flights$carrier` is joined with the corresponding row in `airlines` with the same value for `airlines$carrier`. --- There are multiple ways of performing this operation that differ on how non-matching observations are handled. --- ## Left Join In a `left join`, all observations on the left operand (LHS) are retained: .image-50.left[![](img/join_lhs.png)] .image-30.center[![](img/left_join.png)] --- Other operations: - _right join_: all observations in RHS are retained - _outer join_: all observations are retained (full join) - _inner join_: only matching observations are retained Details in lecture notes --- ## Join conditions All join operations are based on a matching condition: ```{r, eval=FALSE} flights %>% inner_join(airlines, by="carrier") ``` specifies to join observations where `flights$carrier` equals `airlines$carrier`. --- In this case, where no conditions are specified using the `by` argument: ```{r, eval=FALSE} flights %>% left_join(airlines) ``` a *natural join* is performed. In this case all variables with the same name in both tables are used in the join condition.
--- You can also specify join conditions on arbitrary attributes using the `by` argument. ```{r, eval=FALSE} flights %>% left_join(airlines, by=c("carrier" = "name")) ``` --- ## SQL Constructs: Multi-table Queries Key idea: - Do a join to combine multiple tables into an appropriate table - Use **SFW** constructs for single-table queries -- For the first part, where we use a join to get an appropriate table, the general SQL construct includes: - The name of the first table to join - The _type_ of join to do - The name of the second table to join - The join condition(s) --- ```sql select title, year, me.name as producerName from movies m join movieexec me where m.producer = me.id; ``` --- layout: true ## Entity Resolution and Record Linkage --- Often, we will be faced with the problem of _data integration_: - combine two (or more) datasets from different sources - that may contain information about the same _entities_. -- But,... the _attributes_ in the two datasets may not be the same, -- Worse: values for the _same_ entity may be different in the two datasets. --- ![](img/entityres1.png) -- ![](img/entityres2.png) --- These are examples of a general problem referred to as **Entity Resolution** and **Record Linkage**. --- ### Problem Definition **Given**: Entity sets $E_1$ and $E_2$, **Find**: Linked entities $(e_1,e_2)$ with $e_1 \in E_1$ and $e_2 \in E_2$. --- ### One approach: similarity function - Define a _similarity_ function between entities $e_1$ and $e_2$ - Link entities with high similarity. --- Define similarity as an _additive_ function over some set of shared attributes $A$: $$s(e_1,e_2) = \sum_{j \in A} s_j(e_1[j], e_2[j])$$ with $s_j$ a similarity function defined for _each_ attribute $j$, --- layout: true ## Entity Resolution and Record Linkage ### Example attribute functions --- **Categorical attribute**: pairs of entities with the same value are more similar to each other than pairs of entities with different values. 
E.g., $$ s_j(e_1[j],e_2[j]) = \begin{cases} 1 & \mathrm{ if } \; e_1[j] == e_2[j] \\ 0 & \mathrm { o.w. } \end{cases} $$ --- **Continuous attribute**: pairs of entities with values that are _close_ to each other are more similar than pairs of entities with values that are _farther_ to each other. Note that to specify _close_ or _far_ we need to introduce some notion of _distance_. We can use Euclidean distance for example, $$ d_j(e_1[j],e_2[j]) = (e_1[j] - e_2[j])^2; \\ s_j(e_1[j],e_2[j]) = e^{-d_j(e_1[j],e_2[j])} $$ --- **Text attributes**: based on _edit distance_ between strings rather than Euclidean distance. We can use domain knowledge to specify similarity. For example, fact that `John` and `Johnathan` are similar requires domain knowledge of common usage of English names. --- layout: true ## Solving the resolution problem --- Need a rule to match entities we think are linked. This depends on assumptions we make about the dataset, similar to assumptions we made when performing joins. --- Model the entity resolution problem as an _optimization_ problem: maximize _objective function_ (based on similarity) over possible sets $V$ of _valid_ pairs $(e_1,e_2)$, where set $V$ constraints pairs based on problem-specific assumptions. $$R = \arg \max_{V} \sum_{(e_1,e_2) \in V} s(e_1,e_2)$$ --- ### Many-to-one resolutions Constrain sets $V$ to represent many-to-one resolutions. Thus, entities in $e_1$ can only appear once in pairs in $V$, but entities $e_2$ may appear more than once. In this case, we can match $(e_1,e_2)$ where $$e_2 = \arg \max_{e \in E_2} s(e_1,e)$$ --- ### One-to-one resolutions Suppose we constrain sets $V$ to those that represent one-to-one resolutions: If $(e_1,e_2) \in V$ then $e_1$ and $e_2$ appear in only one pair in $V$. In this case, we have a harder computational problem. In fact, this is an instance of the _maximum bipartite matching problem_, and would look at network flow algorithms to solve. 
--- ### Other constraints We can add additional constraints to $V$ to represent other information we have about the task. A common one would be to only allow pairs $(e_1,e_2) \in V$ to have similarity above some threshold $t$. I.e., $(e_1, e_2) \in V$ only if $s(e_1,e_2) \geq t$. --- ### Discussion The procedure outlined above is an excellent first attempt to solve the Entity Resolution problem. This is a classical problem in Data Science for which a variety of approaches and methods are in use. --- layout: true ## Database Query Optimization --- Earlier we made the distinction that SQL is a _declarative_ language rather than a _procedural_ language. A reason why data base systems rely on a declarative language is that it allows the system to decide how to _evaluate_ a query _most efficiently_. --- Consider a Baseball database where we have two tables `Batting` and `Master` _what is the maximum batting "average" for a player from the state of California_? ```{r, echo=FALSE, eval=FALSE} db <- DBI::dbConnect(RSQLite::SQLite(), "data/lahman2016.sqlite") ``` ```sql select max(1.0 * b.H / b.AB) as best_ba from Batting as b join Master as m on b.playerId = m.playerId where b.AB >= 100 and m.birthState = "CA" ``` --- Now, let's do the same computation using `dplyr` operations: ```{r, include=FALSE} library(Lahman) library(tidyverse) ``` Version 1: ```{r, message=FALSE, warning=FALSE} Batting %>% inner_join(Master, by="playerID") %>% filter(AB >= 100, birthState == "CA") %>% mutate(AB=1.0 * H / AB) %>% summarize(max(AB)) ``` --- Version 2: ```{r, message=FALSE, warning=FALSE} Batting %>% filter(AB >= 100) %>% inner_join( filter(Master, birthState == "CA")) %>% mutate(AB = 1.0 * H / AB) %>% summarize(max(AB)) ``` --- Which should be most efficient? Think about a simple _cost_ model. The costliest operation here is the join between two tables. .center.image-80[![](img/inner_join_alg.png)] --- What is the cost of this algorithm? $|T1| \times |T2|$. 
For the rest of the operations, let's assume we perform this with a single pass through the table. For example, we assume that `filter(T)` has cost $|T|$. --- Let's write out the cost of each of the two pipelines. ```{r, eval=FALSE} Batting %>% inner_join(Master, by="playerID") %>% # cost: |Batting| x |Master| filter(AB >= 100, birthState == "CA") %>% # cost: |R1| mutate(AB=1.0 * H / AB) %>% # cost: |R| summarize(max(AB)) # cost: |R| ``` --- Cost of version 1 is $|\mathrm{Batting}|\times|\mathrm{Master}| + |R1| + 2|R|$ $R1$: inner join between `Batting` and `Master` $R$: $R1$ filtered to rows with `AB >=100 & birthState == "CA"`. ```{r, echo=FALSE} batting_size <- nrow(Batting) master_size <- nrow(Master) r1 <- Batting %>% inner_join(Master, by="playerID") r1_size <- nrow(r1) r <- filter(r1, AB>=100, birthState == "CA") r_size <- nrow(r) total_cost_v1 <- batting_size * master_size + r1_size + 2*r_size ``` In this example: `r format(total_cost_v1,digits=3,scientific=TRUE)` --- Now, let's look at the second version. ```{r, eval=FALSE} Batting %>% filter(AB >= 100) %>% # cost: |Batting| inner_join( Master %>% filter(birthState == "CA") # cost: |Master| ) %>% # cost: |B1| x |M1| mutate(AB = 1.0 * H / AB) %>% # cost |R| summarize(max(AB)) # cost |R| ``` --- Cost of version 2 is $|\mathrm{Batting}| + |\mathrm{Master}| + |B1|\times|M1|+2|R|$ $B1$: `Batting` filtered to include only rows with `AB >= 100` $M1$: `Master` filtered to include `birthState == "CA"`. ```{r, echo=FALSE} b1 <- filter(Batting, AB>=100) b1_size <- nrow(b1) m1 <- filter(Master, birthState == "CA") m1_size <- nrow(m1) total_cost_v2 <- batting_size + master_size + b1_size * m1_size + 2*r_size ``` In our example: `r format(total_cost_v2, digits=3,scientific=TRUE)` --- Version 1 (join tables before filtering) is `r format(total_cost_v1 / total_cost_v2, digits=2)` times costlier.
When using SQL in a database system we only write the one query describing our desired result, With the _procedural_ (`dplyr`) we need to think which of the two versions is more efficient. --- Database systems use _query optimization_ to decide how to evaluate queries efficiently. The goal of query optimization is to decide the most efficient query _plan_ to use to evaluate a query out of the many possible candidate plans it could use. It needs to solve two problems: search the space of possible plans, approximate the _cost_ of evaluating a specific plan. --- Think of the two procedural versions above as two candidate plans that the DB system _could_ use to evaluate the query. Query optimzation _approximates_ what it would cost to evaluate each of the two plans and decides to use the most efficient plan. ```{r, echo=FALSE, eval=FALSE} DBI::dbDisconnect(db) ``` --- layout: true ## Semi-structured Data Representation Model --- The Entity-Relational data model we have described so far is mostly defined for _structured data_: where a specific and consistent schema is assumed. Data models like XML and JSON are instead intended for *semi-structured* data. --- #### XML: eXtensible Markup Language Data models like XML rely on flexible, self-describing schemas: ```xml <?xml version="1.0" encoding="UTF-8"?> <!-- Edited by XMLSpy --> <CATALOG> <CD> <TITLE>Empire Burlesque</TITLE> <ARTIST><NAME></ARTIST> <COUNTRY>USA</COUNTRY> <COMPANY>Columbia</COMPANY> <PRICE>10.90</PRICE> <YEAR>1985</YEAR> </CD> <CD> <TITLE>Hide your heart</TITLE> <ARTIST>Bonnie Tyler</ARTIST> <COUNTRY>UK</COUNTRY> <COMPANY>CBS Records</COMPANY> <PRICE>9.90</PRICE> <YEAR>1988</YEAR> </CD> ... 
``` --- ### JSON: Javascript Object Notation ```json { "firstName": "John", "lastName": "Smith", "isAlive": true, "age": 25, "height_cm": 167.6, "address": { "streetAddress": "21 2nd Street", "city": "New York", "state": "NY", "postalCode": "10021-3100" }, "phoneNumbers": [ { "type": "home", "number": "212 555-1234" }, { "type": "office", "number": "646 555-4567" } ], "children": [], "spouse": null } ``` --- This is the format most contemporary data REST APIs use to transfer data. For instance, here is part of a JSON record from a Twitter stream: ```json { "created_at":"Sun May 05 14:01:34+00002013", "id":331046012875583488, "id_str":"331046012875583488", "text":"\u0425\u043e\u0447\u0443, \u0447\u0442\u043e\u0431 \u0442\u044b \u0441\u0434\u0435\u043b\u0430\u043b \u0432\u0441\u0451 \u043d\u0435\u043e\u0431\u0445\u043e\u0434\u0438\u043c\u043e\u0435.\n \\,,\\ *_* \/,,\/", "source":"\u003ca href=\"http:\/\/twitterfeed.com\"rel=\"nofollow\"\u003etwitterfeed\u003c\/a\u003e", "in_reply_to_user_id_str":null, "user":{ "id":548422428, "id_str":"548422428", "name":"\u0410\u0439\u0433\u0435\u0440\u0438\u043c \u041f\u043e\u0433\u043e\u0434\u0438\u043d\u0430", "screen_name":"paddybyrny", "location":"\u0420\u043e\u0441\u0441\u0438\u044f;\u0412\u043b\u0430\u0434\u0438\u0432\u043e\u0441\u0442\u043e\u043a", "followers_count":4188, "friends_count":4281, "lang":"en", "profile_background_image_url":"http:\/\/a0.twimg.com\/images\/themes\/theme1\/bg.png", }, "geo":null, "coordinates":null, "entities":{ "hashtags":[],"symbols":[],"urls":[],"user_mentions":[] },"favorited":false,"retweeted":false,"filter_level":"medium","lang":"ru"} ``` --- layout: true ## Summary --- We have looked at specifics of **Data Representation Modeling** - Entity Relationship and Relational Models - Definition of _Tidy Data_ - Joining tables - Entity Resolution - Models for semi-structured data <file_sep>/static/bookdown-notes/setting-up-the-data-science-toolbox.html <!DOCTYPE html> <html lang="" xml:lang=""> 
<head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <title>4 Setting up the Data Science Toolbox | Lecture Notes: Introduction to Data Science</title> <meta name="description" content="4 Setting up the Data Science Toolbox | Lecture Notes: Introduction to Data Science" /> <meta name="generator" content="bookdown 0.18 and GitBook 2.6.7" /> <meta property="og:title" content="4 Setting up the Data Science Toolbox | Lecture Notes: Introduction to Data Science" /> <meta property="og:type" content="book" /> <meta name="twitter:card" content="summary" /> <meta name="twitter:title" content="4 Setting up the Data Science Toolbox | Lecture Notes: Introduction to Data Science" /> <meta name="author" content="<NAME>" /> <meta name="date" content="2020-04-26" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <meta name="apple-mobile-web-app-capable" content="yes" /> <meta name="apple-mobile-web-app-status-bar-style" content="black" /> <link rel="prev" href="an-illustrative-analysis.html"/> <link rel="next" href="part-data-representation-modeling-ingestion-and-cleaning.html"/> <script src="libs/jquery-2.2.3/jquery.min.js"></script> <link href="libs/gitbook-2.6.7/css/style.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-table.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-bookdown.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-highlight.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-search.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-fontsettings.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-clipboard.css" rel="stylesheet" /> <script src="libs/htmlwidgets-1.5.1/htmlwidgets.js"></script> <link href="libs/str_view-0.1.0/str_view.css" rel="stylesheet" /> <script src="libs/str_view-binding-1.4.0/str_view.js"></script> <style type="text/css"> a.sourceLine { display: inline-block; line-height: 1.25; } 
a.sourceLine { pointer-events: none; color: inherit; text-decoration: inherit; } a.sourceLine:empty { height: 1.2em; position: absolute; } .sourceCode { overflow: visible; } code.sourceCode { white-space: pre; position: relative; } pre.sourceCode { margin: 0; } @media screen { div.sourceCode { overflow: auto; } } @media print { code.sourceCode { white-space: pre-wrap; } a.sourceLine { text-indent: -1em; padding-left: 1em; } } pre.numberSource a.sourceLine { position: relative; } pre.numberSource a.sourceLine:empty { position: absolute; } pre.numberSource a.sourceLine::before { content: attr(data-line-number); position: absolute; left: -5em; text-align: right; vertical-align: baseline; border: none; pointer-events: all; -webkit-touch-callout: none; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; padding: 0 4px; width: 4em; color: #aaaaaa; } pre.numberSource { margin-left: 3em; border-left: 1px solid #aaaaaa; padding-left: 4px; } div.sourceCode { } @media screen { a.sourceLine::before { text-decoration: underline; } } code span.al { color: #ff0000; font-weight: bold; } /* Alert */ code span.an { color: #60a0b0; font-weight: bold; font-style: italic; } /* Annotation */ code span.at { color: #7d9029; } /* Attribute */ code span.bn { color: #40a070; } /* BaseN */ code span.bu { } /* BuiltIn */ code span.cf { color: #007020; font-weight: bold; } /* ControlFlow */ code span.ch { color: #4070a0; } /* Char */ code span.cn { color: #880000; } /* Constant */ code span.co { color: #60a0b0; font-style: italic; } /* Comment */ code span.cv { color: #60a0b0; font-weight: bold; font-style: italic; } /* CommentVar */ code span.do { color: #ba2121; font-style: italic; } /* Documentation */ code span.dt { color: #902000; } /* DataType */ code span.dv { color: #40a070; } /* DecVal */ code span.er { color: #ff0000; font-weight: bold; } /* Error */ code span.ex { } /* Extension */ code span.fl { color: #40a070; } /* 
Float */ code span.fu { color: #06287e; } /* Function */ code span.im { } /* Import */ code span.in { color: #60a0b0; font-weight: bold; font-style: italic; } /* Information */ code span.kw { color: #007020; font-weight: bold; } /* Keyword */ code span.op { color: #666666; } /* Operator */ code span.ot { color: #007020; } /* Other */ code span.pp { color: #bc7a00; } /* Preprocessor */ code span.sc { color: #4070a0; } /* SpecialChar */ code span.ss { color: #bb6688; } /* SpecialString */ code span.st { color: #4070a0; } /* String */ code span.va { color: #19177c; } /* Variable */ code span.vs { color: #4070a0; } /* VerbatimString */ code span.wa { color: #60a0b0; font-weight: bold; font-style: italic; } /* Warning */ </style> </head> <body> <div class="book without-animation with-summary font-size-2 font-family-1" data-basepath="."> <div class="book-summary"> <nav role="navigation"> <ul class="summary"> <li><a href="http://bit.ly/hcb-ids">CMSC320 Intro. Data Science</a></li> <li><a href="http://www.hcbravo.org"><NAME></a></li> <li class="divider"></li> <li class="chapter" data-level="1" data-path="index.html"><a href="index.html"><i class="fa fa-check"></i><b>1</b> Preamble</a></li> <li class="chapter" data-level="2" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html"><i class="fa fa-check"></i><b>2</b> Introduction and Overview</a><ul> <li class="chapter" data-level="2.1" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#what-is-data-science"><i class="fa fa-check"></i><b>2.1</b> What is Data Science?</a><ul> <li class="chapter" data-level="2.1.1" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#data"><i class="fa fa-check"></i><b>2.1.1</b> Data</a></li> <li class="chapter" data-level="2.1.2" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#specific-questions"><i class="fa fa-check"></i><b>2.1.2</b> Specific Questions</a></li> <li 
class="chapter" data-level="2.1.3" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#interdisciplinary-activities"><i class="fa fa-check"></i><b>2.1.3</b> Interdisciplinary Activities</a></li> <li class="chapter" data-level="2.1.4" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#data-centric-artifacts-and-applications"><i class="fa fa-check"></i><b>2.1.4</b> Data-Centric Artifacts and Applications</a></li> </ul></li> <li class="chapter" data-level="2.2" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#why-data-science"><i class="fa fa-check"></i><b>2.2</b> Why Data Science?</a></li> <li class="chapter" data-level="2.3" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#data-science-in-society"><i class="fa fa-check"></i><b>2.3</b> Data Science in Society</a></li> <li class="chapter" data-level="2.4" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#course-organization"><i class="fa fa-check"></i><b>2.4</b> Course Organization</a></li> <li class="chapter" data-level="2.5" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#general-workflow"><i class="fa fa-check"></i><b>2.5</b> General Workflow</a><ul> <li class="chapter" data-level="2.5.1" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#defining-the-goal"><i class="fa fa-check"></i><b>2.5.1</b> Defining the Goal</a></li> <li class="chapter" data-level="2.5.2" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#data-collection-and-management"><i class="fa fa-check"></i><b>2.5.2</b> Data Collection and Management</a></li> <li class="chapter" data-level="2.5.3" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#modeling"><i class="fa fa-check"></i><b>2.5.3</b> Modeling</a></li> <li class="chapter" data-level="2.5.4" 
data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#model-evaluation"><i class="fa fa-check"></i><b>2.5.4</b> Model Evaluation</a></li> <li class="chapter" data-level="2.5.5" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#presentation"><i class="fa fa-check"></i><b>2.5.5</b> Presentation</a></li> <li class="chapter" data-level="2.5.6" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#deployment"><i class="fa fa-check"></i><b>2.5.6</b> Deployment</a></li> </ul></li> </ul></li> <li class="chapter" data-level="3" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html"><i class="fa fa-check"></i><b>3</b> An Illustrative Analysis</a><ul> <li class="chapter" data-level="3.1" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#gathering-data"><i class="fa fa-check"></i><b>3.1</b> Gathering data</a><ul> <li class="chapter" data-level="3.1.1" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#movie-ratings"><i class="fa fa-check"></i><b>3.1.1</b> Movie ratings</a></li> <li class="chapter" data-level="3.1.2" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#movie-budgets-and-revenue"><i class="fa fa-check"></i><b>3.1.2</b> Movie budgets and revenue</a></li> </ul></li> <li class="chapter" data-level="3.2" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#manipulating-the-data"><i class="fa fa-check"></i><b>3.2</b> Manipulating the data</a></li> <li class="chapter" data-level="3.3" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#visualizing-the-data"><i class="fa fa-check"></i><b>3.3</b> Visualizing the data</a></li> <li class="chapter" data-level="3.4" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#modeling-data"><i class="fa fa-check"></i><b>3.4</b> Modeling 
data</a></li> <li class="chapter" data-level="3.5" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#visualizing-model-result"><i class="fa fa-check"></i><b>3.5</b> Visualizing model result</a></li> <li class="chapter" data-level="3.6" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#abstracting-the-analysis"><i class="fa fa-check"></i><b>3.6</b> Abstracting the analysis</a></li> <li class="chapter" data-level="3.7" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#making-analyses-accessible"><i class="fa fa-check"></i><b>3.7</b> Making analyses accessible</a></li> <li class="chapter" data-level="3.8" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#summary"><i class="fa fa-check"></i><b>3.8</b> Summary</a></li> </ul></li> <li class="chapter" data-level="4" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html"><i class="fa fa-check"></i><b>4</b> Setting up the Data Science Toolbox</a><ul> <li class="chapter" data-level="4.1" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#rrstudio"><i class="fa fa-check"></i><b>4.1</b> R/Rstudio</a><ul> <li class="chapter" data-level="4.1.1" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#some-history"><i class="fa fa-check"></i><b>4.1.1</b> Some history</a></li> <li class="chapter" data-level="4.1.2" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#setting-up-r"><i class="fa fa-check"></i><b>4.1.2</b> Setting up R</a></li> <li class="chapter" data-level="4.1.3" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#setting-up-rstudio"><i class="fa fa-check"></i><b>4.1.3</b> Setting up Rstudio</a></li> <li class="chapter" data-level="4.1.4" 
data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#a-first-look-at-rstudio"><i class="fa fa-check"></i><b>4.1.4</b> A first look at Rstudio</a></li> <li class="chapter" data-level="4.1.5" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#interactive-console"><i class="fa fa-check"></i><b>4.1.5</b> Interactive Console</a></li> <li class="chapter" data-level="4.1.6" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#data-viewer"><i class="fa fa-check"></i><b>4.1.6</b> Data Viewer</a></li> <li class="chapter" data-level="4.1.7" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#names-values-and-functions"><i class="fa fa-check"></i><b>4.1.7</b> Names, values and functions</a></li> <li class="chapter" data-level="4.1.8" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#plotting"><i class="fa fa-check"></i><b>4.1.8</b> Plotting</a></li> <li class="chapter" data-level="4.1.9" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#editor"><i class="fa fa-check"></i><b>4.1.9</b> Editor</a></li> <li class="chapter" data-level="4.1.10" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#files-viewer"><i class="fa fa-check"></i><b>4.1.10</b> Files viewer</a></li> <li class="chapter" data-level="4.1.11" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#r-packages"><i class="fa fa-check"></i><b>4.1.11</b> R packages</a></li> <li class="chapter" data-level="4.1.12" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#additional-r-resources"><i class="fa fa-check"></i><b>4.1.12</b> Additional R resources</a></li> <li 
class="chapter" data-level="4.1.13" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#literate-programming"><i class="fa fa-check"></i><b>4.1.13</b> Literate Programming</a></li> <li class="chapter" data-level="4.1.14" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#course-packages"><i class="fa fa-check"></i><b>4.1.14</b> Course packages</a></li> </ul></li> <li class="chapter" data-level="4.2" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#pythonjupyter"><i class="fa fa-check"></i><b>4.2</b> Python/Jupyter</a></li> </ul></li> <li class="chapter" data-level="" data-path="part-data-representation-modeling-ingestion-and-cleaning.html"><a href="part-data-representation-modeling-ingestion-and-cleaning.html"><i class="fa fa-check"></i>(Part) Data representation modeling, ingestion and cleaning</a></li> <li class="chapter" data-level="5" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html"><i class="fa fa-check"></i><b>5</b> Measurements and Data Types</a><ul> <li class="chapter" data-level="5.1" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#a-data-analysis-to-get-us-going"><i class="fa fa-check"></i><b>5.1</b> A data analysis to get us going</a></li> <li class="chapter" data-level="5.2" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#getting-data"><i class="fa fa-check"></i><b>5.2</b> Getting data</a><ul> <li class="chapter" data-level="5.2.1" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#names-values-and-functions-1"><i class="fa fa-check"></i><b>5.2.1</b> Names, values and functions</a></li> </ul></li> <li class="chapter" data-level="5.3" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#entities-and-attributes"><i 
class="fa fa-check"></i><b>5.3</b> Entities and attributes</a></li> <li class="chapter" data-level="5.4" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#categorical-attributes"><i class="fa fa-check"></i><b>5.4</b> Categorical attributes</a><ul> <li class="chapter" data-level="5.4.1" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#factors-in-r"><i class="fa fa-check"></i><b>5.4.1</b> Factors in R</a></li> </ul></li> <li class="chapter" data-level="5.5" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#discrete-numeric-attributes"><i class="fa fa-check"></i><b>5.5</b> Discrete numeric attributes</a></li> <li class="chapter" data-level="5.6" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#continuous-numeric-data"><i class="fa fa-check"></i><b>5.6</b> Continuous numeric data</a></li> <li class="chapter" data-level="5.7" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#other-examples"><i class="fa fa-check"></i><b>5.7</b> Other examples</a></li> <li class="chapter" data-level="5.8" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#other-important-datatypes"><i class="fa fa-check"></i><b>5.8</b> Other important datatypes</a></li> <li class="chapter" data-level="5.9" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#units"><i class="fa fa-check"></i><b>5.9</b> Units</a></li> <li class="chapter" data-level="5.10" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#quick-questions"><i class="fa fa-check"></i><b>5.10</b> Quick questions</a></li> </ul></li> <li class="chapter" data-level="6" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html"><i class="fa fa-check"></i><b>6</b> Principles: Basic Operations</a><ul> <li class="chapter" 
data-level="6.1" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#operations-that-subset-attributes"><i class="fa fa-check"></i><b>6.1</b> Operations that subset attributes</a><ul> <li class="chapter" data-level="6.1.1" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#select"><i class="fa fa-check"></i><b>6.1.1</b> <code>select</code></a></li> <li class="chapter" data-level="6.1.2" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#rename"><i class="fa fa-check"></i><b>6.1.2</b> <code>rename</code></a></li> </ul></li> <li class="chapter" data-level="6.2" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#operations-that-subset-entities"><i class="fa fa-check"></i><b>6.2</b> Operations that subset entities</a><ul> <li class="chapter" data-level="6.2.1" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#slice"><i class="fa fa-check"></i><b>6.2.1</b> <code>slice</code></a></li> <li class="chapter" data-level="6.2.2" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#filter"><i class="fa fa-check"></i><b>6.2.2</b> <code>filter</code></a></li> <li class="chapter" data-level="6.2.3" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#sample_n-and-sample_frac"><i class="fa fa-check"></i><b>6.2.3</b> <code>sample_n</code> and <code>sample_frac</code></a></li> </ul></li> <li class="chapter" data-level="6.3" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#pipelines-of-operations"><i class="fa fa-check"></i><b>6.3</b> Pipelines of operations</a></li> </ul></li> <li class="chapter" data-level="7" data-path="principles-more-operations.html"><a href="principles-more-operations.html"><i class="fa fa-check"></i><b>7</b> Principles: More Operations</a><ul> <li class="chapter" data-level="7.1" 
data-path="principles-more-operations.html"><a href="principles-more-operations.html#operations-that-sort-entities"><i class="fa fa-check"></i><b>7.1</b> Operations that sort entities</a></li> <li class="chapter" data-level="7.2" data-path="principles-more-operations.html"><a href="principles-more-operations.html#operations-that-create-new-attributes"><i class="fa fa-check"></i><b>7.2</b> Operations that create new attributes</a></li> <li class="chapter" data-level="7.3" data-path="principles-more-operations.html"><a href="principles-more-operations.html#operations-that-summarize-attribute-values-over-entities"><i class="fa fa-check"></i><b>7.3</b> Operations that summarize attribute values over entities</a></li> <li class="chapter" data-level="7.4" data-path="principles-more-operations.html"><a href="principles-more-operations.html#operations-that-group-entities"><i class="fa fa-check"></i><b>7.4</b> Operations that group entities</a></li> <li class="chapter" data-level="7.5" data-path="principles-more-operations.html"><a href="principles-more-operations.html#vectors"><i class="fa fa-check"></i><b>7.5</b> Vectors</a></li> <li class="chapter" data-level="7.6" data-path="principles-more-operations.html"><a href="principles-more-operations.html#attributes-as-vectors"><i class="fa fa-check"></i><b>7.6</b> Attributes as vectors</a></li> <li class="chapter" data-level="7.7" data-path="principles-more-operations.html"><a href="principles-more-operations.html#functions"><i class="fa fa-check"></i><b>7.7</b> Functions</a></li> </ul></li> <li class="chapter" data-level="8" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html"><i class="fa fa-check"></i><b>8</b> Basic plotting with <code>ggplot</code></a><ul> <li class="chapter" data-level="8.1" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#plot-construction-details"><i class="fa fa-check"></i><b>8.1</b> Plot Construction Details</a><ul> <li 
class="chapter" data-level="8.1.1" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#mappings"><i class="fa fa-check"></i><b>8.1.1</b> Mappings</a></li> <li class="chapter" data-level="8.1.2" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#representations"><i class="fa fa-check"></i><b>8.1.2</b> Representations</a></li> </ul></li> <li class="chapter" data-level="8.2" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#frequently-used-plots"><i class="fa fa-check"></i><b>8.2</b> Frequently Used Plots</a><ul> <li class="chapter" data-level="8.2.1" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#scatter-plot"><i class="fa fa-check"></i><b>8.2.1</b> Scatter plot</a></li> <li class="chapter" data-level="8.2.2" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#bar-graph"><i class="fa fa-check"></i><b>8.2.2</b> Bar graph</a></li> <li class="chapter" data-level="8.2.3" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#histogram"><i class="fa fa-check"></i><b>8.2.3</b> Histogram</a></li> <li class="chapter" data-level="8.2.4" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#boxplot"><i class="fa fa-check"></i><b>8.2.4</b> Boxplot</a></li> </ul></li> </ul></li> <li class="chapter" data-level="9" data-path="brief-introduction-to-rmarkdown.html"><a href="brief-introduction-to-rmarkdown.html"><i class="fa fa-check"></i><b>9</b> Brief Introduction to Rmarkdown</a></li> <li class="chapter" data-level="10" data-path="best-practices-for-data-science-projects.html"><a href="best-practices-for-data-science-projects.html"><i class="fa fa-check"></i><b>10</b> Best Practices for Data Science Projects</a></li> <li class="chapter" data-level="11" data-path="tidy-data-i-the-er-model.html"><a href="tidy-data-i-the-er-model.html"><i class="fa 
fa-check"></i><b>11</b> Tidy Data I: The ER Model</a><ul> <li class="chapter" data-level="11.1" data-path="tidy-data-i-the-er-model.html"><a href="tidy-data-i-the-er-model.html#overview"><i class="fa fa-check"></i><b>11.1</b> Overview</a></li> <li class="chapter" data-level="11.2" data-path="tidy-data-i-the-er-model.html"><a href="tidy-data-i-the-er-model.html#the-entity-relationship-and-relational-models"><i class="fa fa-check"></i><b>11.2</b> The Entity-Relationship and Relational Models</a><ul> <li class="chapter" data-level="11.2.1" data-path="tidy-data-i-the-er-model.html"><a href="tidy-data-i-the-er-model.html#formal-introduction-to-keys"><i class="fa fa-check"></i><b>11.2.1</b> Formal introduction to keys</a></li> </ul></li> <li class="chapter" data-level="11.3" data-path="tidy-data-i-the-er-model.html"><a href="tidy-data-i-the-er-model.html#tidy-data"><i class="fa fa-check"></i><b>11.3</b> Tidy Data</a></li> </ul></li> <li class="chapter" data-level="12" data-path="sql-i-single-table-queries.html"><a href="sql-i-single-table-queries.html"><i class="fa fa-check"></i><b>12</b> SQL I: Single Table Queries</a><ul> <li class="chapter" data-level="12.1" data-path="sql-i-single-table-queries.html"><a href="sql-i-single-table-queries.html#group-by-and-summarize"><i class="fa fa-check"></i><b>12.1</b> Group-by and summarize</a></li> <li class="chapter" data-level="12.2" data-path="sql-i-single-table-queries.html"><a href="sql-i-single-table-queries.html#subqueries"><i class="fa fa-check"></i><b>12.2</b> Subqueries</a></li> </ul></li> <li class="chapter" data-level="13" data-path="two-table-operations.html"><a href="two-table-operations.html"><i class="fa fa-check"></i><b>13</b> Two-table operations</a><ul> <li class="chapter" data-level="13.1" data-path="two-table-operations.html"><a href="two-table-operations.html#left-join"><i class="fa fa-check"></i><b>13.1</b> Left Join</a></li> <li class="chapter" data-level="13.2" data-path="two-table-operations.html"><a 
href="two-table-operations.html#right-join"><i class="fa fa-check"></i><b>13.2</b> Right Join</a></li> <li class="chapter" data-level="13.3" data-path="two-table-operations.html"><a href="two-table-operations.html#inner-join"><i class="fa fa-check"></i><b>13.3</b> Inner Join</a></li> <li class="chapter" data-level="13.4" data-path="two-table-operations.html"><a href="two-table-operations.html#full-join"><i class="fa fa-check"></i><b>13.4</b> Full Join</a></li> <li class="chapter" data-level="13.5" data-path="two-table-operations.html"><a href="two-table-operations.html#join-conditions"><i class="fa fa-check"></i><b>13.5</b> Join conditions</a></li> <li class="chapter" data-level="13.6" data-path="two-table-operations.html"><a href="two-table-operations.html#filtering-joins"><i class="fa fa-check"></i><b>13.6</b> Filtering Joins</a></li> <li class="chapter" data-level="13.7" data-path="two-table-operations.html"><a href="two-table-operations.html#sql-constructs-multi-table-queries"><i class="fa fa-check"></i><b>13.7</b> SQL Constructs: Multi-table Queries</a></li> </ul></li> <li class="chapter" data-level="14" data-path="sql-system-constructs.html"><a href="sql-system-constructs.html"><i class="fa fa-check"></i><b>14</b> SQL System Constructs</a><ul> <li class="chapter" data-level="14.1" data-path="sql-system-constructs.html"><a href="sql-system-constructs.html#sql-as-a-data-definition-language"><i class="fa fa-check"></i><b>14.1</b> SQL as a Data Definition Language</a></li> <li class="chapter" data-level="14.2" data-path="sql-system-constructs.html"><a href="sql-system-constructs.html#set-operations-and-comparisons"><i class="fa fa-check"></i><b>14.2</b> Set Operations and Comparisons</a></li> <li class="chapter" data-level="14.3" data-path="sql-system-constructs.html"><a href="sql-system-constructs.html#views"><i class="fa fa-check"></i><b>14.3</b> Views</a></li> <li class="chapter" data-level="14.4" data-path="sql-system-constructs.html"><a 
href="sql-system-constructs.html#nulls"><i class="fa fa-check"></i><b>14.4</b> NULLs</a></li> </ul></li> <li class="chapter" data-level="15" data-path="db-parting-shots.html"><a href="db-parting-shots.html"><i class="fa fa-check"></i><b>15</b> DB Parting Shots</a><ul> <li class="chapter" data-level="15.1" data-path="db-parting-shots.html"><a href="db-parting-shots.html#database-query-optimization"><i class="fa fa-check"></i><b>15.1</b> Database Query Optimization</a></li> <li class="chapter" data-level="15.2" data-path="db-parting-shots.html"><a href="db-parting-shots.html#json-data-model"><i class="fa fa-check"></i><b>15.2</b> JSON Data Model</a></li> </ul></li> <li class="chapter" data-level="16" data-path="ingesting-data.html"><a href="ingesting-data.html"><i class="fa fa-check"></i><b>16</b> Ingesting data</a><ul> <li class="chapter" data-level="16.1" data-path="ingesting-data.html"><a href="ingesting-data.html#structured-ingestion"><i class="fa fa-check"></i><b>16.1</b> Structured ingestion</a><ul> <li class="chapter" data-level="16.1.1" data-path="ingesting-data.html"><a href="ingesting-data.html#csv-files-and-similar"><i class="fa fa-check"></i><b>16.1.1</b> CSV files (and similar)</a></li> <li class="chapter" data-level="16.1.2" data-path="ingesting-data.html"><a href="ingesting-data.html#excel-spreadsheets"><i class="fa fa-check"></i><b>16.1.2</b> Excel spreadsheets</a></li> </ul></li> <li class="chapter" data-level="16.2" data-path="ingesting-data.html"><a href="ingesting-data.html#scraping"><i class="fa fa-check"></i><b>16.2</b> Scraping</a><ul> <li class="chapter" data-level="16.2.1" data-path="ingesting-data.html"><a href="ingesting-data.html#scraping-from-dirty-html-tables"><i class="fa fa-check"></i><b>16.2.1</b> Scraping from dirty HTML tables</a></li> </ul></li> </ul></li> <li class="chapter" data-level="17" data-path="tidying-data.html"><a href="tidying-data.html"><i class="fa fa-check"></i><b>17</b> Tidying data</a><ul> <li class="chapter" 
data-level="17.1" data-path="tidying-data.html"><a href="tidying-data.html#tidy-data-1"><i class="fa fa-check"></i><b>17.1</b> Tidy Data</a></li> <li class="chapter" data-level="17.2" data-path="tidying-data.html"><a href="tidying-data.html#common-problems-in-messy-data"><i class="fa fa-check"></i><b>17.2</b> Common problems in messy data</a><ul> <li class="chapter" data-level="17.2.1" data-path="tidying-data.html"><a href="tidying-data.html#headers-as-values"><i class="fa fa-check"></i><b>17.2.1</b> Headers as values</a></li> <li class="chapter" data-level="17.2.2" data-path="tidying-data.html"><a href="tidying-data.html#multiple-variables-in-one-column"><i class="fa fa-check"></i><b>17.2.2</b> Multiple variables in one column</a></li> <li class="chapter" data-level="17.2.3" data-path="tidying-data.html"><a href="tidying-data.html#variables-stored-in-both-rows-and-columns"><i class="fa fa-check"></i><b>17.2.3</b> Variables stored in both rows and columns</a></li> <li class="chapter" data-level="17.2.4" data-path="tidying-data.html"><a href="tidying-data.html#multiple-types-in-one-table"><i class="fa fa-check"></i><b>17.2.4</b> Multiple types in one table</a></li> </ul></li> </ul></li> <li class="chapter" data-level="18" data-path="text-and-dates.html"><a href="text-and-dates.html"><i class="fa fa-check"></i><b>18</b> Text and Dates</a><ul> <li class="chapter" data-level="18.1" data-path="text-and-dates.html"><a href="text-and-dates.html#text"><i class="fa fa-check"></i><b>18.1</b> Text</a><ul> <li class="chapter" data-level="18.1.1" data-path="text-and-dates.html"><a href="text-and-dates.html#string-operations"><i class="fa fa-check"></i><b>18.1.1</b> String operations</a></li> <li class="chapter" data-level="18.1.2" data-path="text-and-dates.html"><a href="text-and-dates.html#regular-expressions"><i class="fa fa-check"></i><b>18.1.2</b> Regular expressions</a></li> <li class="chapter" data-level="18.1.3" data-path="text-and-dates.html"><a 
href="text-and-dates.html#tools-using-regular-expressions"><i class="fa fa-check"></i><b>18.1.3</b> Tools using regular expressions</a></li> <li class="chapter" data-level="18.1.4" data-path="text-and-dates.html"><a href="text-and-dates.html#extracting-attributes-from-text"><i class="fa fa-check"></i><b>18.1.4</b> Extracting attributes from text</a></li> </ul></li> <li class="chapter" data-level="18.2" data-path="text-and-dates.html"><a href="text-and-dates.html#handling-dates"><i class="fa fa-check"></i><b>18.2</b> Handling dates</a></li> </ul></li> <li class="chapter" data-level="19" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html"><i class="fa fa-check"></i><b>19</b> Entity Resolution and Record Linkage</a><ul> <li class="chapter" data-level="19.1" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#problem-definition"><i class="fa fa-check"></i><b>19.1</b> Problem Definition</a></li> <li class="chapter" data-level="19.2" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#one-approach-similarity-function"><i class="fa fa-check"></i><b>19.2</b> One approach: similarity function</a><ul> <li class="chapter" data-level="19.2.1" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#example-attribute-functions"><i class="fa fa-check"></i><b>19.2.1</b> Example attribute functions</a></li> </ul></li> <li class="chapter" data-level="19.3" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#solving-the-resolution-problem"><i class="fa fa-check"></i><b>19.3</b> Solving the resolution problem</a><ul> <li class="chapter" data-level="19.3.1" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#many-to-one-resolutions"><i class="fa fa-check"></i><b>19.3.1</b> 
Many-to-one resolutions</a></li> <li class="chapter" data-level="19.3.2" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#one-to-one-resolutions"><i class="fa fa-check"></i><b>19.3.2</b> One-to-one resolutions</a></li> <li class="chapter" data-level="19.3.3" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#other-constraints"><i class="fa fa-check"></i><b>19.3.3</b> Other constraints</a></li> </ul></li> <li class="chapter" data-level="19.4" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#discussion"><i class="fa fa-check"></i><b>19.4</b> Discussion</a></li> </ul></li> <li class="chapter" data-level="" data-path="part-exploratory-data-analysis.html"><a href="part-exploratory-data-analysis.html"><i class="fa fa-check"></i>(Part) Exploratory Data Analysis</a></li> <li class="chapter" data-level="20" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html"><i class="fa fa-check"></i><b>20</b> Exploratory Data Analysis: Visualization</a><ul> <li class="chapter" data-level="20.0.1" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#eda-exploratory-data-analysis"><i class="fa fa-check"></i><b>20.0.1</b> EDA (Exploratory Data Analysis)</a></li> <li class="chapter" data-level="20.1" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#visualization-of-single-variables"><i class="fa fa-check"></i><b>20.1</b> Visualization of single variables</a><ul> <li class="chapter" data-level="20.1.1" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#visualization-of-pairs-of-variables"><i class="fa fa-check"></i><b>20.1.1</b> Visualization of pairs of variables</a></li> </ul></li> <li 
class="chapter" data-level="20.2" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#eda-with-the-grammar-of-graphics"><i class="fa fa-check"></i><b>20.2</b> EDA with the grammar of graphics</a><ul> <li class="chapter" data-level="20.2.1" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#other-aesthetics"><i class="fa fa-check"></i><b>20.2.1</b> Other aesthetics</a></li> <li class="chapter" data-level="20.2.2" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#faceting"><i class="fa fa-check"></i><b>20.2.2</b> Faceting</a></li> </ul></li> </ul></li> <li class="chapter" data-level="21" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html"><i class="fa fa-check"></i><b>21</b> Exploratory Data Analysis: Summary Statistics</a><ul> <li class="chapter" data-level="21.1" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#range"><i class="fa fa-check"></i><b>21.1</b> Range</a></li> <li class="chapter" data-level="21.2" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#central-tendency"><i class="fa fa-check"></i><b>21.2</b> Central Tendency</a><ul> <li class="chapter" data-level="21.2.1" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#derivation-of-the-mean-as-central-tendency-statistic"><i class="fa fa-check"></i><b>21.2.1</b> Derivation of the mean as central tendency statistic</a></li> </ul></li> <li class="chapter" data-level="21.3" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#spread"><i class="fa fa-check"></i><b>21.3</b> Spread</a><ul> 
<li class="chapter" data-level="21.3.1" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#variance"><i class="fa fa-check"></i><b>21.3.1</b> Variance</a></li> <li class="chapter" data-level="21.3.2" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#spread-estimates-using-rank-statistics"><i class="fa fa-check"></i><b>21.3.2</b> Spread estimates using rank statistics</a></li> </ul></li> <li class="chapter" data-level="21.4" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#outliers"><i class="fa fa-check"></i><b>21.4</b> Outliers</a></li> <li class="chapter" data-level="21.5" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#skew"><i class="fa fa-check"></i><b>21.5</b> Skew</a></li> <li class="chapter" data-level="21.6" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#covariance-and-correlation"><i class="fa fa-check"></i><b>21.6</b> Covariance and correlation</a></li> <li class="chapter" data-level="21.7" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#postscript-finding-maximaminima-using-derivatives"><i class="fa fa-check"></i><b>21.7</b> Postscript: Finding Maxima/Minima using Derivatives</a><ul> <li class="chapter" data-level="21.7.1" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#steps-to-find-maximaminima-of-function-fx"><i class="fa fa-check"></i><b>21.7.1</b> Steps to find Maxima/Minima of function <span class="math inline">\(f(x)\)</span></a></li> <li class="chapter" data-level="21.7.2" data-path="exploratory-data-analysis-summary-statistics.html"><a 
href="exploratory-data-analysis-summary-statistics.html#notes-on-finding-derivatives"><i class="fa fa-check"></i><b>21.7.2</b> Notes on Finding Derivatives</a></li> <li class="chapter" data-level="21.7.3" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#resources"><i class="fa fa-check"></i><b>21.7.3</b> Resources:</a></li> </ul></li> </ul></li> <li class="chapter" data-level="22" data-path="eda-data-transformations.html"><a href="eda-data-transformations.html"><i class="fa fa-check"></i><b>22</b> EDA: Data Transformations</a><ul> <li class="chapter" data-level="22.1" data-path="eda-data-transformations.html"><a href="eda-data-transformations.html#centering-and-scaling"><i class="fa fa-check"></i><b>22.1</b> Centering and scaling</a></li> <li class="chapter" data-level="22.2" data-path="eda-data-transformations.html"><a href="eda-data-transformations.html#treating-categorical-variables-as-numeric"><i class="fa fa-check"></i><b>22.2</b> Treating categorical variables as numeric</a><ul> <li class="chapter" data-level="22.2.1" data-path="eda-data-transformations.html"><a href="eda-data-transformations.html#discretizing-continuous-values."><i class="fa fa-check"></i><b>22.2.1</b> Discretizing continuous values.</a></li> </ul></li> <li class="chapter" data-level="22.3" data-path="eda-data-transformations.html"><a href="eda-data-transformations.html#skewed-data"><i class="fa fa-check"></i><b>22.3</b> Skewed Data</a></li> </ul></li> <li class="chapter" data-level="23" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html"><i class="fa fa-check"></i><b>23</b> EDA: Handling Missing Data</a><ul> <li class="chapter" data-level="23.1" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#mechanisms-of-missing-data"><i class="fa fa-check"></i><b>23.1</b> Mechanisms of missing data</a></li> <li class="chapter" data-level="23.2" 
data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#handling-missing-data"><i class="fa fa-check"></i><b>23.2</b> Handling missing data</a><ul> <li class="chapter" data-level="23.2.1" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#removing-missing-data"><i class="fa fa-check"></i><b>23.2.1</b> Removing missing data</a></li> <li class="chapter" data-level="23.2.2" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#encoding-as-missing"><i class="fa fa-check"></i><b>23.2.2</b> Encoding as missing</a></li> <li class="chapter" data-level="23.2.3" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#imputation"><i class="fa fa-check"></i><b>23.2.3</b> Imputation</a></li> </ul></li> <li class="chapter" data-level="23.3" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#implications-of-imputation"><i class="fa fa-check"></i><b>23.3</b> Implications of imputation</a></li> </ul></li> <li class="chapter" data-level="" data-path="part-statistical-learning.html"><a href="part-statistical-learning.html"><i class="fa fa-check"></i>(Part) Statistical Learning</a></li> <li class="chapter" data-level="24" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html"><i class="fa fa-check"></i><b>24</b> Univariate distributions and statistics</a><ul> <li class="chapter" data-level="24.1" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#variation-randomness-and-stochasticity"><i class="fa fa-check"></i><b>24.1</b> Variation, randomness and stochasticity</a><ul> <li class="chapter" data-level="24.1.1" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#random-variables"><i class="fa fa-check"></i><b>24.1.1</b> Random variables</a></li> </ul></li> <li 
class="chapter" data-level="24.2" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#discrete-probability-distributions"><i class="fa fa-check"></i><b>24.2</b> (Discrete) Probability distributions</a><ul> <li class="chapter" data-level="24.2.1" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#example-the-oracle-of-tweet"><i class="fa fa-check"></i><b>24.2.1</b> Example The oracle of TWEET</a></li> </ul></li> <li class="chapter" data-level="24.3" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#expectation"><i class="fa fa-check"></i><b>24.3</b> Expectation</a></li> <li class="chapter" data-level="24.4" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#estimation"><i class="fa fa-check"></i><b>24.4</b> Estimation</a><ul> <li class="chapter" data-level="24.4.1" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#law-of-large-numbers-lln"><i class="fa fa-check"></i><b>24.4.1</b> Law of large numbers (LLN)</a></li> <li class="chapter" data-level="24.4.2" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#central-limit-theorem-clt"><i class="fa fa-check"></i><b>24.4.2</b> Central Limit Theorem (CLT)</a></li> </ul></li> <li class="chapter" data-level="24.5" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#the-normal-distribution"><i class="fa fa-check"></i><b>24.5</b> The normal distribution</a><ul> <li class="chapter" data-level="24.5.1" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#clt-continued"><i class="fa fa-check"></i><b>24.5.1</b> CLT continued</a></li> </ul></li> <li 
class="chapter" data-level="24.6" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#the-bootstrap-procedure"><i class="fa fa-check"></i><b>24.6</b> The Bootstrap Procedure</a></li> </ul></li> <li class="chapter" data-level="25" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html"><i class="fa fa-check"></i><b>25</b> Experiment design and hypothesis testing</a><ul> <li class="chapter" data-level="25.1" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#inference"><i class="fa fa-check"></i><b>25.1</b> Inference</a><ul> <li class="chapter" data-level="25.1.1" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#hypothesis-testing"><i class="fa fa-check"></i><b>25.1.1</b> Hypothesis testing</a></li> </ul></li> <li class="chapter" data-level="25.2" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#ab-testing"><i class="fa fa-check"></i><b>25.2</b> A/B Testing</a></li> <li class="chapter" data-level="25.3" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#summary-1"><i class="fa fa-check"></i><b>25.3</b> Summary</a></li> <li class="chapter" data-level="25.4" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#probability-distributions"><i class="fa fa-check"></i><b>25.4</b> Probability Distributions</a><ul> <li class="chapter" data-level="25.4.1" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#bernoulli"><i class="fa fa-check"></i><b>25.4.1</b> Bernoulli</a></li> <li class="chapter" data-level="25.4.2" data-path="experiment-design-and-hypothesis-testing.html"><a 
href="experiment-design-and-hypothesis-testing.html#binomial"><i class="fa fa-check"></i><b>25.4.2</b> Binomial</a></li> <li class="chapter" data-level="25.4.3" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#normal-gaussian-distribution"><i class="fa fa-check"></i><b>25.4.3</b> Normal (Gaussian) distribution</a></li> <li class="chapter" data-level="25.4.4" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#distributions-in-r"><i class="fa fa-check"></i><b>25.4.4</b> Distributions in R</a></li> </ul></li> </ul></li> <li class="chapter" data-level="26" data-path="multivariate-probability.html"><a href="multivariate-probability.html"><i class="fa fa-check"></i><b>26</b> Multivariate probability</a><ul> <li class="chapter" data-level="26.1" data-path="multivariate-probability.html"><a href="multivariate-probability.html#joint-and-conditional-probability"><i class="fa fa-check"></i><b>26.1</b> Joint and conditional probability</a></li> <li class="chapter" data-level="26.2" data-path="multivariate-probability.html"><a href="multivariate-probability.html#bayes-rule"><i class="fa fa-check"></i><b>26.2</b> Bayes’ Rule</a></li> <li class="chapter" data-level="26.3" data-path="multivariate-probability.html"><a href="multivariate-probability.html#conditional-expectation"><i class="fa fa-check"></i><b>26.3</b> Conditional expectation</a></li> <li class="chapter" data-level="26.4" data-path="multivariate-probability.html"><a href="multivariate-probability.html#maximum-likelihood"><i class="fa fa-check"></i><b>26.4</b> Maximum likelihood</a></li> </ul></li> <li class="chapter" data-level="" data-path="part-machine-learning.html"><a href="part-machine-learning.html"><i class="fa fa-check"></i>(Part) Machine Learning</a></li> <li class="chapter" data-level="27" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html"><i 
class="fa fa-check"></i><b>27</b> Data Analysis with Geometry</a><ul> <li class="chapter" data-level="27.1" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#motivating-example-credit-analysis"><i class="fa fa-check"></i><b>27.1</b> Motivating Example: Credit Analysis</a></li> <li class="chapter" data-level="27.2" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#from-data-to-feature-vectors"><i class="fa fa-check"></i><b>27.2</b> From data to feature vectors</a></li> <li class="chapter" data-level="27.3" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#technical-notation"><i class="fa fa-check"></i><b>27.3</b> Technical notation</a></li> <li class="chapter" data-level="27.4" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#geometry-and-distances"><i class="fa fa-check"></i><b>27.4</b> Geometry and Distances</a><ul> <li class="chapter" data-level="27.4.1" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#k-nearest-neighbor-classification"><i class="fa fa-check"></i><b>27.4.1</b> K-nearest neighbor classification</a></li> <li class="chapter" data-level="27.4.2" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#the-importance-of-transformations"><i class="fa fa-check"></i><b>27.4.2</b> The importance of transformations</a></li> </ul></li> <li class="chapter" data-level="27.5" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#quick-vector-algebra-review"><i class="fa fa-check"></i><b>27.5</b> Quick vector algebra review</a><ul> <li class="chapter" data-level="27.5.1" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#quiz"><i class="fa fa-check"></i><b>27.5.1</b> Quiz</a></li> </ul></li> <li class="chapter" data-level="27.6" data-path="data-analysis-with-geometry.html"><a 
href="data-analysis-with-geometry.html#the-curse-of-dimensionality"><i class="fa fa-check"></i><b>27.6</b> The curse of dimensionality</a></li> <li class="chapter" data-level="27.7" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#summary-2"><i class="fa fa-check"></i><b>27.7</b> Summary</a></li> </ul></li> <li class="chapter" data-level="28" data-path="linear-regression.html"><a href="linear-regression.html"><i class="fa fa-check"></i><b>28</b> Linear Regression</a><ul> <li class="chapter" data-level="28.1" data-path="linear-regression.html"><a href="linear-regression.html#simple-regression"><i class="fa fa-check"></i><b>28.1</b> Simple Regression</a></li> <li class="chapter" data-level="28.2" data-path="linear-regression.html"><a href="linear-regression.html#inference-1"><i class="fa fa-check"></i><b>28.2</b> Inference</a><ul> <li class="chapter" data-level="28.2.1" data-path="linear-regression.html"><a href="linear-regression.html#confidence-interval"><i class="fa fa-check"></i><b>28.2.1</b> Confidence Interval</a></li> <li class="chapter" data-level="28.2.2" data-path="linear-regression.html"><a href="linear-regression.html#the-t-statistic-and-the-t-distribution"><i class="fa fa-check"></i><b>28.2.2</b> The <span class="math inline">\(t\)</span>-statistic and the <span class="math inline">\(t\)</span>-distribution</a></li> <li class="chapter" data-level="28.2.3" data-path="linear-regression.html"><a href="linear-regression.html#global-fit"><i class="fa fa-check"></i><b>28.2.3</b> Global Fit</a></li> </ul></li> <li class="chapter" data-level="28.3" data-path="linear-regression.html"><a href="linear-regression.html#some-important-technicalities"><i class="fa fa-check"></i><b>28.3</b> Some important technicalities</a></li> <li class="chapter" data-level="28.4" data-path="linear-regression.html"><a href="linear-regression.html#issues-with-linear-regression"><i class="fa fa-check"></i><b>28.4</b> Issues with linear 
regression</a><ul> <li class="chapter" data-level="28.4.1" data-path="linear-regression.html"><a href="linear-regression.html#non-linearity-of-outcome-predictor-relationship"><i class="fa fa-check"></i><b>28.4.1</b> Non-linearity of outcome-predictor relationship</a></li> <li class="chapter" data-level="28.4.2" data-path="linear-regression.html"><a href="linear-regression.html#correlated-error"><i class="fa fa-check"></i><b>28.4.2</b> Correlated Error</a></li> <li class="chapter" data-level="28.4.3" data-path="linear-regression.html"><a href="linear-regression.html#non-constant-variance"><i class="fa fa-check"></i><b>28.4.3</b> Non-constant variance</a></li> </ul></li> <li class="chapter" data-level="28.5" data-path="linear-regression.html"><a href="linear-regression.html#multiple-linear-regression"><i class="fa fa-check"></i><b>28.5</b> Multiple linear regression</a><ul> <li class="chapter" data-level="28.5.1" data-path="linear-regression.html"><a href="linear-regression.html#estimation-in-multivariate-regression"><i class="fa fa-check"></i><b>28.5.1</b> Estimation in multivariate regression</a></li> <li class="chapter" data-level="28.5.2" data-path="linear-regression.html"><a href="linear-regression.html#example-contd"><i class="fa fa-check"></i><b>28.5.2</b> Example (cont’d)</a></li> <li class="chapter" data-level="28.5.3" data-path="linear-regression.html"><a href="linear-regression.html#statistical-statements-contd"><i class="fa fa-check"></i><b>28.5.3</b> Statistical statements (cont’d)</a></li> <li class="chapter" data-level="28.5.4" data-path="linear-regression.html"><a href="linear-regression.html#the-f-test"><i class="fa fa-check"></i><b>28.5.4</b> The F-test</a></li> <li class="chapter" data-level="28.5.5" data-path="linear-regression.html"><a href="linear-regression.html#categorical-predictors-contd"><i class="fa fa-check"></i><b>28.5.5</b> Categorical predictors (cont’d)</a></li> </ul></li> <li class="chapter" data-level="28.6" 
data-path="linear-regression.html"><a href="linear-regression.html#interactions-in-linear-models"><i class="fa fa-check"></i><b>28.6</b> Interactions in linear models</a><ul> <li class="chapter" data-level="28.6.1" data-path="linear-regression.html"><a href="linear-regression.html#additional-issues-with-linear-regression"><i class="fa fa-check"></i><b>28.6.1</b> Additional issues with linear regression</a></li> </ul></li> </ul></li> <li class="chapter" data-level="29" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html"><i class="fa fa-check"></i><b>29</b> Linear models for classification</a><ul> <li class="chapter" data-level="29.1" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#an-example-classification-problem"><i class="fa fa-check"></i><b>29.1</b> An example classification problem</a></li> <li class="chapter" data-level="29.2" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#why-not-linear-regression"><i class="fa fa-check"></i><b>29.2</b> Why not linear regression?</a></li> <li class="chapter" data-level="29.3" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#classification-as-probability-estimation-problem"><i class="fa fa-check"></i><b>29.3</b> Classification as probability estimation problem</a></li> <li class="chapter" data-level="29.4" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#logistic-regression"><i class="fa fa-check"></i><b>29.4</b> Logistic regression</a><ul> <li class="chapter" data-level="29.4.1" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#exercises"><i class="fa fa-check"></i><b>29.4.1</b> Exercises</a></li> <li class="chapter" data-level="29.4.2" data-path="linear-models-for-classification.html"><a 
href="linear-models-for-classification.html#making-predictions"><i class="fa fa-check"></i><b>29.4.2</b> Making predictions</a></li> <li class="chapter" data-level="29.4.3" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#multiple-logistic-regression"><i class="fa fa-check"></i><b>29.4.3</b> Multiple logistic regression</a></li> <li class="chapter" data-level="29.4.4" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#exercise"><i class="fa fa-check"></i><b>29.4.4</b> Exercise</a></li> </ul></li> <li class="chapter" data-level="29.5" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#linear-discriminant-analysis"><i class="fa fa-check"></i><b>29.5</b> Linear Discriminant Analysis</a><ul> <li class="chapter" data-level="29.5.1" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#how-to-train-lda"><i class="fa fa-check"></i><b>29.5.1</b> How to train LDA</a></li> </ul></li> <li class="chapter" data-level="29.6" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#summary-3"><i class="fa fa-check"></i><b>29.6</b> Summary</a></li> </ul></li> <li class="chapter" data-level="30" data-path="solving-linear-ml-problems.html"><a href="solving-linear-ml-problems.html"><i class="fa fa-check"></i><b>30</b> Solving linear ML problems</a><ul> <li class="chapter" data-level="30.1" data-path="solving-linear-ml-problems.html"><a href="solving-linear-ml-problems.html#case-study"><i class="fa fa-check"></i><b>30.1</b> Case Study</a></li> <li class="chapter" data-level="30.2" data-path="solving-linear-ml-problems.html"><a href="solving-linear-ml-problems.html#gradient-descent"><i class="fa fa-check"></i><b>30.2</b> Gradient Descent</a><ul> <li class="chapter" data-level="30.2.1" data-path="solving-linear-ml-problems.html"><a 
href="solving-linear-ml-problems.html#logistic-regression-1"><i class="fa fa-check"></i><b>30.2.1</b> Logistic Regression</a></li> </ul></li> <li class="chapter" data-level="30.3" data-path="solving-linear-ml-problems.html"><a href="solving-linear-ml-problems.html#stochastic-gradient-descent"><i class="fa fa-check"></i><b>30.3</b> Stochastic gradient descent</a></li> <li class="chapter" data-level="30.4" data-path="solving-linear-ml-problems.html"><a href="solving-linear-ml-problems.html#parallelizing-gradient-descent"><i class="fa fa-check"></i><b>30.4</b> Parallelizing gradient descent</a></li> </ul></li> <li class="chapter" data-level="31" data-path="tree-based-methods.html"><a href="tree-based-methods.html"><i class="fa fa-check"></i><b>31</b> Tree-Based Methods</a><ul> <li class="chapter" data-level="31.1" data-path="tree-based-methods.html"><a href="tree-based-methods.html#regression-trees"><i class="fa fa-check"></i><b>31.1</b> Regression Trees</a></li> <li class="chapter" data-level="31.2" data-path="tree-based-methods.html"><a href="tree-based-methods.html#classification-decision-trees"><i class="fa fa-check"></i><b>31.2</b> Classification (Decision) Trees</a></li> <li class="chapter" data-level="31.3" data-path="tree-based-methods.html"><a href="tree-based-methods.html#specifics-of-the-partitioning-algorithm"><i class="fa fa-check"></i><b>31.3</b> Specifics of the partitioning algorithm</a><ul> <li class="chapter" data-level="31.3.1" data-path="tree-based-methods.html"><a href="tree-based-methods.html#the-predictor-space"><i class="fa fa-check"></i><b>31.3.1</b> The predictor space</a></li> <li class="chapter" data-level="31.3.2" data-path="tree-based-methods.html"><a href="tree-based-methods.html#learning-strategy"><i class="fa fa-check"></i><b>31.3.2</b> Learning Strategy</a></li> <li class="chapter" data-level="31.3.3" data-path="tree-based-methods.html"><a href="tree-based-methods.html#tree-growing"><i class="fa fa-check"></i><b>31.3.3</b> Tree 
Growing</a></li> <li class="chapter" data-level="31.3.4" data-path="tree-based-methods.html"><a href="tree-based-methods.html#deviance-as-a-measure-of-impurity"><i class="fa fa-check"></i><b>31.3.4</b> Deviance as a measure of impurity</a></li> <li class="chapter" data-level="31.3.5" data-path="tree-based-methods.html"><a href="tree-based-methods.html#other-measures-of-impurity"><i class="fa fa-check"></i><b>31.3.5</b> Other measures of impurity</a></li> <li class="chapter" data-level="31.3.6" data-path="tree-based-methods.html"><a href="tree-based-methods.html#tree-pruning"><i class="fa fa-check"></i><b>31.3.6</b> Tree Pruning</a></li> </ul></li> <li class="chapter" data-level="31.4" data-path="tree-based-methods.html"><a href="tree-based-methods.html#properties-of-tree-method"><i class="fa fa-check"></i><b>31.4</b> Properties of Tree Method</a></li> <li class="chapter" data-level="31.5" data-path="tree-based-methods.html"><a href="tree-based-methods.html#random-forests"><i class="fa fa-check"></i><b>31.5</b> Random Forests</a></li> <li class="chapter" data-level="31.6" data-path="tree-based-methods.html"><a href="tree-based-methods.html#tree-based-methods-summary"><i class="fa fa-check"></i><b>31.6</b> Tree-based methods summary</a></li> </ul></li> <li class="chapter" data-level="32" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html"><i class="fa fa-check"></i><b>32</b> Model Selection and Evaluation</a><ul> <li class="chapter" data-level="32.1" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#classifier-evaluation"><i class="fa fa-check"></i><b>32.1</b> Classifier evaluation</a></li> <li class="chapter" data-level="32.2" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#model-selection"><i class="fa fa-check"></i><b>32.2</b> Model selection</a><ul> <li class="chapter" data-level="32.2.1" 
data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#cross-validation"><i class="fa fa-check"></i><b>32.2.1</b> Cross Validation</a></li> <li class="chapter" data-level="32.2.2" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#validation-set"><i class="fa fa-check"></i><b>32.2.2</b> Validation Set</a></li> <li class="chapter" data-level="32.2.3" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#resampled-validation-set"><i class="fa fa-check"></i><b>32.2.3</b> Resampled validation set</a></li> <li class="chapter" data-level="32.2.4" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#leave-one-out-cross-validation"><i class="fa fa-check"></i><b>32.2.4</b> Leave-one-out Cross-Validation</a></li> <li class="chapter" data-level="32.2.5" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#k-fold-cross-validation"><i class="fa fa-check"></i><b>32.2.5</b> k-fold Cross-Validation</a></li> <li class="chapter" data-level="32.2.6" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#cross-validation-in-classification"><i class="fa fa-check"></i><b>32.2.6</b> Cross-Validation in Classification</a></li> <li class="chapter" data-level="32.2.7" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#comparing-models-statistically-using-cross-validation"><i class="fa fa-check"></i><b>32.2.7</b> Comparing models statistically using cross-validation</a></li> </ul></li> <li class="chapter" data-level="32.3" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#summary-4"><i class="fa fa-check"></i><b>32.3</b> Summary</a></li> </ul></li> <li class="chapter" data-level="33" data-path="unsupervised-learning-clustering.html"><a 
href="unsupervised-learning-clustering.html"><i class="fa fa-check"></i><b>33</b> Unsupervised Learning: Clustering</a><ul> <li class="chapter" data-level="33.1" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#motivating-example"><i class="fa fa-check"></i><b>33.1</b> Motivating Example</a></li> <li class="chapter" data-level="33.2" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#some-preliminaries"><i class="fa fa-check"></i><b>33.2</b> Some Preliminaries</a></li> <li class="chapter" data-level="33.3" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#cluster-analysis"><i class="fa fa-check"></i><b>33.3</b> Cluster Analysis</a></li> <li class="chapter" data-level="33.4" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#dissimilarity-based-clustering"><i class="fa fa-check"></i><b>33.4</b> Dissimilarity-based Clustering</a></li> <li class="chapter" data-level="33.5" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#k-means-clustering"><i class="fa fa-check"></i><b>33.5</b> K-means Clustering</a></li> <li class="chapter" data-level="33.6" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#choosing-the-number-of-clusters"><i class="fa fa-check"></i><b>33.6</b> Choosing the number of clusters</a></li> <li class="chapter" data-level="33.7" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#summary-5"><i class="fa fa-check"></i><b>33.7</b> Summary</a></li> </ul></li> <li class="chapter" data-level="34" data-path="unsupervised-learning-dimensionality-reduction.html"><a href="unsupervised-learning-dimensionality-reduction.html"><i class="fa fa-check"></i><b>34</b> Unsupervised Learning: Dimensionality Reduction</a><ul> <li class="chapter" 
data-level="34.1" data-path="unsupervised-learning-dimensionality-reduction.html"><a href="unsupervised-learning-dimensionality-reduction.html#principal-component-analysis"><i class="fa fa-check"></i><b>34.1</b> Principal Component Analysis</a><ul> <li class="chapter" data-level="34.1.1" data-path="unsupervised-learning-dimensionality-reduction.html"><a href="unsupervised-learning-dimensionality-reduction.html#solving-the-pca"><i class="fa fa-check"></i><b>34.1.1</b> Solving the PCA</a></li> </ul></li> <li class="chapter" data-level="34.2" data-path="unsupervised-learning-dimensionality-reduction.html"><a href="unsupervised-learning-dimensionality-reduction.html#multidimensional-scaling"><i class="fa fa-check"></i><b>34.2</b> Multidimensional Scaling</a></li> <li class="chapter" data-level="34.3" data-path="unsupervised-learning-dimensionality-reduction.html"><a href="unsupervised-learning-dimensionality-reduction.html#summary-6"><i class="fa fa-check"></i><b>34.3</b> Summary</a></li> </ul></li> </ul> </nav> </div> <div class="book-body"> <div class="body-inner"> <div class="book-header" role="navigation"> <h1> <i class="fa fa-circle-o-notch fa-spin"></i><a href="./">Lecture Notes: Introduction to Data Science</a> </h1> </div> <div class="page-wrapper" tabindex="-1" role="main"> <div class="page-inner"> <section class="normal" id="section-"> <div id="setting-up-the-data-science-toolbox" class="section level1"> <h1><span class="header-section-number">4</span> Setting up the Data Science Toolbox</h1> <div id="rrstudio" class="section level2"> <h2><span class="header-section-number">4.1</span> R/Rstudio</h2> <p>Here we setup R, RStudio and anything else we will use in the course.</p> <div id="some-history" class="section level3"> <h3><span class="header-section-number">4.1.1</span> Some history</h3> <p>R is an offspring of S, a language created in AT&amp;T Labs by <NAME> (now at Stanford) and others in 1976 with the goal of creating an environment for statistical 
computing and data analysis. The standard for the language in current use was settled in 1998. That same year, “S” won the ACM Software System award, awarded to software systems “that have a lasting influence, reflected in contributions to concepts, in commercial acceptance, or both”.</p> <p>In 1991, <NAME> and <NAME> created R to provide an open source implementation of the S language and environment. They also redesigned the language to enforce lexical scoping rules. It has been maintained by the R core group since 1997, and in 2015 an R consortium, including Microsoft, Google, and others, was created.</p> <p>Along with Python it is one of the most popular environments for data analysis (e.g., figure below from <a href="https://www.kdnuggets.com/2018/05/poll-tools-analytics-data-science-machine-learning-results.html">KDNuggets 2018 software survey</a></p> <p><img src="img/kdnuggets-2018.jpg" /></p> <p>We use R for this class because we find that besides it being a state-of-the-art data analysis environment, it provides a clean end-to-end platform for teaching material across the data management-modeling-communication spectrum that we study in class. However, be aware that as you move on in the Data Science field you most likely will need to add Python to your toolbelt.</p> </div> <div id="setting-up-r" class="section level3"> <h3><span class="header-section-number">4.1.2</span> Setting up R</h3> <p>R is a free, open source, environment for data analysis. It is available as a free binary download for Mac, Linux and Windows. For the more adventorous, it can also be compiled from source. 
To install R in your computer go to <a href="https://cran.r-project.org/index.html" class="uri">https://cran.r-project.org/index.html</a> and download and install the appropriate binary file.</p> <p><img src="img/cran.png" /></p> <p>This will install the base R system: the R programming language, a few packages for common data analyses and a development environment.</p> </div> <div id="setting-up-rstudio" class="section level3"> <h3><span class="header-section-number">4.1.3</span> Setting up Rstudio</h3> <p>We will actually use Rstudio to interact with R. Rstudio is a very powerful application to make data analysis with R easier to do. To install go to <a href="https://www.rstudio.com/products/rstudio/download/" class="uri">https://www.rstudio.com/products/rstudio/download/</a> and download the appropriate version of Rstudio.</p> <p><img src="img/rstudio.png" /></p> </div> <div id="a-first-look-at-rstudio" class="section level3"> <h3><span class="header-section-number">4.1.4</span> A first look at Rstudio</h3> <p>Let’s take a first look at Rstudio. The first thing you will notice is that Rstudio is divided into panes. Let’s take a look first at the <em>Console</em>.</p> </div> <div id="interactive-console" class="section level3"> <h3><span class="header-section-number">4.1.5</span> Interactive Console</h3> <p>The most immediate way to interact with R is through the interactive console. Here we can write R instructions to perform our data analyses. We want to start using data so the first instructions we will look at deal with loading data.</p> <p>When you installed R, a few illustrative datasets were installed as well. Let’s take a look at the list of datasets you now have access to. Write the following command in the console</p> <p><img src="img/rstudio_data.png" /></p> <p>This will list names and descriptions of datasets available in your R installation. Let’s try to find out more information about these datasets. 
In R, the first attempt to get help with something is to use the <code>?</code> operator.
Let’s take a closer look at how these instructions are put together.</p> <p><em>expressions</em>: first of all, we call these instructions <em>expressions</em>, which are just text that R can evaluate into a value. <code>View(swiss)</code> is an expression.</p> <p><em>values</em>: so, what’s a value? They are numbers, strings, data frames, etc. This is the data we will be working with. The number <code>2</code> is a value. So is the string <code>&quot;Hector&quot;</code>.</p> <p>So, what value is produced when R evaluates the expression <code>View(swiss)</code>? Nothing, which we also treat as a value. That wasn’t very interesting, but it does have a side effect: it shows the <code>swiss</code> dataset in the Data viewer.</p> <p>How about a simpler expression: <code>swiss</code>, what value is produced when R evaluates the expression <code>swiss</code>? The data.frame containing that data. Try it out in the console.</p> <p><em>names</em>: so if <code>swiss</code> isn’t a value, what is it? It is a <em>name</em>. We use these to refer to values. So, when we write the expression <code>swiss</code>, we tell R we want the <em>value</em> referenced by the name <code>swiss</code>, that is, the data itself!</p> <p><img src="img/names_values.png" /></p> <p><em>functions</em>: Besides numbers, strings, data frames, etc. another important type of value is the <em>function</em>. Functions are a series of instructions that take some input value and produce a different value. The name <code>View</code> refers to the function that takes a data frame as input, and displays it in the Data viewer. Functions are called using the parentheses we saw before: <code>View(swiss)</code>, the parentheses say that you are passing input <code>swiss</code> to the function <code>View</code>. 
We’ll see later how we can write our own functions.</p> </div> <div id="plotting" class="section level3"> <h3><span class="header-section-number">4.1.8</span> Plotting</h3> <p>Next, I want to show the <em>Plots</em> pane in Rstudio. Let’s make a plot using the <code>swiss</code> dataset:</p> <p><img src="img/rstudio_plot_swiss.png" /></p> <p>It’s not pretty, but it was very easy to produce. There’s a couple of things going on here…</p> <ul> <li><p><code>plot</code> is a function, it takes two inputs, the data to put in the x and y axes, evaluates to nothing, but creates a plot of the data</p></li> <li><p><code>swiss$Education</code> is how we refer to the <code>Education</code> column in the <code>swiss</code> data frame.</p></li> </ul> <p><strong>On your own</strong>: Make a plot using other variables in the <code>swiss</code> dataset.</p> </div> <div id="editor" class="section level3"> <h3><span class="header-section-number">4.1.9</span> Editor</h3> <p>So far, we’ve made some good progress: we know how to write expressions on the R console so that they are evaluated, we are starting to get a basic understanding of how these expressions are constructed, we can use the Data viewer to explore data frames, and made one plot that was displayed in the Plots pane. To finish this quick tour, I want to look at two more Rstudio panes: the file editor, and the File viewer.</p> <p>As you have noticed, everytime we want to evaluate an expression on the console, we have to write it in. For example, if we want to change the plot we made above to include a different variable, we have to write the whole thing again. Also, what if I forgot what expression I used to make a specific plot? Even better, what if I wanted somebody else to make the plot I just made?</p> <p>By far, one of the biggest advantages of using R over Excel or other similar programs, is that we can write expressions in scripts that are easy to share with others, making analyses easier to reproduce. 
Let’s write a script that we can use to make the same plot we just made.</p> <p>In the Rstudio menu select <code>File&gt;New File&gt;R Script</code></p> <p><img src="img/rstudio_new_script.png" /></p> <p>This will open a tab in the File editor in which we can write expressions:</p> <p><img src="img/rstudio_file.png" /></p> <p>We can then evaluate the expressions in the file one at a time, or all at the same time.</p> <p>We can then save these expressions in a script. In the Rstudio menu select <code>File&gt;Save</code> and save as a text file. The convention is to use the <code>.R</code> or <code>.r</code> file extension, e.g., <code>swiss_plot.r</code>.</p> <p><strong>On your own:</strong> Add expressions for additional plots to the script and save again. Run the new expressions.</p> </div> <div id="files-viewer" class="section level3"> <h3><span class="header-section-number">4.1.10</span> Files viewer</h3> <p>Rstudio includes a Files viewer that you can use to find and load files. You can find the Files near the Plots viewer</p> <p><img src="img/rstudio_files.png" /></p> </div> <div id="r-packages" class="section level3"> <h3><span class="header-section-number">4.1.11</span> R packages</h3> <p>Another of R’s advantages for data analysis is that it has attracted a large number of extremely useful additions provided by users worldwide. These are housed in <a href="https://cran.r-project.org/web/packages/index.html">CRAN</a>.</p> <p>In this course we will make a lot of use of a set of packages bundled together into the <code>tidyverse</code> by <NAME> and others. These packages make preparing, modeling and visualizing certain kinds data (which covers the vast majority of use cases) quite fun and pleasent. There is a webpage for the general tidyverse project: <a href="http://tidyverse.org" class="uri">http://tidyverse.org</a>, which includes pages for each of the packages included there.</p> <p>Let’s install the <code>tidyverse</code> into your R environment. 
There are two ways of installing packages. In the console, you can use the expression:</p> <p>In Rstudio, you can use the <em>Packages</em> tab:</p> <p><img src="img/rstudio_install_packages.png" /></p> <p><strong>On your own:</strong> Install the following additional packages which we will use later on: <code>rvest</code>, <code>stringr</code>, <code>nycflights13</code> and <code>broom</code>.</p> </div> <div id="additional-r-resources" class="section level3"> <h3><span class="header-section-number">4.1.12</span> Additional R resources</h3> <p>Resources for learning and reading about R are listed in our <a href="http://www.hcbravo.org/IntroDataSci/resources/">here</a>. Of note are the <a href="http://swirlstats.com/">swirl project</a> and DataCamp’s [introduction to R] course.</p> </div> <div id="literate-programming" class="section level3"> <h3><span class="header-section-number">4.1.13</span> Literate Programming</h3> <p>One last note before we get started. R has great support for <a href="http://en.wikipedia.org/wiki/Literate_programming">literate programming</a>, where source code that contains both code, the result of evaluating that code, and text explaining that code co-exist in a single document. This is extremely valuable in data analysis, as many choices made by data analysts are worth explaning in text, and interpretation of the results of analyses can co-exist with the computations used in that analysis. This document you are reading contains both text and code. In class, we will use <a href="http://rmarkdown.rstudio.com/">Rmarkdown</a> for this purpose.</p> </div> <div id="course-packages" class="section level3"> <h3><span class="header-section-number">4.1.14</span> Course packages</h3> <p>We provide a simple script to install most of the R packages used in this class. 
Download the script here:</p> <p><a href="http://www.hcbravo.org/IntroDataSci/misc/cmsc320_install_libs.R" class="uri">http://www.hcbravo.org/IntroDataSci/misc/cmsc320_install_libs.R</a></p> <p>On the command line, or on the Rstudio terminal tab, run the following:</p> <pre><code>Rscript cmsc320_install_libs.R</code></pre> </div> </div> <div id="pythonjupyter" class="section level2"> <h2><span class="header-section-number">4.2</span> Python/Jupyter</h2> <p>To use python, we recommend to use <a href="https://www.anaconda.com/">Anaconda</a> a great platform for package and software management, especially for data science.</p> <p>For the purposes of the class, it is sufficient to download the streamlined <code>Miniconda</code> distribution:</p> <p><a href="https://docs.conda.io/en/latest/miniconda.html" class="uri">https://docs.conda.io/en/latest/miniconda.html</a></p> <p>We assume you installed the Python3 version.</p> <p>To install the tools needed for the course, we have provided an environment file that lists all software we will use. Download the file here:</p> <p><a href="http://www.hcbravo.org/IntroDataSci/misc/cmsc320_conda.yml" class="uri">http://www.hcbravo.org/IntroDataSci/misc/cmsc320_conda.yml</a></p> <p>To create an Anaconda environment use <code>conda env create -f=cmsc320_conda.yml -n cmsc320</code></p> <p>Once installed and loaded (with <code>conda activate cmsc320</code>, we recommend you use jupyter lab for this course. Start it using <code>jupyter lab</code>.</p> <p>There you can create jupyter notebooks to carry out your work.</p> <p>Final note: most of the support for the libraries included in the <code>tidyverse</code> is given by the <code>pandas</code> python package:</p> <p><a href="https://pandas.pydata.org/" class="uri">https://pandas.pydata.org/</a></p> <p>We are using the tidyverse’s <code>ggplot2</code> package for plotting. There is a <code>plotnine</code> python package available that supports most of the same functionality. 
It is included in the environment we have provided.</p> </div> </div> </section> </div> </div> </div> <a href="an-illustrative-analysis.html" class="navigation navigation-prev " aria-label="Previous page"><i class="fa fa-angle-left"></i></a> <a href="part-data-representation-modeling-ingestion-and-cleaning.html" class="navigation navigation-next " aria-label="Next page"><i class="fa fa-angle-right"></i></a> </div> </div> <script src="libs/gitbook-2.6.7/js/app.min.js"></script> <script src="libs/gitbook-2.6.7/js/lunr.js"></script> <script src="libs/gitbook-2.6.7/js/clipboard.min.js"></script> <script src="libs/gitbook-2.6.7/js/plugin-search.js"></script> <script src="libs/gitbook-2.6.7/js/plugin-sharing.js"></script> <script src="libs/gitbook-2.6.7/js/plugin-fontsettings.js"></script> <script src="libs/gitbook-2.6.7/js/plugin-bookdown.js"></script> <script src="libs/gitbook-2.6.7/js/jquery.highlight.js"></script> <script src="libs/gitbook-2.6.7/js/plugin-clipboard.js"></script> <script> gitbook.require(["gitbook"], function(gitbook) { gitbook.start({ "sharing": { "github": false, "facebook": false, "twitter": false, "linkedin": false, "weibo": false, "instapaper": false, "vk": false, "all": ["facebook", "twitter", "linkedin", "weibo", "instapaper"] }, "fontsettings": { "theme": "white", "family": "sans", "size": 2 }, "edit": { "link": null, "text": null }, "history": { "link": null, "text": null }, "view": { "link": null, "text": null }, "download": null, "toc": { "collapse": "section", "scroll_highlight": true } }); }); </script> <!-- dynamically load mathjax for compatibility with self-contained --> <script> (function () { var script = document.createElement("script"); script.type = "text/javascript"; var src = "true"; if (src === "" || src === "true") src = "https://mathjax.rstudio.com/latest/MathJax.js?config=TeX-MML-AM_CHTML"; if (location.protocol !== "file:") if (/^https?:/.test(src)) src = src.replace(/^https?:/, ''); script.src = src; 
document.getElementsByTagName("head")[0].appendChild(script); })(); </script> </body> </html> <file_sep>/materials/lectures/IntroStatLearn/IntroStatLearn.Rmd --- title: 'Introduction to Statistical Learning' author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` One of the purposes of this class is for you to learn Statistical and Machine Learning techniques commonly used in data analysis. By the end of the term, you should be able to read papers that use these methods critically and analyze data using them. When using any of these tools we will be we will be asking ourselves if our findings are "statistically significant". For example, if we make use of a classification algorithm and find that we can correctly predict an outcome in 70 out of our 100 cases, how can we determine if this could have happened by chance alone? To be able to answer these questions, we need to understand some basic probabilistic and statistical principles. In this section we will review some of these principles. ## Variation, randomness and stochasticity In the preceeding sections of the class we have not spoken too much about randomness and stochasticity. We have spoken about _variation_ though. When we discussed the notion of _spread_ in a given dataset, measured by the sample standard deviation, for example, we are referring to the fact that in a population of entities (e.g., images of cats) there is naturally occuring variation in measurements. Notice that we can discuss the notation of _variation_ without referring to any randomness, stochasticity or noise. Why probability then? Because, we do want to distinguish, when possible, between natural occuring variation and randomness or stochasticity. For instance, suppose we want to learn something about education loan debt for 19-30 year olds in Maryland. 
We could find loan debt for **all** 19-30 year old Maryland residents, and calculate average and standard deviation. But that's difficult to do for all residents. So, instead we sample (say by randomly sending Twitter surveys), and _estimate_ the average and standard deviation of debt in this population from the sample. The issue is, we could do the same from a different random sample and get a different set of estimates. Why? Because there is naturally-occuring variation in this population. So, a simple question to ask is, how good are our _estimates_ of debt mean and standard deviation from sample of 19-30 year old Marylanders? Now, suppose we build a predictive model of loan debt for 19-30 year old Marylanders based on other variables (e.g., sex, income, education, wages, etc.) from our sample. How good will this model perform when predicting debt in general? We use probability and statistics to answer these questions. We use probability to capture stochasticity in the sampling process and model naturally occuring variation in measurements in a population of interest. One final word, the term _population_ which we use extensively here means **the entire** collection of entities we want to model. This could include people, but also images, text, GO positions, etc. ### Random variables The basic concept in our discussion of probability is the _random variable_. Consider a situation where you are tasked with performing spam detection in tweets. You sample a tweet at random from the set of all tweets ever written and have a human expert decide if it is spam or not. You can denote this as random variable $X \in \{0,1\}$, with value $1$ if the tweet is spam and 0 otherwise. Why is this a random value? Because it depends on the tweet that was _randomly_ sampled. ### (Discrete) Probability distributions Now we can start talking about the distribution of values of a random variable. In our example, random variable $X$ can take values 0 or 1. 
We would like to specify how these values are distributed over the set of all possible tweets one can randomly sample. We use a probability distribution to do this. A _probability distribution_ is a function $P$ over all values $X$ can take to the interval $[0,1]$ describing how values of $X$ are distributed. We start with a _probability mass function_ $p$ which must satisfy two properties: a. $p(X=x) \geq 0$ for all values $x$ that random variable $X$ can take, and b. $\sum_{\mathrm{values } X \mathrm{ can take}} p(X=x) = 1$ Now, how do we interpret $p(X=1)$? a. the probability that a uniformly random samples tweet is spam, which implies b. the proportion of tweets that are spam in the set of "all" tweets. I say "all" because it's really the set of tweets one could possibly sample. Armed with a _probability mass function_ we can talk about a _cumulative probability distribution_ that describes the sum of probability up to a given value. We saw a similar concept for the empirical distribution of data when we discussed quantiles. ### The oracle of TWEET Suppose we have a magical oracle and know for a _fact_ that 70%% of "all" tweets are spam. In that case $p(X=1) = .7$ and $p(X=0)=1-.7=.3$. ### Expectation What if I randomly chose $n=100$ tweets, how many of those do I _expect_ to be spam? _Expectation_ is a formal concept in probability: $$ \mathbb{E} X = \sum_{\mathrm{values } X \mathrm{ can take}} x p(X=x) $$ What is the expectation of $X$ in our tweet example? $$ 0 \times p(X=0) + 1 \times p(X=1) = \ 0 \times .3 + 1 \times .7 = .7 $$ Now, what is the expectation of $Y=X_1 + X_2 + \cdots + X_{100}$? What, what is $Y$? Remember we want to know what is my expectation of the number of spam tweets in a sample of $n=100$ tweets. We have $X_i=\{0,1\}$ for each of the $n=100$ tweets, each a random variable, which we obtained by uniformly and _independently_ sampling for the set of all tweets. 
With that, now random variable $Y$ equals the number of spam tweets in my sample of $n=100$ tweets. In that case: $$ \begin{aligned} \mathbb{E} Y & = \mathbb{E} (X_1 + X_2 + \cdots + X_{100}) \\ {} & = \mathbb{E} X_1 + \mathbb{E} X_2 + \cdots + \mathbb{E} X_{100} \\ {} & = .7 + .7 + \cdots + .7 \\ {} & = 100 \times .7 \\ {} & = 70 \end{aligned} $$ This uses some facts about expectation you can show in general. (1) For any pair of random variables $X_1$ and $X_2$, $\mathbb{E} (X_1 + X_2) = \mathbb{E} X_1 + \mathbb{E} X_2$. (2) For any random variable $X$ and _constant_ a, $\mathbb{E} aX = a \mathbb{E} X$. ### Estimation Our discussion so far has assumed that we know that $p(X=1)=.7$, but we _don't_. For our tweet analysis task, we need to _estimate_ the proportion of "all" tweets that are spam. This is where our probability model and the expectation we derive from it comes in. Given data $x_1, x_2, x_3, \ldots, x_{100}$, with 67 of those tweets labeled as spam (i.e., $x_i=1$ for 67 of them), we can say $y=\sum_i x_i=67$. Now from our discussion above, we _expect_ $y=np$ where $p=p(X=1)$, so let's use that observation to _estimate_ $p$! $$ \begin{aligned} np = 67 & \Rightarrow \\ 100p = 67 & \Rightarrow \\ \hat{p} = \frac{67}{100} & \Rightarrow \\ \hat{p} = .67 \end{aligned} $$ Our estimate is close (remember we had an oracle of TWEET), but is it any good? Notice that our estimate of $p$, $\hat{p}$ is the sample _mean_ of $x_1,x_2,\ldots,x_n$. Let's go back to our oracle of tweet to do a thought experiment and replicate how we derived our estimate from 100 tweets a few thousand times. 
```{r} # proportion of spam in the the tweet population # as given by the oracle of TWEET p <- 0.7 # let's sample 100 tweets # this function chooses between values in a vector (0 and 1) # with probability given by vector prob # we need 100 samples from this vector with replacement # since there are fewer items in the vector than the size # of the sample we are making x <- sample(c(0,1), size=100, replace=TRUE, prob=c(1-p,p)) # and the proportion that are spam (using the sample mean) phat <- mean(x) # if we had an oracle that let's us do this cheaply, # we could replicate our experiment 1000 times # (you don't in real life) # first let's write a function that gets an estimate # from a random sample get_estimate <- function(n,p=0.7) mean(sample(c(0,1), size=n, replace=TRUE, prob=c(1-p,p))) phats_100 <- replicate(1000, get_estimate(100)) hist(phats_100, xlab="p hat", xlim=c(0.5,1), main="Distribution of p estimates from 100 tweets") ``` Now what if instead of sampling $n=100$ tweets we used other sample sizes? 
```{r} par(mfrow=c(2,3)) # what if we sample 10 tweets phats_10 <- replicate(1000, get_estimate(10)) hist(phats_10, main="10 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what if we sample 100 tweets phats_100 <- replicate(1000, get_estimate(100)) hist(phats_100, main="100 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what if we sample 500 tweets phats_500 <- replicate(1000, get_estimate(500)) hist(phats_500, main="500 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what about 1000 tweets phats_1000 <- replicate(1000, get_estimate(1000)) hist(phats_1000, main="1000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what about 5000 tweets phats_5000 <- replicate(1000, get_estimate(5000)) hist(phats_5000, main="5000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what about 10000 tweets phats_10000 <- replicate(1000, get_estimate(10000)) hist(phats_10000, main="10000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) ``` We can make a couple of observations: 1. The distribution of estimated $\hat{p}$ is _centered_ at $p=.7$, our unknown population proportion, and 2. The _spread_ of the distribution depends on the number of samples $n$. This is an illustration of two central tenets of statistics that serves as the foundation of much of what we will do later in the course to interpret the models we build from data. ### Law of large numbers (LLN) Given random variables $X_1,X_2,\cdots,X_n$ with $\mathbb{E} X_i=\mu$ for all $i$: $$ \mathbb{E} Y=\frac{1}{n} \sum_i X_i = \mu $$ (under some assumptions beyond the scope of this class). This says that if you have samples from a random variable (regardless of how it is distributed) the sample mean will tend to the population mean. I.e., for averages, using the sample mean is the right thing to estimate parameters by matching expectation! In statistics speak we say that the sample mean is an _unbiased_ estimate of $\mu$. 
### Central Limit Theorem (CLT) The LLN says that estimates built using the sample mean will be centered around the correct answer, the CLT describes how these estimates are _spread_ around the correct answer. Here we will use the concept of _variance_ which is expected _spread_, measured in squared distance, from the _expected value_ of a random variable: $$ \mathrm{var(X)} = \mathbb{E} (X - \mathbb{E} X)^2 $$ Example: consider the variance of our random tweet example: $$ \begin{aligned} \mathrm{var(X)} & = \sum_{\mathrm{all values } X \mathrm{ can take}} (x-\mathbb{E} X)^2 p(X=x) \\ {} & = (0 - p)^2 \times (1-p) + (1 - p)^2 \times p \\ {} & = p^2(1-p) + (1-p)^2p \\ {} & = p(1-p) (p + (1-p)) \\ {} & = p(1-p) (p - p + 1) \\ {} & = p(1-p) \end{aligned} $$ Now, we can state the CLT: $$ Y = \frac{1}{n} \sum_{i=1} X_i $$ tends _towards_ a **normal** distribution as $n \rightarrow \infty$. This says, that as sample size increases the distribution of sample means is well approximated by a normal distribution. ### The normal distribution It describes the distribution of _continuous_ random variables over the range $(-\infty,\infty)$ using two parameters: mean $\mu$ and standard deviation $\sigma$. We write "$Y$ is normally distributed with mean $\mu$ and standard deviation $\sigma$" as $Y\sim N(\mu,\sigma)$. 
We write its _probability density function_ as:

$$
p(Y=y) = \frac{1}{\sqrt{2\pi}\sigma} \mathrm{exp} \left\{ -\frac{1}{2} \left( \frac{y-\mu}{\sigma} \right)^2 \right\}
$$

Here are three examples of probability density functions of normal distributions with mean $\mu=60,50,60$ and standard deviation $\sigma=2,2,6$:

```{r}
# 100 equally spaced values between 40 and 80
yrange <- seq(40, 80, len=100)

# values of the normal density function
density_values_1 <- dnorm(yrange, mean=60, sd=2)
density_values_2 <- dnorm(yrange, mean=50, sd=2)
density_values_3 <- dnorm(yrange, mean=60, sd=6)

# now plot the function
plot(yrange, density_values_1, type="l", col="red", lwd=2, xlab="y", ylab="density")
lines(yrange, density_values_2, col="blue", lwd=2)
lines(yrange, density_values_3, col="orange", lwd=2)
legend("topright", legend=c("mean 60, sd 2", "mean 50, sd 2", "mean 60, sd 6"), col=c("red","blue","orange"), lwd=2)
```

Like the discrete case, probability density functions for continuous random variables need to satisfy certain conditions:

a. $p(Y=y) \geq 0$ for all values $Y \in (-\infty,\infty)$, and
b. $\int_{-\infty}^{\infty} p(Y=y) dy = 1$

One way of remembering the density function of the normal distribution is that probability decays exponentially with rate $\sigma$ based on squared distance to the mean $\mu$. (Here is squared distance again!)

Also, notice the term inside the square?

$$
z = \left( \frac{y - \mu}{\sigma} \right)
$$

this is the _standardization_ transformation we saw in previous lectures. In fact the name _standardization_ comes from the _standard normal distribution_ $N(0,1)$ (mean 0 and standard deviation 1), which is very convenient to work with because its density function is much simpler:

$$
p(Z=z) = \frac{1}{\sqrt{2\pi}} \mathrm{exp} \left\{ -\frac{1}{2} z^2 \right\}
$$

In fact, if random variable $Y \sim N(\mu,\sigma)$ then random variable $Z=\frac{Y-\mu}{\sigma} \sim N(0,1)$.
### CLT continued

We need one last bit of terminology to finish the statement of the CLT. Consider data $X_1,X_2,\cdots,X_n$ with $\mathbb{E}X_i= \mu$ for all $i$, **and** $\mathrm{sd}(X_i)=\sigma$ for all $i$, and their sample mean $Y=\frac{1}{n} \sum_i X_i$. The standard deviation of $Y$ is called the _standard error_:

$$
\mathrm{se}(Y) = \frac{\sigma}{\sqrt{n}}
$$

Ok, now we can make the CLT statement precise: the distribution of $Y$ tends _towards_ $N(\mu,\frac{\sigma}{\sqrt{n}})$ as $n \rightarrow \infty$. This says that as sample size increases the distribution of sample means is well approximated by a normal distribution, and that the spread of the distribution goes to zero at the rate $\sqrt{n}$.

Disclaimer: there are a few mathematical subtleties. Two important ones are that

a. $X_1,\ldots,X_n$ are iid (independent, identically distributed) random variables, and
b. $\mathrm{var}(X) < \infty$

Let's redo our simulated replications of our tweet samples to illustrate the CLT at work:

```{r}
# we can calculate standard error for each of the
# settings we saw previously and compare these replications
# to the normal distribution given by the CLT

# let's write a function that adds a normal density
# plot for a given sample size
draw_normal_density <- function(n) {
  se <- sqrt(p*(1-p))/sqrt(n)
  f <- dnorm(seq(0.5,1,len=1000), mean=p, sd=se)
  lines(seq(0.5,1,len=1000), f, col="red", lwd=1.6)
}

par(mfrow=c(2,3))
# what if we sample 10 tweets
phats_10 <- replicate(1000, get_estimate(10))
hist(phats_10, main="10 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE)
draw_normal_density(10)
# what if we sample 100 tweets
phats_100 <- replicate(1000, get_estimate(100))
hist(phats_100, main="100 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE)
draw_normal_density(100)
# what if we sample 500 tweets
phats_500 <- replicate(1000, get_estimate(500))
hist(phats_500, main="500 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE)
draw_normal_density(500)
# what about 1000 tweets
phats_1000 <- replicate(1000, get_estimate(1000))
hist(phats_1000, main="1000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE)
draw_normal_density(1000)
# what about 5000 tweets
phats_5000 <- replicate(1000, get_estimate(5000))
hist(phats_5000, main="5000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE)
draw_normal_density(5000)
# what about 10000 tweets
phats_10000 <- replicate(1000, get_estimate(10000))
hist(phats_10000, main="10000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE)
draw_normal_density(10000)
```

Here we see the three main points of the LLN and CLT: (1) the normal density is centered around $\mu=.7$, (2) the normal approximation gets better as $n$ increases, and (3) the standard error goes to 0 as $n$ increases.

### Inference

Now let's use the CLT and LLN in data analysis. The picture of how we use probability in data analysis (statistical and machine learning) is somewhat like this:

![](inference.png)

The LLN tells us that our parameter $\hat{p}$ will be close to $p$ on average, the CLT lets us answer how confident we are that we found $p$. We do this by constructing a _confidence interval_ as follows. Since $\hat{p} \sim N(p,\frac{\sqrt{p(1-p)}}{\sqrt{n}})$, we want to find an interval $[\hat{p}_{-}, \hat{p}_{+}]$, with $\hat{p}$ at its center, with 95% of the probability specified by the CLT. Why? In that case, there is a 95% probability that the value of parameter $p$ will be within that interval.

Now, how do we calculate this interval? Since we want the interval to contain 95% of the probability, the probability for the tails (values outside this interval) will be $(1-.95)/2$ (since there are two tails).
So, the lower value of the interval will be one where the normal probability distribution (with mean $\hat{p}$ and standard deviation $\frac{\sqrt{\hat{p}(1-\hat{p})}}{\sqrt{n}}$) is such that $P(Y \leq \hat{p}_{-}) = .05/2$, which we can calculate using the `qnorm` function in R:

$$
\begin{align}
\hat{p}_{-} & = \mathtt{qnorm}(.05/2, \hat{p}, \frac{\sqrt{\hat{p}(1-\hat{p})}}{\sqrt{n}}) \\
{} & = \hat{p} + \mathtt{qnorm}(.05/2,0, \frac{\sqrt{\hat{p}(1-\hat{p})}}{\sqrt{n}})
\end{align}
$$

The upper value of the interval is computed with probability $1-(.05/2)$, which by the symmetry of the normal distribution is given by $\hat{p}_{+} = \hat{p} - \mathtt{qnorm}(.05/2,0, \frac{\sqrt{\hat{p}(1-\hat{p})}}{\sqrt{n}})$.

Let's see how these intervals look for our twitter spam example:

```{r, message=FALSE}
library(dplyr)
set.seed(1)

# let's construct confidence intervals for samples of size n=10,100,500,1000,10000
tab <- data.frame(sample_size=c(10,100,500,1000,10000)) %>%
  mutate(phat = sapply(sample_size,get_estimate)) %>%
  mutate(se = sqrt(phat*(1-phat)) / sqrt(sample_size)) %>%
  mutate(lower = phat + qnorm(.05/2, sd=se)) %>%
  mutate(upper = phat + -qnorm(.05/2, sd=se))
knitr::kable(tab)
```

For our sample of $n=500$, we would say that our estimate of $p$ is $`r round(tab$phat[3],2)` \pm `r round(qnorm(.05/2,sd=tab$se[3]),2)`$. A compact way of writing that is that our estimate of $p$ is ${}_{`r round(tab$lower[3],2)`}`r round(tab$phat[3],2)`_{`r round(tab$upper[3],2)`}$.

### Hypothesis testing

How else is this framework useful? Suppose that before I sampled tweets I thought (hypothesized) that more than 50% of tweets are spam. One very popular way of thinking about this problem is to reject the hypothesis that this is not the case. In this case we have a _null_ hypothesis that 50% or less of tweets are spam, against an _alternative_ hypothesis that more than 50% of tweets are spam.
You will see this written in statistics textbooks as:

$$
\begin{align}
H_0: \, & p \leq .5 & \textrm{(null)} \\
H_1: \, & p > .5 & \textrm{(alternative)}
\end{align}
$$

Note: this is a _one-sided_ test vs. a _two-sided_ test where the null hypothesis is that $p=.5$ and the alternative is $p \neq .5$.

According to the CLT, estimates $\hat{p}$ of $p$ from $n$ samples would be distributed as $N(.5, \frac{\sqrt{.5(1-.5)}}{\sqrt{n}})$ (we use $p=.5$ as this is the worst case for the hypothesis we want to test). Once we do have our sample of $n$ tweets we can get an estimate $\hat{p}$ as we did before. If we see that $\hat{p}$ (sample mean from our sample of tweets) is _too far_ from $p=.5$ then we could reject the _null_ hypothesis since the estimate we derived from the data we have is not statistically consistent with the _null_ hypothesis.

Now, how do we say our estimate $\hat{p}$ is too far? Here, we use the probability model given by the CLT. If $P(Y \leq \hat{p}) \geq .95$ under the null model (of $p=.5$), we say it is too far and we reject.

![](testing.png)

This 95% rejection threshold is conservative, but somewhat arbitrary. So we use one more metric, $P(Y \geq \hat{p})$ (the infamous p-value) to say: we could reject this hypothesis for all thresholds greater than this p-value.

Let's see how testing the hypothesis $p > .5$ would look for our tweet example

```{r}
tab <- tab %>%
  mutate(p_value = 1-pnorm(phat, mean=.5, sd=se))
knitr::kable(tab)
```

Notice that rejection occurs when the parameter value for the null hypothesis $p=.5$ is outside the 95% confidence interval.

Another note, these results hold for $n$ sufficiently large that the normal distribution in the CLT provides a good approximation of the distribution of estimates $\hat{p}$. In cases where $n$ is smaller, the $t$-distribution, as opposed to the normal distribution, provides a better approximation of the distribution of estimates $\hat{p}$.
In that case, instead of using `pnorm` in the calculations above, we would use `pt` (for $t$-distribution) and the testing procedure above is referred to as a $t$-test (one-sided or two-sided as above). Now, as $n$ grows, the $t$-distribution approaches a normal distribution which is why analysts use the $t$-test regularly. ### Summary **Inference**: estimate parameter from data based on assumed probability model (for example, matching expectation. We'll see later another method called maximum likelihood). For _averages_ the LLN and CLT tells us how to compute probabilities from a single parameter estimate, that is, derived from one dataset of samples. With these probabilities we can construct confidence intervals for our estimate. **Testing**: Having a hypothesis about our parameter of interest, we can use probability _under this hypothesis_ to see how statistically consistent our data is with that hypothesis, and reject the hypothesis if data is not statistically consistent enough (again using probability from CLT when dealing with averages). ### Probability Distributions In this example we saw three distributions: #### Bernoulli Notation: $X \sim \mathrm{Bernoulli}(p)$. Values: $X \in \{0,1\}$ Parameter: $p$, $p(X=1)=p$ (probability of success). Expected Value: $\mathbb{E} X = p$ Variance: $\mathrm{var}(X) = p(1-p)$. We can write the probability mass function as $$ p(X=x)=p^x(1-p)^{(1-x)} $$ #### Binomial This corresponds to the number of $1$'s in a draw of $n$ independent $\mathrm{Bernoulli}(p)$ random variables. Notation: $X \sim \mathrm{Bin(n,p)}$. 
Values: $X \in \{0,1,2,\ldots,n\}$

Parameters: $p$ (probability of success), $n$ number of Bernoulli draws

Expected Value: $\mathbb{E} X=np$

Variance: $\mathrm{var}(X) = np(1-p)$

Here the probability mass function is a little more complicated since we have many different ways in which $n$ draws of independent Bernoulli random variables result in the same number of successes

$$
p(X=k) = \binom{n}{k} p^k(1-p)^{n-k}
$$

#### Normal (Gaussian) distribution

Notation: $X \sim N(\mu,\sigma)$

Values: $X \in \mathbb{R}$

Parameters: mean $\mu$, standard deviation $\sigma$

Expected Value: $\mathbb{E} X = \mu$

Variance: $\mathrm{var}(X) = \sigma^2$

The probability density function was given above.

A useful reference for probability distributions can be found here: [https://blog.cloudera.com/blog/2015/12/common-probability-distributions-the-data-scientists-crib-sheet/](https://blog.cloudera.com/blog/2015/12/common-probability-distributions-the-data-scientists-crib-sheet/)

### Distributions in R

For a majority of common distributions, R has the so-called `d,p,q,r` family of functions:

| function | use |
|----------|-----|
| `d` | probability density (or mass) function |
| `p` | cumulative probability function |
| `q` | quantile function |
| `r` | random value generator |

For example, to use these for the Binomial distribution:

```{r, eval=FALSE}
# using size=10, prob=.3
# compute probability mass function value for k=4 successes
dbinom(4, size=10, prob=.3)

# compute cumulative probability function for k=4 successes
pbinom(4, size=10, prob=.3)

# compute the number of successes corresponding to the .80th quantile
qbinom(.8, size=10, prob=.3)

# generate a random value k
rbinom(1, size=10, prob=.3)
```

### Joint and conditional probability

Suppose that for each tweet I sample I can also say if it has _a lot_ of retweets or not. So, I have another binary random variable $Y \in \{0,1\}$ where $Y=1$ indicates the sampled tweet has a lot of retweets. (Note, we could say $Y\sim \mathrm{Bernoulli}(p_Y)$.)
So we could illustrate the population of "all" tweets as

![](joint.png)

We can talk of the joint probability distribution of $X$ and $Y$: $p(X=x, Y=y)$. Here we have the same conditions:

1. $p(X=x,Y=y)\geq 0$ for all combinations of values $x$ and $y$, and
2. $\sum_{\text{all combinations of values } X,Y \text{ can take}} p(X=x,Y=y) = 1$

We can also talk about _conditional probability_ where we look at the probability of a tweet being spam _conditioned_ on it not having lots of retweets:

$$
p(X=x | Y=y)
$$

which also needs to satisfy the properties of a probability distribution. So to make sure

$$
\sum_{\text{all values } X \text{ can take}} p(X=x|Y=y) = 1
$$

we define

$$
p(X=x | Y=y) = \frac{p(X=x,Y=y)}{p(Y=y)}
$$

This also lets us talk about _conditional independence_: if the probability of spam _does not_ depend on a tweet having lots of retweets, that is $p(X=x) = p(X=x|Y=y)$ for all $y$, then we say $X$ is _conditionally independent_ of $Y$.

Consider the tweet diagram above, is $X$ conditionally independent of $Y$? What would the diagram look like if $X$ was conditionally independent of $Y$?

One more note, you can also see that for conditionally independent variables, the joint probability has an easy form $p(X=x,Y=y)=p(X=x)p(Y=y)$, which generalizes to more than two independent random variables.

### Conditional expectation

With conditional probability we can start talking about conditional expectation, which generalizes the concept of expectation we saw before. For example, the _conditional expected value_ (conditional mean) of $X$ given $Y=y$ is

$$
\mathbb{E} [ X|Y=y ] = \sum_{\text{all values } X \text{ can take}} x p(X=x|Y=y)
$$

This notion of conditional expectation, which follows from conditional probability, will serve as the basis for our Machine Learning method studies in the next few lectures!

### Maximum likelihood

One last note.
We saw before how we estimated a parameter from matching expectation from a probability model with what we observed in data. The most popular method of estimation uses a similar idea: given data $x_1,x_2,\ldots,x_n$ and an assumed model of their distribution, e.g., $X_i\sim \mathrm{Bernoulli}(p)$ for all $i$, and they are iid, let's find the value of parameter $p$ that maximizes the likelihood (or probability) of the data we observe under this assumed probability model. We call the resulting estimate the _maximum likelihood estimate_.

Here are some fun exercises to try:

1) Given a sample $x_1$ with $X_1 \sim N(\mu,1)$, show that the maximum likelihood estimate of $\mu$, $\hat{\mu}=x_1$.

It is most often convenient to _minimize negative log-likelihood_ instead of maximizing likelihood. So in this case:

$$
\begin{align}
-\mathscr{L}(\mu) & = - \log p(X_1=x_1) \\
{} & = \log{\sqrt{2\pi}} + \frac{1}{2}(x_1 - \mu)^2
\end{align}
$$

To minimize this function of $\mu$ we can ignore all terms that are independent of $\mu$, and concentrate only on minimizing the last term. Now, this term is always positive, so the smallest value it can have is 0. So, we minimize it by setting $\hat{\mu}=x_1$.

2) Given a sample $x_1,x_2,\ldots,x_n$ of $n$ iid random variables with $X_i \sim N(\mu,1)$ for all $i$, show that the maximum likelihood estimate of $\mu$, $\hat{\mu}=\overline{x}$ the sample mean!

Here we would follow a similar approach, write out the negative log likelihood as a function $f(\mu;x_i)$ of $\mu$ that depends on data $x_i$. Two useful properties here are:

1. $p(X_1=x_1,X_2=x_2,\ldots,X_n=x_n)=p(X_1=x_1)p(X_2=x_2)\cdots p(X_n=x_n)$, and
2. $\log \prod_i f(\mu;x_i) = \sum_i \log f(\mu;x_i)$

Then find a value of $\mu$ that minimizes this function. Hint: we saw this when we showed that the sample mean is the minimizer of total squared distance in our exploratory analysis unit!
<file_sep>/materials/quizzes/transformations.md --- title: Transformations Exercise author: CMSC320 geometry: margin=1in fontfamily: utopia --- Consider data for variable $\mathbf{x}=x_1,x_2,\ldots,x_n$. We use $\overline{x}$ to denote the sample mean of $\mathbf{x}$, and $s_x$ is the sample standard deviation of $\mathbf{x}$. ## Part I For each of the following three transformations derive (a) the sample mean $\overline{z}$, and (b) the sample standard deviation $s_z$. 1. Centering $$ z_i = (x_i - \overline{x}) $$ 2. Scaling $$ z_i = \frac{x_i}{s_x} $$ 3. Centering and scaling (standardizing) $$ z_i = \frac{(x_i - \overline{x})}{s_x} $$ ## Part II 4. Consider transformation $z_i = \log{x_i}$. Show that the sample mean $\overline{z}$ equals the logarithm of the geometric sample mean of the original data $x_i$. _Note_: The sample mean we use most commonly is the _arithmetic_ mean ($\overline{x}=\frac{1}{n}\sum_i x_i$). For strictly positive data, especially where there is skew, the _geometric_ mean is a better summary of central trend. It is defined as: $$ \mathrm{gm}(\mathbf{x})=\left( \prod_i x_i \right)^{1/n} $$ So, your problem is to show that $\overline{z} = \log{\mathrm{gm}(\mathbf{x})}$. 
<file_sep>/materials/lecture-notes/_book/principles-basic-operations.html <!DOCTYPE html> <html lang="" xml:lang=""> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <title>6 Principles: Basic Operations | Lecture Notes: Introduction to Data Science</title> <meta name="description" content="6 Principles: Basic Operations | Lecture Notes: Introduction to Data Science" /> <meta name="generator" content="bookdown 0.18 and GitBook 2.6.7" /> <meta property="og:title" content="6 Principles: Basic Operations | Lecture Notes: Introduction to Data Science" /> <meta property="og:type" content="book" /> <meta name="twitter:card" content="summary" /> <meta name="twitter:title" content="6 Principles: Basic Operations | Lecture Notes: Introduction to Data Science" /> <meta name="author" content="<NAME>" /> <meta name="date" content="2020-04-26" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <meta name="apple-mobile-web-app-capable" content="yes" /> <meta name="apple-mobile-web-app-status-bar-style" content="black" /> <link rel="prev" href="measurements-and-data-types.html"/> <link rel="next" href="principles-more-operations.html"/> <script src="libs/jquery-2.2.3/jquery.min.js"></script> <link href="libs/gitbook-2.6.7/css/style.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-table.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-bookdown.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-highlight.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-search.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-fontsettings.css" rel="stylesheet" /> <link href="libs/gitbook-2.6.7/css/plugin-clipboard.css" rel="stylesheet" /> <script src="libs/htmlwidgets-1.5.1/htmlwidgets.js"></script> <link href="libs/str_view-0.1.0/str_view.css" rel="stylesheet" /> <script src="libs/str_view-binding-1.4.0/str_view.js"></script> <style type="text/css"> 
a.sourceLine { display: inline-block; line-height: 1.25; } a.sourceLine { pointer-events: none; color: inherit; text-decoration: inherit; } a.sourceLine:empty { height: 1.2em; position: absolute; } .sourceCode { overflow: visible; } code.sourceCode { white-space: pre; position: relative; } pre.sourceCode { margin: 0; } @media screen { div.sourceCode { overflow: auto; } } @media print { code.sourceCode { white-space: pre-wrap; } a.sourceLine { text-indent: -1em; padding-left: 1em; } } pre.numberSource a.sourceLine { position: relative; } pre.numberSource a.sourceLine:empty { position: absolute; } pre.numberSource a.sourceLine::before { content: attr(data-line-number); position: absolute; left: -5em; text-align: right; vertical-align: baseline; border: none; pointer-events: all; -webkit-touch-callout: none; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; padding: 0 4px; width: 4em; color: #aaaaaa; } pre.numberSource { margin-left: 3em; border-left: 1px solid #aaaaaa; padding-left: 4px; } div.sourceCode { } @media screen { a.sourceLine::before { text-decoration: underline; } } code span.al { color: #ff0000; font-weight: bold; } /* Alert */ code span.an { color: #60a0b0; font-weight: bold; font-style: italic; } /* Annotation */ code span.at { color: #7d9029; } /* Attribute */ code span.bn { color: #40a070; } /* BaseN */ code span.bu { } /* BuiltIn */ code span.cf { color: #007020; font-weight: bold; } /* ControlFlow */ code span.ch { color: #4070a0; } /* Char */ code span.cn { color: #880000; } /* Constant */ code span.co { color: #60a0b0; font-style: italic; } /* Comment */ code span.cv { color: #60a0b0; font-weight: bold; font-style: italic; } /* CommentVar */ code span.do { color: #ba2121; font-style: italic; } /* Documentation */ code span.dt { color: #902000; } /* DataType */ code span.dv { color: #40a070; } /* DecVal */ code span.er { color: #ff0000; font-weight: bold; } /* Error */ code 
span.ex { } /* Extension */ code span.fl { color: #40a070; } /* Float */ code span.fu { color: #06287e; } /* Function */ code span.im { } /* Import */ code span.in { color: #60a0b0; font-weight: bold; font-style: italic; } /* Information */ code span.kw { color: #007020; font-weight: bold; } /* Keyword */ code span.op { color: #666666; } /* Operator */ code span.ot { color: #007020; } /* Other */ code span.pp { color: #bc7a00; } /* Preprocessor */ code span.sc { color: #4070a0; } /* SpecialChar */ code span.ss { color: #bb6688; } /* SpecialString */ code span.st { color: #4070a0; } /* String */ code span.va { color: #19177c; } /* Variable */ code span.vs { color: #4070a0; } /* VerbatimString */ code span.wa { color: #60a0b0; font-weight: bold; font-style: italic; } /* Warning */ </style> </head> <body> <div class="book without-animation with-summary font-size-2 font-family-1" data-basepath="."> <div class="book-summary"> <nav role="navigation"> <ul class="summary"> <li><a href="http://bit.ly/hcb-ids">CMSC320 Intro. 
Data Science</a></li> <li><a href="http://www.hcbravo.org"><NAME></a></li> <li class="divider"></li> <li class="chapter" data-level="1" data-path="index.html"><a href="index.html"><i class="fa fa-check"></i><b>1</b> Preamble</a></li> <li class="chapter" data-level="2" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html"><i class="fa fa-check"></i><b>2</b> Introduction and Overview</a><ul> <li class="chapter" data-level="2.1" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#what-is-data-science"><i class="fa fa-check"></i><b>2.1</b> What is Data Science?</a><ul> <li class="chapter" data-level="2.1.1" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#data"><i class="fa fa-check"></i><b>2.1.1</b> Data</a></li> <li class="chapter" data-level="2.1.2" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#specific-questions"><i class="fa fa-check"></i><b>2.1.2</b> Specific Questions</a></li> <li class="chapter" data-level="2.1.3" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#interdisciplinary-activities"><i class="fa fa-check"></i><b>2.1.3</b> Interdisciplinary Activities</a></li> <li class="chapter" data-level="2.1.4" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#data-centric-artifacts-and-applications"><i class="fa fa-check"></i><b>2.1.4</b> Data-Centric Artifacts and Applications</a></li> </ul></li> <li class="chapter" data-level="2.2" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#why-data-science"><i class="fa fa-check"></i><b>2.2</b> Why Data Science?</a></li> <li class="chapter" data-level="2.3" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#data-science-in-society"><i class="fa fa-check"></i><b>2.3</b> Data Science in Society</a></li> <li class="chapter" data-level="2.4" 
data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#course-organization"><i class="fa fa-check"></i><b>2.4</b> Course Organization</a></li> <li class="chapter" data-level="2.5" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#general-workflow"><i class="fa fa-check"></i><b>2.5</b> General Workflow</a><ul> <li class="chapter" data-level="2.5.1" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#defining-the-goal"><i class="fa fa-check"></i><b>2.5.1</b> Defining the Goal</a></li> <li class="chapter" data-level="2.5.2" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#data-collection-and-management"><i class="fa fa-check"></i><b>2.5.2</b> Data Collection and Management</a></li> <li class="chapter" data-level="2.5.3" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#modeling"><i class="fa fa-check"></i><b>2.5.3</b> Modeling</a></li> <li class="chapter" data-level="2.5.4" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#model-evaluation"><i class="fa fa-check"></i><b>2.5.4</b> Model Evaluation</a></li> <li class="chapter" data-level="2.5.5" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#presentation"><i class="fa fa-check"></i><b>2.5.5</b> Presentation</a></li> <li class="chapter" data-level="2.5.6" data-path="introduction-and-overview.html"><a href="introduction-and-overview.html#deployment"><i class="fa fa-check"></i><b>2.5.6</b> Deployment</a></li> </ul></li> </ul></li> <li class="chapter" data-level="3" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html"><i class="fa fa-check"></i><b>3</b> An Illustrative Analysis</a><ul> <li class="chapter" data-level="3.1" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#gathering-data"><i class="fa fa-check"></i><b>3.1</b> Gathering 
data</a><ul> <li class="chapter" data-level="3.1.1" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#movie-ratings"><i class="fa fa-check"></i><b>3.1.1</b> Movie ratings</a></li> <li class="chapter" data-level="3.1.2" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#movie-budgets-and-revenue"><i class="fa fa-check"></i><b>3.1.2</b> Movie budgets and revenue</a></li> </ul></li> <li class="chapter" data-level="3.2" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#manipulating-the-data"><i class="fa fa-check"></i><b>3.2</b> Manipulating the data</a></li> <li class="chapter" data-level="3.3" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#visualizing-the-data"><i class="fa fa-check"></i><b>3.3</b> Visualizing the data</a></li> <li class="chapter" data-level="3.4" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#modeling-data"><i class="fa fa-check"></i><b>3.4</b> Modeling data</a></li> <li class="chapter" data-level="3.5" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#visualizing-model-result"><i class="fa fa-check"></i><b>3.5</b> Visualizing model result</a></li> <li class="chapter" data-level="3.6" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#abstracting-the-analysis"><i class="fa fa-check"></i><b>3.6</b> Abstracting the analysis</a></li> <li class="chapter" data-level="3.7" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#making-analyses-accessible"><i class="fa fa-check"></i><b>3.7</b> Making analyses accessible</a></li> <li class="chapter" data-level="3.8" data-path="an-illustrative-analysis.html"><a href="an-illustrative-analysis.html#summary"><i class="fa fa-check"></i><b>3.8</b> Summary</a></li> </ul></li> <li class="chapter" data-level="4" data-path="setting-up-the-data-science-toolbox.html"><a 
href="setting-up-the-data-science-toolbox.html"><i class="fa fa-check"></i><b>4</b> Setting up the Data Science Toolbox</a><ul> <li class="chapter" data-level="4.1" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#rrstudio"><i class="fa fa-check"></i><b>4.1</b> R/Rstudio</a><ul> <li class="chapter" data-level="4.1.1" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#some-history"><i class="fa fa-check"></i><b>4.1.1</b> Some history</a></li> <li class="chapter" data-level="4.1.2" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#setting-up-r"><i class="fa fa-check"></i><b>4.1.2</b> Setting up R</a></li> <li class="chapter" data-level="4.1.3" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#setting-up-rstudio"><i class="fa fa-check"></i><b>4.1.3</b> Setting up Rstudio</a></li> <li class="chapter" data-level="4.1.4" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#a-first-look-at-rstudio"><i class="fa fa-check"></i><b>4.1.4</b> A first look at Rstudio</a></li> <li class="chapter" data-level="4.1.5" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#interactive-console"><i class="fa fa-check"></i><b>4.1.5</b> Interactive Console</a></li> <li class="chapter" data-level="4.1.6" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#data-viewer"><i class="fa fa-check"></i><b>4.1.6</b> Data Viewer</a></li> <li class="chapter" data-level="4.1.7" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#names-values-and-functions"><i class="fa fa-check"></i><b>4.1.7</b> Names, values and functions</a></li> <li class="chapter" data-level="4.1.8" 
data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#plotting"><i class="fa fa-check"></i><b>4.1.8</b> Plotting</a></li> <li class="chapter" data-level="4.1.9" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#editor"><i class="fa fa-check"></i><b>4.1.9</b> Editor</a></li> <li class="chapter" data-level="4.1.10" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#files-viewer"><i class="fa fa-check"></i><b>4.1.10</b> Files viewer</a></li> <li class="chapter" data-level="4.1.11" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#r-packages"><i class="fa fa-check"></i><b>4.1.11</b> R packages</a></li> <li class="chapter" data-level="4.1.12" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#additional-r-resources"><i class="fa fa-check"></i><b>4.1.12</b> Additional R resources</a></li> <li class="chapter" data-level="4.1.13" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#literate-programming"><i class="fa fa-check"></i><b>4.1.13</b> Literate Programming</a></li> <li class="chapter" data-level="4.1.14" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#course-packages"><i class="fa fa-check"></i><b>4.1.14</b> Course packages</a></li> </ul></li> <li class="chapter" data-level="4.2" data-path="setting-up-the-data-science-toolbox.html"><a href="setting-up-the-data-science-toolbox.html#pythonjupyter"><i class="fa fa-check"></i><b>4.2</b> Python/Jupyter</a></li> </ul></li> <li class="chapter" data-level="" data-path="part-data-representation-modeling-ingestion-and-cleaning.html"><a href="part-data-representation-modeling-ingestion-and-cleaning.html"><i class="fa fa-check"></i>(Part) Data representation modeling, 
ingestion and cleaning</a></li> <li class="chapter" data-level="5" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html"><i class="fa fa-check"></i><b>5</b> Measurements and Data Types</a><ul> <li class="chapter" data-level="5.1" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#a-data-analysis-to-get-us-going"><i class="fa fa-check"></i><b>5.1</b> A data analysis to get us going</a></li> <li class="chapter" data-level="5.2" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#getting-data"><i class="fa fa-check"></i><b>5.2</b> Getting data</a><ul> <li class="chapter" data-level="5.2.1" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#names-values-and-functions-1"><i class="fa fa-check"></i><b>5.2.1</b> Names, values and functions</a></li> </ul></li> <li class="chapter" data-level="5.3" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#entities-and-attributes"><i class="fa fa-check"></i><b>5.3</b> Entities and attributes</a></li> <li class="chapter" data-level="5.4" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#categorical-attributes"><i class="fa fa-check"></i><b>5.4</b> Categorical attributes</a><ul> <li class="chapter" data-level="5.4.1" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#factors-in-r"><i class="fa fa-check"></i><b>5.4.1</b> Factors in R</a></li> </ul></li> <li class="chapter" data-level="5.5" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#discrete-numeric-attributes"><i class="fa fa-check"></i><b>5.5</b> Discrete numeric attributes</a></li> <li class="chapter" data-level="5.6" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#continuous-numeric-data"><i class="fa fa-check"></i><b>5.6</b> Continuous numeric 
data</a></li> <li class="chapter" data-level="5.7" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#other-examples"><i class="fa fa-check"></i><b>5.7</b> Other examples</a></li> <li class="chapter" data-level="5.8" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#other-important-datatypes"><i class="fa fa-check"></i><b>5.8</b> Other important datatypes</a></li> <li class="chapter" data-level="5.9" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#units"><i class="fa fa-check"></i><b>5.9</b> Units</a></li> <li class="chapter" data-level="5.10" data-path="measurements-and-data-types.html"><a href="measurements-and-data-types.html#quick-questions"><i class="fa fa-check"></i><b>5.10</b> Quick questions</a></li> </ul></li> <li class="chapter" data-level="6" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html"><i class="fa fa-check"></i><b>6</b> Principles: Basic Operations</a><ul> <li class="chapter" data-level="6.1" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#operations-that-subset-attributes"><i class="fa fa-check"></i><b>6.1</b> Operations that subset attributes</a><ul> <li class="chapter" data-level="6.1.1" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#select"><i class="fa fa-check"></i><b>6.1.1</b> <code>select</code></a></li> <li class="chapter" data-level="6.1.2" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#rename"><i class="fa fa-check"></i><b>6.1.2</b> <code>rename</code></a></li> </ul></li> <li class="chapter" data-level="6.2" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#operations-that-subset-entities"><i class="fa fa-check"></i><b>6.2</b> Operations that subset entities</a><ul> <li class="chapter" data-level="6.2.1" 
data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#slice"><i class="fa fa-check"></i><b>6.2.1</b> <code>slice</code></a></li> <li class="chapter" data-level="6.2.2" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#filter"><i class="fa fa-check"></i><b>6.2.2</b> <code>filter</code></a></li> <li class="chapter" data-level="6.2.3" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#sample_n-and-sample_frac"><i class="fa fa-check"></i><b>6.2.3</b> <code>sample_n</code> and <code>sample_frac</code></a></li> </ul></li> <li class="chapter" data-level="6.3" data-path="principles-basic-operations.html"><a href="principles-basic-operations.html#pipelines-of-operations"><i class="fa fa-check"></i><b>6.3</b> Pipelines of operations</a></li> </ul></li> <li class="chapter" data-level="7" data-path="principles-more-operations.html"><a href="principles-more-operations.html"><i class="fa fa-check"></i><b>7</b> Principles: More Operations</a><ul> <li class="chapter" data-level="7.1" data-path="principles-more-operations.html"><a href="principles-more-operations.html#operations-that-sort-entities"><i class="fa fa-check"></i><b>7.1</b> Operations that sort entities</a></li> <li class="chapter" data-level="7.2" data-path="principles-more-operations.html"><a href="principles-more-operations.html#operations-that-create-new-attributes"><i class="fa fa-check"></i><b>7.2</b> Operations that create new attributes</a></li> <li class="chapter" data-level="7.3" data-path="principles-more-operations.html"><a href="principles-more-operations.html#operations-that-summarize-attribute-values-over-entities"><i class="fa fa-check"></i><b>7.3</b> Operations that summarize attribute values over entities</a></li> <li class="chapter" data-level="7.4" data-path="principles-more-operations.html"><a href="principles-more-operations.html#operations-that-group-entities"><i class="fa 
fa-check"></i><b>7.4</b> Operations that group entities</a></li> <li class="chapter" data-level="7.5" data-path="principles-more-operations.html"><a href="principles-more-operations.html#vectors"><i class="fa fa-check"></i><b>7.5</b> Vectors</a></li> <li class="chapter" data-level="7.6" data-path="principles-more-operations.html"><a href="principles-more-operations.html#attributes-as-vectors"><i class="fa fa-check"></i><b>7.6</b> Attributes as vectors</a></li> <li class="chapter" data-level="7.7" data-path="principles-more-operations.html"><a href="principles-more-operations.html#functions"><i class="fa fa-check"></i><b>7.7</b> Functions</a></li> </ul></li> <li class="chapter" data-level="8" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html"><i class="fa fa-check"></i><b>8</b> Basic plotting with <code>ggplot</code></a><ul> <li class="chapter" data-level="8.1" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#plot-construction-details"><i class="fa fa-check"></i><b>8.1</b> Plot Construction Details</a><ul> <li class="chapter" data-level="8.1.1" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#mappings"><i class="fa fa-check"></i><b>8.1.1</b> Mappings</a></li> <li class="chapter" data-level="8.1.2" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#representations"><i class="fa fa-check"></i><b>8.1.2</b> Representations</a></li> </ul></li> <li class="chapter" data-level="8.2" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#frequently-used-plots"><i class="fa fa-check"></i><b>8.2</b> Frequently Used Plots</a><ul> <li class="chapter" data-level="8.2.1" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#scatter-plot"><i class="fa fa-check"></i><b>8.2.1</b> Scatter plot</a></li> <li class="chapter" data-level="8.2.2" data-path="basic-plotting-with-ggplot.html"><a 
href="basic-plotting-with-ggplot.html#bar-graph"><i class="fa fa-check"></i><b>8.2.2</b> Bar graph</a></li> <li class="chapter" data-level="8.2.3" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#histogram"><i class="fa fa-check"></i><b>8.2.3</b> Histogram</a></li> <li class="chapter" data-level="8.2.4" data-path="basic-plotting-with-ggplot.html"><a href="basic-plotting-with-ggplot.html#boxplot"><i class="fa fa-check"></i><b>8.2.4</b> Boxplot</a></li> </ul></li> </ul></li> <li class="chapter" data-level="9" data-path="brief-introduction-to-rmarkdown.html"><a href="brief-introduction-to-rmarkdown.html"><i class="fa fa-check"></i><b>9</b> Brief Introduction to Rmarkdown</a></li> <li class="chapter" data-level="10" data-path="best-practices-for-data-science-projects.html"><a href="best-practices-for-data-science-projects.html"><i class="fa fa-check"></i><b>10</b> Best Practices for Data Science Projects</a></li> <li class="chapter" data-level="11" data-path="tidy-data-i-the-er-model.html"><a href="tidy-data-i-the-er-model.html"><i class="fa fa-check"></i><b>11</b> Tidy Data I: The ER Model</a><ul> <li class="chapter" data-level="11.1" data-path="tidy-data-i-the-er-model.html"><a href="tidy-data-i-the-er-model.html#overview"><i class="fa fa-check"></i><b>11.1</b> Overview</a></li> <li class="chapter" data-level="11.2" data-path="tidy-data-i-the-er-model.html"><a href="tidy-data-i-the-er-model.html#the-entity-relationship-and-relational-models"><i class="fa fa-check"></i><b>11.2</b> The Entity-Relationship and Relational Models</a><ul> <li class="chapter" data-level="11.2.1" data-path="tidy-data-i-the-er-model.html"><a href="tidy-data-i-the-er-model.html#formal-introduction-to-keys"><i class="fa fa-check"></i><b>11.2.1</b> Formal introduction to keys</a></li> </ul></li> <li class="chapter" data-level="11.3" data-path="tidy-data-i-the-er-model.html"><a href="tidy-data-i-the-er-model.html#tidy-data"><i class="fa 
fa-check"></i><b>11.3</b> Tidy Data</a></li> </ul></li> <li class="chapter" data-level="12" data-path="sql-i-single-table-queries.html"><a href="sql-i-single-table-queries.html"><i class="fa fa-check"></i><b>12</b> SQL I: Single Table Queries</a><ul> <li class="chapter" data-level="12.1" data-path="sql-i-single-table-queries.html"><a href="sql-i-single-table-queries.html#group-by-and-summarize"><i class="fa fa-check"></i><b>12.1</b> Group-by and summarize</a></li> <li class="chapter" data-level="12.2" data-path="sql-i-single-table-queries.html"><a href="sql-i-single-table-queries.html#subqueries"><i class="fa fa-check"></i><b>12.2</b> Subqueries</a></li> </ul></li> <li class="chapter" data-level="13" data-path="two-table-operations.html"><a href="two-table-operations.html"><i class="fa fa-check"></i><b>13</b> Two-table operations</a><ul> <li class="chapter" data-level="13.1" data-path="two-table-operations.html"><a href="two-table-operations.html#left-join"><i class="fa fa-check"></i><b>13.1</b> Left Join</a></li> <li class="chapter" data-level="13.2" data-path="two-table-operations.html"><a href="two-table-operations.html#right-join"><i class="fa fa-check"></i><b>13.2</b> Right Join</a></li> <li class="chapter" data-level="13.3" data-path="two-table-operations.html"><a href="two-table-operations.html#inner-join"><i class="fa fa-check"></i><b>13.3</b> Inner Join</a></li> <li class="chapter" data-level="13.4" data-path="two-table-operations.html"><a href="two-table-operations.html#full-join"><i class="fa fa-check"></i><b>13.4</b> Full Join</a></li> <li class="chapter" data-level="13.5" data-path="two-table-operations.html"><a href="two-table-operations.html#join-conditions"><i class="fa fa-check"></i><b>13.5</b> Join conditions</a></li> <li class="chapter" data-level="13.6" data-path="two-table-operations.html"><a href="two-table-operations.html#filtering-joins"><i class="fa fa-check"></i><b>13.6</b> Filtering Joins</a></li> <li class="chapter" data-level="13.7" 
data-path="two-table-operations.html"><a href="two-table-operations.html#sql-constructs-multi-table-queries"><i class="fa fa-check"></i><b>13.7</b> SQL Constructs: Multi-table Queries</a></li> </ul></li> <li class="chapter" data-level="14" data-path="sql-system-constructs.html"><a href="sql-system-constructs.html"><i class="fa fa-check"></i><b>14</b> SQL System Constructs</a><ul> <li class="chapter" data-level="14.1" data-path="sql-system-constructs.html"><a href="sql-system-constructs.html#sql-as-a-data-definition-language"><i class="fa fa-check"></i><b>14.1</b> SQL as a Data Definition Language</a></li> <li class="chapter" data-level="14.2" data-path="sql-system-constructs.html"><a href="sql-system-constructs.html#set-operations-and-comparisons"><i class="fa fa-check"></i><b>14.2</b> Set Operations and Comparisons</a></li> <li class="chapter" data-level="14.3" data-path="sql-system-constructs.html"><a href="sql-system-constructs.html#views"><i class="fa fa-check"></i><b>14.3</b> Views</a></li> <li class="chapter" data-level="14.4" data-path="sql-system-constructs.html"><a href="sql-system-constructs.html#nulls"><i class="fa fa-check"></i><b>14.4</b> NULLs</a></li> </ul></li> <li class="chapter" data-level="15" data-path="db-parting-shots.html"><a href="db-parting-shots.html"><i class="fa fa-check"></i><b>15</b> DB Parting Shots</a><ul> <li class="chapter" data-level="15.1" data-path="db-parting-shots.html"><a href="db-parting-shots.html#database-query-optimization"><i class="fa fa-check"></i><b>15.1</b> Database Query Optimization</a></li> <li class="chapter" data-level="15.2" data-path="db-parting-shots.html"><a href="db-parting-shots.html#json-data-model"><i class="fa fa-check"></i><b>15.2</b> JSON Data Model</a></li> </ul></li> <li class="chapter" data-level="16" data-path="ingesting-data.html"><a href="ingesting-data.html"><i class="fa fa-check"></i><b>16</b> Ingesting data</a><ul> <li class="chapter" data-level="16.1" data-path="ingesting-data.html"><a 
href="ingesting-data.html#structured-ingestion"><i class="fa fa-check"></i><b>16.1</b> Structured ingestion</a><ul> <li class="chapter" data-level="16.1.1" data-path="ingesting-data.html"><a href="ingesting-data.html#csv-files-and-similar"><i class="fa fa-check"></i><b>16.1.1</b> CSV files (and similar)</a></li> <li class="chapter" data-level="16.1.2" data-path="ingesting-data.html"><a href="ingesting-data.html#excel-spreadsheets"><i class="fa fa-check"></i><b>16.1.2</b> Excel spreadsheets</a></li> </ul></li> <li class="chapter" data-level="16.2" data-path="ingesting-data.html"><a href="ingesting-data.html#scraping"><i class="fa fa-check"></i><b>16.2</b> Scraping</a><ul> <li class="chapter" data-level="16.2.1" data-path="ingesting-data.html"><a href="ingesting-data.html#scraping-from-dirty-html-tables"><i class="fa fa-check"></i><b>16.2.1</b> Scraping from dirty HTML tables</a></li> </ul></li> </ul></li> <li class="chapter" data-level="17" data-path="tidying-data.html"><a href="tidying-data.html"><i class="fa fa-check"></i><b>17</b> Tidying data</a><ul> <li class="chapter" data-level="17.1" data-path="tidying-data.html"><a href="tidying-data.html#tidy-data-1"><i class="fa fa-check"></i><b>17.1</b> Tidy Data</a></li> <li class="chapter" data-level="17.2" data-path="tidying-data.html"><a href="tidying-data.html#common-problems-in-messy-data"><i class="fa fa-check"></i><b>17.2</b> Common problems in messy data</a><ul> <li class="chapter" data-level="17.2.1" data-path="tidying-data.html"><a href="tidying-data.html#headers-as-values"><i class="fa fa-check"></i><b>17.2.1</b> Headers as values</a></li> <li class="chapter" data-level="17.2.2" data-path="tidying-data.html"><a href="tidying-data.html#multiple-variables-in-one-column"><i class="fa fa-check"></i><b>17.2.2</b> Multiple variables in one column</a></li> <li class="chapter" data-level="17.2.3" data-path="tidying-data.html"><a href="tidying-data.html#variables-stored-in-both-rows-and-columns"><i class="fa 
fa-check"></i><b>17.2.3</b> Variables stored in both rows and columns</a></li> <li class="chapter" data-level="17.2.4" data-path="tidying-data.html"><a href="tidying-data.html#multiple-types-in-one-table"><i class="fa fa-check"></i><b>17.2.4</b> Multiple types in one table</a></li> </ul></li> </ul></li> <li class="chapter" data-level="18" data-path="text-and-dates.html"><a href="text-and-dates.html"><i class="fa fa-check"></i><b>18</b> Text and Dates</a><ul> <li class="chapter" data-level="18.1" data-path="text-and-dates.html"><a href="text-and-dates.html#text"><i class="fa fa-check"></i><b>18.1</b> Text</a><ul> <li class="chapter" data-level="18.1.1" data-path="text-and-dates.html"><a href="text-and-dates.html#string-operations"><i class="fa fa-check"></i><b>18.1.1</b> String operations</a></li> <li class="chapter" data-level="18.1.2" data-path="text-and-dates.html"><a href="text-and-dates.html#regular-expressions"><i class="fa fa-check"></i><b>18.1.2</b> Regular expressions</a></li> <li class="chapter" data-level="18.1.3" data-path="text-and-dates.html"><a href="text-and-dates.html#tools-using-regular-expressions"><i class="fa fa-check"></i><b>18.1.3</b> Tools using regular expressions</a></li> <li class="chapter" data-level="18.1.4" data-path="text-and-dates.html"><a href="text-and-dates.html#extracting-attributes-from-text"><i class="fa fa-check"></i><b>18.1.4</b> Extracting attributes from text</a></li> </ul></li> <li class="chapter" data-level="18.2" data-path="text-and-dates.html"><a href="text-and-dates.html#handling-dates"><i class="fa fa-check"></i><b>18.2</b> Handling dates</a></li> </ul></li> <li class="chapter" data-level="19" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html"><i class="fa fa-check"></i><b>19</b> Entity Resolution and Record Linkage</a><ul> <li class="chapter" data-level="19.1" data-path="entity-resolution-and-record-linkage.html"><a 
href="entity-resolution-and-record-linkage.html#problem-definition"><i class="fa fa-check"></i><b>19.1</b> Problem Definition</a></li> <li class="chapter" data-level="19.2" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#one-approach-similarity-function"><i class="fa fa-check"></i><b>19.2</b> One approach: similarity function</a><ul> <li class="chapter" data-level="19.2.1" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#example-attribute-functions"><i class="fa fa-check"></i><b>19.2.1</b> Example attribute functions</a></li> </ul></li> <li class="chapter" data-level="19.3" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#solving-the-resolution-problem"><i class="fa fa-check"></i><b>19.3</b> Solving the resolution problem</a><ul> <li class="chapter" data-level="19.3.1" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#many-to-one-resolutions"><i class="fa fa-check"></i><b>19.3.1</b> Many-to-one resolutions</a></li> <li class="chapter" data-level="19.3.2" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#one-to-one-resolutions"><i class="fa fa-check"></i><b>19.3.2</b> One-to-one resolutions</a></li> <li class="chapter" data-level="19.3.3" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#other-constraints"><i class="fa fa-check"></i><b>19.3.3</b> Other constraints</a></li> </ul></li> <li class="chapter" data-level="19.4" data-path="entity-resolution-and-record-linkage.html"><a href="entity-resolution-and-record-linkage.html#discussion"><i class="fa fa-check"></i><b>19.4</b> Discussion</a></li> </ul></li> <li class="chapter" data-level="" data-path="part-exploratory-data-analysis.html"><a href="part-exploratory-data-analysis.html"><i 
class="fa fa-check"></i>(Part) Exploratory Data Analysis</a></li> <li class="chapter" data-level="20" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html"><i class="fa fa-check"></i><b>20</b> Exploratory Data Analysis: Visualization</a><ul> <li class="chapter" data-level="20.0.1" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#eda-exploratory-data-analysis"><i class="fa fa-check"></i><b>20.0.1</b> EDA (Exploratory Data Analysis)</a></li> <li class="chapter" data-level="20.1" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#visualization-of-single-variables"><i class="fa fa-check"></i><b>20.1</b> Visualization of single variables</a><ul> <li class="chapter" data-level="20.1.1" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#visualization-of-pairs-of-variables"><i class="fa fa-check"></i><b>20.1.1</b> Visualization of pairs of variables</a></li> </ul></li> <li class="chapter" data-level="20.2" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#eda-with-the-grammar-of-graphics"><i class="fa fa-check"></i><b>20.2</b> EDA with the grammar of graphics</a><ul> <li class="chapter" data-level="20.2.1" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#other-aesthetics"><i class="fa fa-check"></i><b>20.2.1</b> Other aesthetics</a></li> <li class="chapter" data-level="20.2.2" data-path="exploratory-data-analysis-visualization.html"><a href="exploratory-data-analysis-visualization.html#faceting"><i class="fa fa-check"></i><b>20.2.2</b> Faceting</a></li> </ul></li> </ul></li> <li class="chapter" data-level="21" data-path="exploratory-data-analysis-summary-statistics.html"><a 
href="exploratory-data-analysis-summary-statistics.html"><i class="fa fa-check"></i><b>21</b> Exploratory Data Analysis: Summary Statistics</a><ul> <li class="chapter" data-level="21.1" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#range"><i class="fa fa-check"></i><b>21.1</b> Range</a></li> <li class="chapter" data-level="21.2" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#central-tendency"><i class="fa fa-check"></i><b>21.2</b> Central Tendency</a><ul> <li class="chapter" data-level="21.2.1" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#derivation-of-the-mean-as-central-tendency-statistic"><i class="fa fa-check"></i><b>21.2.1</b> Derivation of the mean as central tendency statistic</a></li> </ul></li> <li class="chapter" data-level="21.3" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#spread"><i class="fa fa-check"></i><b>21.3</b> Spread</a><ul> <li class="chapter" data-level="21.3.1" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#variance"><i class="fa fa-check"></i><b>21.3.1</b> Variance</a></li> <li class="chapter" data-level="21.3.2" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#spread-estimates-using-rank-statistics"><i class="fa fa-check"></i><b>21.3.2</b> Spread estimates using rank statistics</a></li> </ul></li> <li class="chapter" data-level="21.4" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#outliers"><i class="fa fa-check"></i><b>21.4</b> Outliers</a></li> <li class="chapter" data-level="21.5" 
data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#skew"><i class="fa fa-check"></i><b>21.5</b> Skew</a></li> <li class="chapter" data-level="21.6" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#covariance-and-correlation"><i class="fa fa-check"></i><b>21.6</b> Covariance and correlation</a></li> <li class="chapter" data-level="21.7" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#postscript-finding-maximaminima-using-derivatives"><i class="fa fa-check"></i><b>21.7</b> Postscript: Finding Maxima/Minima using Derivatives</a><ul> <li class="chapter" data-level="21.7.1" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#steps-to-find-maximaminima-of-function-fx"><i class="fa fa-check"></i><b>21.7.1</b> Steps to find Maxima/Minima of function <span class="math inline">\(f(x)\)</span></a></li> <li class="chapter" data-level="21.7.2" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#notes-on-finding-derivatives"><i class="fa fa-check"></i><b>21.7.2</b> Notes on Finding Derivatives</a></li> <li class="chapter" data-level="21.7.3" data-path="exploratory-data-analysis-summary-statistics.html"><a href="exploratory-data-analysis-summary-statistics.html#resources"><i class="fa fa-check"></i><b>21.7.3</b> Resources:</a></li> </ul></li> </ul></li> <li class="chapter" data-level="22" data-path="eda-data-transformations.html"><a href="eda-data-transformations.html"><i class="fa fa-check"></i><b>22</b> EDA: Data Transformations</a><ul> <li class="chapter" data-level="22.1" data-path="eda-data-transformations.html"><a href="eda-data-transformations.html#centering-and-scaling"><i class="fa fa-check"></i><b>22.1</b> Centering and 
scaling</a></li> <li class="chapter" data-level="22.2" data-path="eda-data-transformations.html"><a href="eda-data-transformations.html#treating-categorical-variables-as-numeric"><i class="fa fa-check"></i><b>22.2</b> Treating categorical variables as numeric</a><ul> <li class="chapter" data-level="22.2.1" data-path="eda-data-transformations.html"><a href="eda-data-transformations.html#discretizing-continuous-values."><i class="fa fa-check"></i><b>22.2.1</b> Discretizing continuous values.</a></li> </ul></li> <li class="chapter" data-level="22.3" data-path="eda-data-transformations.html"><a href="eda-data-transformations.html#skewed-data"><i class="fa fa-check"></i><b>22.3</b> Skewed Data</a></li> </ul></li> <li class="chapter" data-level="23" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html"><i class="fa fa-check"></i><b>23</b> EDA: Handling Missing Data</a><ul> <li class="chapter" data-level="23.1" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#mechanisms-of-missing-data"><i class="fa fa-check"></i><b>23.1</b> Mechanisms of missing data</a></li> <li class="chapter" data-level="23.2" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#handling-missing-data"><i class="fa fa-check"></i><b>23.2</b> Handling missing data</a><ul> <li class="chapter" data-level="23.2.1" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#removing-missing-data"><i class="fa fa-check"></i><b>23.2.1</b> Removing missing data</a></li> <li class="chapter" data-level="23.2.2" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#encoding-as-missing"><i class="fa fa-check"></i><b>23.2.2</b> Encoding as missing</a></li> <li class="chapter" data-level="23.2.3" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#imputation"><i class="fa fa-check"></i><b>23.2.3</b> Imputation</a></li> </ul></li> <li 
class="chapter" data-level="23.3" data-path="eda-handling-missing-data.html"><a href="eda-handling-missing-data.html#implications-of-imputation"><i class="fa fa-check"></i><b>23.3</b> Implications of imputation</a></li> </ul></li> <li class="chapter" data-level="" data-path="part-statistical-learning.html"><a href="part-statistical-learning.html"><i class="fa fa-check"></i>(Part) Statistical Learning</a></li> <li class="chapter" data-level="24" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html"><i class="fa fa-check"></i><b>24</b> Univariate distributions and statistics</a><ul> <li class="chapter" data-level="24.1" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#variation-randomness-and-stochasticity"><i class="fa fa-check"></i><b>24.1</b> Variation, randomness and stochasticity</a><ul> <li class="chapter" data-level="24.1.1" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#random-variables"><i class="fa fa-check"></i><b>24.1.1</b> Random variables</a></li> </ul></li> <li class="chapter" data-level="24.2" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#discrete-probability-distributions"><i class="fa fa-check"></i><b>24.2</b> (Discrete) Probability distributions</a><ul> <li class="chapter" data-level="24.2.1" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#example-the-oracle-of-tweet"><i class="fa fa-check"></i><b>24.2.1</b> Example The oracle of TWEET</a></li> </ul></li> <li class="chapter" data-level="24.3" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#expectation"><i class="fa fa-check"></i><b>24.3</b> Expectation</a></li> <li class="chapter" data-level="24.4" 
data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#estimation"><i class="fa fa-check"></i><b>24.4</b> Estimation</a><ul> <li class="chapter" data-level="24.4.1" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#law-of-large-numbers-lln"><i class="fa fa-check"></i><b>24.4.1</b> Law of large numbers (LLN)</a></li> <li class="chapter" data-level="24.4.2" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#central-limit-theorem-clt"><i class="fa fa-check"></i><b>24.4.2</b> Central Limit Theorem (CLT)</a></li> </ul></li> <li class="chapter" data-level="24.5" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#the-normal-distribution"><i class="fa fa-check"></i><b>24.5</b> The normal distribution</a><ul> <li class="chapter" data-level="24.5.1" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#clt-continued"><i class="fa fa-check"></i><b>24.5.1</b> CLT continued</a></li> </ul></li> <li class="chapter" data-level="24.6" data-path="univariate-distributions-and-statistics.html"><a href="univariate-distributions-and-statistics.html#the-bootstrap-procedure"><i class="fa fa-check"></i><b>24.6</b> The Bootstrap Procedure</a></li> </ul></li> <li class="chapter" data-level="25" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html"><i class="fa fa-check"></i><b>25</b> Experiment design and hypothesis testing</a><ul> <li class="chapter" data-level="25.1" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#inference"><i class="fa fa-check"></i><b>25.1</b> Inference</a><ul> <li class="chapter" data-level="25.1.1" 
data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#hypothesis-testing"><i class="fa fa-check"></i><b>25.1.1</b> Hypothesis testing</a></li> </ul></li> <li class="chapter" data-level="25.2" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#ab-testing"><i class="fa fa-check"></i><b>25.2</b> A/B Testing</a></li> <li class="chapter" data-level="25.3" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#summary-1"><i class="fa fa-check"></i><b>25.3</b> Summary</a></li> <li class="chapter" data-level="25.4" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#probability-distributions"><i class="fa fa-check"></i><b>25.4</b> Probability Distributions</a><ul> <li class="chapter" data-level="25.4.1" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#bernoulli"><i class="fa fa-check"></i><b>25.4.1</b> Bernoulli</a></li> <li class="chapter" data-level="25.4.2" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#binomial"><i class="fa fa-check"></i><b>25.4.2</b> Binomial</a></li> <li class="chapter" data-level="25.4.3" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#normal-gaussian-distribution"><i class="fa fa-check"></i><b>25.4.3</b> Normal (Gaussian) distribution</a></li> <li class="chapter" data-level="25.4.4" data-path="experiment-design-and-hypothesis-testing.html"><a href="experiment-design-and-hypothesis-testing.html#distributions-in-r"><i class="fa fa-check"></i><b>25.4.4</b> Distributions in R</a></li> </ul></li> </ul></li> <li class="chapter" data-level="26" data-path="multivariate-probability.html"><a href="multivariate-probability.html"><i 
class="fa fa-check"></i><b>26</b> Multivariate probability</a><ul> <li class="chapter" data-level="26.1" data-path="multivariate-probability.html"><a href="multivariate-probability.html#joint-and-conditional-probability"><i class="fa fa-check"></i><b>26.1</b> Joint and conditional probability</a></li> <li class="chapter" data-level="26.2" data-path="multivariate-probability.html"><a href="multivariate-probability.html#bayes-rule"><i class="fa fa-check"></i><b>26.2</b> Bayes’ Rule</a></li> <li class="chapter" data-level="26.3" data-path="multivariate-probability.html"><a href="multivariate-probability.html#conditional-expectation"><i class="fa fa-check"></i><b>26.3</b> Conditional expectation</a></li> <li class="chapter" data-level="26.4" data-path="multivariate-probability.html"><a href="multivariate-probability.html#maximum-likelihood"><i class="fa fa-check"></i><b>26.4</b> Maximum likelihood</a></li> </ul></li> <li class="chapter" data-level="" data-path="part-machine-learning.html"><a href="part-machine-learning.html"><i class="fa fa-check"></i>(Part) Machine Learning</a></li> <li class="chapter" data-level="27" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html"><i class="fa fa-check"></i><b>27</b> Data Analysis with Geometry</a><ul> <li class="chapter" data-level="27.1" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#motivating-example-credit-analysis"><i class="fa fa-check"></i><b>27.1</b> Motivating Example: Credit Analysis</a></li> <li class="chapter" data-level="27.2" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#from-data-to-feature-vectors"><i class="fa fa-check"></i><b>27.2</b> From data to feature vectors</a></li> <li class="chapter" data-level="27.3" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#technical-notation"><i class="fa fa-check"></i><b>27.3</b> Technical notation</a></li> <li 
class="chapter" data-level="27.4" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#geometry-and-distances"><i class="fa fa-check"></i><b>27.4</b> Geometry and Distances</a><ul> <li class="chapter" data-level="27.4.1" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#k-nearest-neighbor-classification"><i class="fa fa-check"></i><b>27.4.1</b> K-nearest neighbor classification</a></li> <li class="chapter" data-level="27.4.2" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#the-importance-of-transformations"><i class="fa fa-check"></i><b>27.4.2</b> The importance of transformations</a></li> </ul></li> <li class="chapter" data-level="27.5" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#quick-vector-algebra-review"><i class="fa fa-check"></i><b>27.5</b> Quick vector algebra review</a><ul> <li class="chapter" data-level="27.5.1" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#quiz"><i class="fa fa-check"></i><b>27.5.1</b> Quiz</a></li> </ul></li> <li class="chapter" data-level="27.6" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#the-curse-of-dimensionality"><i class="fa fa-check"></i><b>27.6</b> The curse of dimensionality</a></li> <li class="chapter" data-level="27.7" data-path="data-analysis-with-geometry.html"><a href="data-analysis-with-geometry.html#summary-2"><i class="fa fa-check"></i><b>27.7</b> Summary</a></li> </ul></li> <li class="chapter" data-level="28" data-path="linear-regression.html"><a href="linear-regression.html"><i class="fa fa-check"></i><b>28</b> Linear Regression</a><ul> <li class="chapter" data-level="28.1" data-path="linear-regression.html"><a href="linear-regression.html#simple-regression"><i class="fa fa-check"></i><b>28.1</b> Simple Regression</a></li> <li class="chapter" data-level="28.2" 
data-path="linear-regression.html"><a href="linear-regression.html#inference-1"><i class="fa fa-check"></i><b>28.2</b> Inference</a><ul> <li class="chapter" data-level="28.2.1" data-path="linear-regression.html"><a href="linear-regression.html#confidence-interval"><i class="fa fa-check"></i><b>28.2.1</b> Confidence Interval</a></li> <li class="chapter" data-level="28.2.2" data-path="linear-regression.html"><a href="linear-regression.html#the-t-statistic-and-the-t-distribution"><i class="fa fa-check"></i><b>28.2.2</b> The <span class="math inline">\(t\)</span>-statistic and the <span class="math inline">\(t\)</span>-distribution</a></li> <li class="chapter" data-level="28.2.3" data-path="linear-regression.html"><a href="linear-regression.html#global-fit"><i class="fa fa-check"></i><b>28.2.3</b> Global Fit</a></li> </ul></li> <li class="chapter" data-level="28.3" data-path="linear-regression.html"><a href="linear-regression.html#some-important-technicalities"><i class="fa fa-check"></i><b>28.3</b> Some important technicalities</a></li> <li class="chapter" data-level="28.4" data-path="linear-regression.html"><a href="linear-regression.html#issues-with-linear-regression"><i class="fa fa-check"></i><b>28.4</b> Issues with linear regression</a><ul> <li class="chapter" data-level="28.4.1" data-path="linear-regression.html"><a href="linear-regression.html#non-linearity-of-outcome-predictor-relationship"><i class="fa fa-check"></i><b>28.4.1</b> Non-linearity of outcome-predictor relationship</a></li> <li class="chapter" data-level="28.4.2" data-path="linear-regression.html"><a href="linear-regression.html#correlated-error"><i class="fa fa-check"></i><b>28.4.2</b> Correlated Error</a></li> <li class="chapter" data-level="28.4.3" data-path="linear-regression.html"><a href="linear-regression.html#non-constant-variance"><i class="fa fa-check"></i><b>28.4.3</b> Non-constant variance</a></li> </ul></li> <li class="chapter" data-level="28.5" data-path="linear-regression.html"><a 
href="linear-regression.html#multiple-linear-regression"><i class="fa fa-check"></i><b>28.5</b> Multiple linear regression</a><ul> <li class="chapter" data-level="28.5.1" data-path="linear-regression.html"><a href="linear-regression.html#estimation-in-multivariate-regression"><i class="fa fa-check"></i><b>28.5.1</b> Estimation in multivariate regression</a></li> <li class="chapter" data-level="28.5.2" data-path="linear-regression.html"><a href="linear-regression.html#example-contd"><i class="fa fa-check"></i><b>28.5.2</b> Example (cont’d)</a></li> <li class="chapter" data-level="28.5.3" data-path="linear-regression.html"><a href="linear-regression.html#statistical-statements-contd"><i class="fa fa-check"></i><b>28.5.3</b> Statistical statements (cont’d)</a></li> <li class="chapter" data-level="28.5.4" data-path="linear-regression.html"><a href="linear-regression.html#the-f-test"><i class="fa fa-check"></i><b>28.5.4</b> The F-test</a></li> <li class="chapter" data-level="28.5.5" data-path="linear-regression.html"><a href="linear-regression.html#categorical-predictors-contd"><i class="fa fa-check"></i><b>28.5.5</b> Categorical predictors (cont’d)</a></li> </ul></li> <li class="chapter" data-level="28.6" data-path="linear-regression.html"><a href="linear-regression.html#interactions-in-linear-models"><i class="fa fa-check"></i><b>28.6</b> Interactions in linear models</a><ul> <li class="chapter" data-level="28.6.1" data-path="linear-regression.html"><a href="linear-regression.html#additional-issues-with-linear-regression"><i class="fa fa-check"></i><b>28.6.1</b> Additional issues with linear regression</a></li> </ul></li> </ul></li> <li class="chapter" data-level="29" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html"><i class="fa fa-check"></i><b>29</b> Linear models for classification</a><ul> <li class="chapter" data-level="29.1" data-path="linear-models-for-classification.html"><a 
href="linear-models-for-classification.html#an-example-classification-problem"><i class="fa fa-check"></i><b>29.1</b> An example classification problem</a></li> <li class="chapter" data-level="29.2" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#why-not-linear-regression"><i class="fa fa-check"></i><b>29.2</b> Why not linear regression?</a></li> <li class="chapter" data-level="29.3" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#classification-as-probability-estimation-problem"><i class="fa fa-check"></i><b>29.3</b> Classification as probability estimation problem</a></li> <li class="chapter" data-level="29.4" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#logistic-regression"><i class="fa fa-check"></i><b>29.4</b> Logistic regression</a><ul> <li class="chapter" data-level="29.4.1" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#exercises"><i class="fa fa-check"></i><b>29.4.1</b> Exercises</a></li> <li class="chapter" data-level="29.4.2" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#making-predictions"><i class="fa fa-check"></i><b>29.4.2</b> Making predictions</a></li> <li class="chapter" data-level="29.4.3" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#multiple-logistic-regression"><i class="fa fa-check"></i><b>29.4.3</b> Multiple logistic regression</a></li> <li class="chapter" data-level="29.4.4" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#exercise"><i class="fa fa-check"></i><b>29.4.4</b> Exercise</a></li> </ul></li> <li class="chapter" data-level="29.5" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#linear-discriminant-analysis"><i class="fa 
fa-check"></i><b>29.5</b> Linear Discriminant Analysis</a><ul> <li class="chapter" data-level="29.5.1" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#how-to-train-lda"><i class="fa fa-check"></i><b>29.5.1</b> How to train LDA</a></li> </ul></li> <li class="chapter" data-level="29.6" data-path="linear-models-for-classification.html"><a href="linear-models-for-classification.html#summary-3"><i class="fa fa-check"></i><b>29.6</b> Summary</a></li> </ul></li> <li class="chapter" data-level="30" data-path="solving-linear-ml-problems.html"><a href="solving-linear-ml-problems.html"><i class="fa fa-check"></i><b>30</b> Solving linear ML problems</a><ul> <li class="chapter" data-level="30.1" data-path="solving-linear-ml-problems.html"><a href="solving-linear-ml-problems.html#case-study"><i class="fa fa-check"></i><b>30.1</b> Case Study</a></li> <li class="chapter" data-level="30.2" data-path="solving-linear-ml-problems.html"><a href="solving-linear-ml-problems.html#gradient-descent"><i class="fa fa-check"></i><b>30.2</b> Gradient Descent</a><ul> <li class="chapter" data-level="30.2.1" data-path="solving-linear-ml-problems.html"><a href="solving-linear-ml-problems.html#logistic-regression-1"><i class="fa fa-check"></i><b>30.2.1</b> Logistic Regression</a></li> </ul></li> <li class="chapter" data-level="30.3" data-path="solving-linear-ml-problems.html"><a href="solving-linear-ml-problems.html#stochastic-gradient-descent"><i class="fa fa-check"></i><b>30.3</b> Stochastic gradient descent</a></li> <li class="chapter" data-level="30.4" data-path="solving-linear-ml-problems.html"><a href="solving-linear-ml-problems.html#parallelizing-gradient-descent"><i class="fa fa-check"></i><b>30.4</b> Parallelizing gradient descent</a></li> </ul></li> <li class="chapter" data-level="31" data-path="tree-based-methods.html"><a href="tree-based-methods.html"><i class="fa fa-check"></i><b>31</b> Tree-Based Methods</a><ul> <li class="chapter" 
data-level="31.1" data-path="tree-based-methods.html"><a href="tree-based-methods.html#regression-trees"><i class="fa fa-check"></i><b>31.1</b> Regression Trees</a></li> <li class="chapter" data-level="31.2" data-path="tree-based-methods.html"><a href="tree-based-methods.html#classification-decision-trees"><i class="fa fa-check"></i><b>31.2</b> Classification (Decision) Trees</a></li> <li class="chapter" data-level="31.3" data-path="tree-based-methods.html"><a href="tree-based-methods.html#specifics-of-the-partitioning-algorithm"><i class="fa fa-check"></i><b>31.3</b> Specifics of the partitioning algorithm</a><ul> <li class="chapter" data-level="31.3.1" data-path="tree-based-methods.html"><a href="tree-based-methods.html#the-predictor-space"><i class="fa fa-check"></i><b>31.3.1</b> The predictor space</a></li> <li class="chapter" data-level="31.3.2" data-path="tree-based-methods.html"><a href="tree-based-methods.html#learning-strategy"><i class="fa fa-check"></i><b>31.3.2</b> Learning Strategy</a></li> <li class="chapter" data-level="31.3.3" data-path="tree-based-methods.html"><a href="tree-based-methods.html#tree-growing"><i class="fa fa-check"></i><b>31.3.3</b> Tree Growing</a></li> <li class="chapter" data-level="31.3.4" data-path="tree-based-methods.html"><a href="tree-based-methods.html#deviance-as-a-measure-of-impurity"><i class="fa fa-check"></i><b>31.3.4</b> Deviance as a measure of impurity</a></li> <li class="chapter" data-level="31.3.5" data-path="tree-based-methods.html"><a href="tree-based-methods.html#other-measures-of-impurity"><i class="fa fa-check"></i><b>31.3.5</b> Other measures of impurity</a></li> <li class="chapter" data-level="31.3.6" data-path="tree-based-methods.html"><a href="tree-based-methods.html#tree-pruning"><i class="fa fa-check"></i><b>31.3.6</b> Tree Pruning</a></li> </ul></li> <li class="chapter" data-level="31.4" data-path="tree-based-methods.html"><a href="tree-based-methods.html#properties-of-tree-method"><i class="fa 
fa-check"></i><b>31.4</b> Properties of Tree Method</a></li> <li class="chapter" data-level="31.5" data-path="tree-based-methods.html"><a href="tree-based-methods.html#random-forests"><i class="fa fa-check"></i><b>31.5</b> Random Forests</a></li> <li class="chapter" data-level="31.6" data-path="tree-based-methods.html"><a href="tree-based-methods.html#tree-based-methods-summary"><i class="fa fa-check"></i><b>31.6</b> Tree-based methods summary</a></li> </ul></li> <li class="chapter" data-level="32" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html"><i class="fa fa-check"></i><b>32</b> Model Selection and Evaluation</a><ul> <li class="chapter" data-level="32.1" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#classifier-evaluation"><i class="fa fa-check"></i><b>32.1</b> Classifier evaluation</a></li> <li class="chapter" data-level="32.2" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#model-selection"><i class="fa fa-check"></i><b>32.2</b> Model selection</a><ul> <li class="chapter" data-level="32.2.1" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#cross-validation"><i class="fa fa-check"></i><b>32.2.1</b> Cross Validation</a></li> <li class="chapter" data-level="32.2.2" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#validation-set"><i class="fa fa-check"></i><b>32.2.2</b> Validation Set</a></li> <li class="chapter" data-level="32.2.3" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#resampled-validation-set"><i class="fa fa-check"></i><b>32.2.3</b> Resampled validation set</a></li> <li class="chapter" data-level="32.2.4" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#leave-one-out-cross-validation"><i class="fa fa-check"></i><b>32.2.4</b> Leave-one-out 
Cross-Validation</a></li> <li class="chapter" data-level="32.2.5" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#k-fold-cross-validation"><i class="fa fa-check"></i><b>32.2.5</b> k-fold Cross-Validation</a></li> <li class="chapter" data-level="32.2.6" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#cross-validation-in-classification"><i class="fa fa-check"></i><b>32.2.6</b> Cross-Validation in Classification</a></li> <li class="chapter" data-level="32.2.7" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#comparing-models-statistically-using-cross-validation"><i class="fa fa-check"></i><b>32.2.7</b> Comparing models statistically using cross-validation</a></li> </ul></li> <li class="chapter" data-level="32.3" data-path="model-selection-and-evaluation.html"><a href="model-selection-and-evaluation.html#summary-4"><i class="fa fa-check"></i><b>32.3</b> Summary</a></li> </ul></li> <li class="chapter" data-level="33" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html"><i class="fa fa-check"></i><b>33</b> Unsupervised Learning: Clustering</a><ul> <li class="chapter" data-level="33.1" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#motivating-example"><i class="fa fa-check"></i><b>33.1</b> Motivating Example</a></li> <li class="chapter" data-level="33.2" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#some-preliminaries"><i class="fa fa-check"></i><b>33.2</b> Some Preliminaries</a></li> <li class="chapter" data-level="33.3" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#cluster-analysis"><i class="fa fa-check"></i><b>33.3</b> Cluster Analysis</a></li> <li class="chapter" data-level="33.4" data-path="unsupervised-learning-clustering.html"><a 
href="unsupervised-learning-clustering.html#dissimilarity-based-clustering"><i class="fa fa-check"></i><b>33.4</b> Dissimilarity-based Clustering</a></li> <li class="chapter" data-level="33.5" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#k-means-clustering"><i class="fa fa-check"></i><b>33.5</b> K-means Clustering</a></li> <li class="chapter" data-level="33.6" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#choosing-the-number-of-clusters"><i class="fa fa-check"></i><b>33.6</b> Choosing the number of clusters</a></li> <li class="chapter" data-level="33.7" data-path="unsupervised-learning-clustering.html"><a href="unsupervised-learning-clustering.html#summary-5"><i class="fa fa-check"></i><b>33.7</b> Summary</a></li> </ul></li> <li class="chapter" data-level="34" data-path="unsupervised-learning-dimensionality-reduction.html"><a href="unsupervised-learning-dimensionality-reduction.html"><i class="fa fa-check"></i><b>34</b> Unsupervised Learning: Dimensionality Reduction</a><ul> <li class="chapter" data-level="34.1" data-path="unsupervised-learning-dimensionality-reduction.html"><a href="unsupervised-learning-dimensionality-reduction.html#principal-component-analysis"><i class="fa fa-check"></i><b>34.1</b> Principal Component Analysis</a><ul> <li class="chapter" data-level="34.1.1" data-path="unsupervised-learning-dimensionality-reduction.html"><a href="unsupervised-learning-dimensionality-reduction.html#solving-the-pca"><i class="fa fa-check"></i><b>34.1.1</b> Solving the PCA</a></li> </ul></li> <li class="chapter" data-level="34.2" data-path="unsupervised-learning-dimensionality-reduction.html"><a href="unsupervised-learning-dimensionality-reduction.html#multidimensional-scaling"><i class="fa fa-check"></i><b>34.2</b> Multidimensional Scaling</a></li> <li class="chapter" data-level="34.3" data-path="unsupervised-learning-dimensionality-reduction.html"><a 
href="unsupervised-learning-dimensionality-reduction.html#summary-6"><i class="fa fa-check"></i><b>34.3</b> Summary</a></li> </ul></li> </ul> </nav> </div> <div class="book-body"> <div class="body-inner"> <div class="book-header" role="navigation"> <h1> <i class="fa fa-circle-o-notch fa-spin"></i><a href="./">Lecture Notes: Introduction to Data Science</a> </h1> </div> <div class="page-wrapper" tabindex="-1" role="main"> <div class="page-inner"> <section class="normal" id="section-"> <div id="principles-basic-operations" class="section level1"> <h1><span class="header-section-number">6</span> Principles: Basic Operations</h1> <p>Now that we have a data frame describing our data, let’s learn a few fundamental operations we perform on data frames in almost any analysis. We divide this first set of operations into two groups: operations on <em>attributes</em> and operations on <em>entities</em>. These operations are defined in the <code>dplyr</code> package, part of the <code>tidyverse</code>, and are described in more detail in the “R for Data Science” textbook available in the course logistics page: <a href="http://r4ds.had.co.nz/transform.html" class="uri">http://r4ds.had.co.nz/transform.html</a>.</p> <div id="operations-that-subset-attributes" class="section level2"> <h2><span class="header-section-number">6.1</span> Operations that subset attributes</h2> <div id="select" class="section level3"> <h3><span class="header-section-number">6.1.1</span> <code>select</code></h3> <p>In our data set we have a large number of attributes describing each arrest. Now, suppose we only want to study patterns in these arrests based on a smaller number of attributes, either for efficiency (since we would operate over less data) or for interpretability. In that case we would like to create a data frame that contains only those attributes of interest. 
We use the <code>select</code> function for this.</p> <p><img src="img/select.png" /></p> <p>Let’s create a data frame containing only the <code>age</code>, <code>sex</code> and <code>district</code> attributes</p> <div class="sourceCode" id="cb26"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb26-1" data-line-number="1"><span class="kw">select</span>(arrest_tab, age, sex, district)</a></code></pre></div> <pre><code>## # A tibble: 104,528 x 3 ## age sex district ## &lt;dbl&gt; &lt;fct&gt; &lt;chr&gt; ## 1 23 M &lt;NA&gt; ## 2 37 M SOUTHERN ## 3 46 M NORTHEASTERN ## 4 50 M WESTERN ## 5 33 M NORTHERN ## 6 41 M SOUTHERN ## 7 29 M WESTERN ## 8 20 M NORTHEASTERN ## 9 24 M &lt;NA&gt; ## 10 53 M NORTHWESTERN ## # … with 104,518 more rows</code></pre> <p>The first argument to the <code>select</code> function is the data frame we want to operate on, the remaining arguments describe the attributes we want to include in the resulting data frame.</p> <p>Note a few other things:</p> <ol style="list-style-type: decimal"> <li><p>The first argument to <code>select</code> is a data frame, and the value returned by <code>select</code> is also a data frame</p></li> <li><p>As always you can learn more about the function using <code>?select</code></p></li> </ol> <p>Attribute descriptor arguments can be fairly sophisticated. 
For example, we can use positive integers to indicate attribute (column) indices:</p> <div class="sourceCode" id="cb28"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb28-1" data-line-number="1"><span class="kw">select</span>(arrest_tab, <span class="dv">1</span>, <span class="dv">3</span>, <span class="dv">4</span>)</a></code></pre></div> <pre><code>## # A tibble: 104,528 x 3 ## arrest race sex ## &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; ## 1 11126858 B M ## 2 11127013 B M ## 3 11126887 B M ## 4 11126873 B M ## 5 11126968 B M ## 6 11127041 B M ## 7 11126932 B M ## 8 11126940 W M ## 9 11127051 B M ## 10 11127018 B M ## # … with 104,518 more rows</code></pre> <p>R includes a useful operator to describe ranges. E.g., <code>1:5</code> would be attributes 1 through 5:</p> <div class="sourceCode" id="cb30"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb30-1" data-line-number="1"><span class="kw">select</span>(arrest_tab, <span class="dv">1</span><span class="op">:</span><span class="dv">5</span>)</a></code></pre></div> <pre><code>## # A tibble: 104,528 x 5 ## arrest age race sex arrestDate ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; ## 1 11126858 23 B M 01/01/2011 ## 2 11127013 37 B M 01/01/2011 ## 3 11126887 46 B M 01/01/2011 ## 4 11126873 50 B M 01/01/2011 ## 5 11126968 33 B M 01/01/2011 ## 6 11127041 41 B M 01/01/2011 ## 7 11126932 29 B M 01/01/2011 ## 8 11126940 20 W M 01/01/2011 ## 9 11127051 24 B M 01/01/2011 ## 10 11127018 53 B M 01/01/2011 ## # … with 104,518 more rows</code></pre> <p>We can also use other helper functions to create attribute descriptors. 
For example, to choose all attributes that begin with the letter <code>a</code> we can use the <code>starts_with</code> function, which uses partial string matching:</p> <div class="sourceCode" id="cb32"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb32-1" data-line-number="1"><span class="kw">select</span>(arrest_tab, <span class="kw">starts_with</span>(<span class="st">&quot;a&quot;</span>))</a></code></pre></div> <pre><code>## # A tibble: 104,528 x 5 ## arrest age arrestDate arrestTime ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;time&gt; ## 1 1.11e7 23 01/01/2011 00&#39;00&quot; ## 2 1.11e7 37 01/01/2011 01&#39;00&quot; ## 3 1.11e7 46 01/01/2011 01&#39;00&quot; ## 4 1.11e7 50 01/01/2011 04&#39;00&quot; ## 5 1.11e7 33 01/01/2011 05&#39;00&quot; ## 6 1.11e7 41 01/01/2011 05&#39;00&quot; ## 7 1.11e7 29 01/01/2011 05&#39;00&quot; ## 8 1.11e7 20 01/01/2011 05&#39;00&quot; ## 9 1.11e7 24 01/01/2011 07&#39;00&quot; ## 10 1.11e7 53 01/01/2011 15&#39;00&quot; ## # … with 104,518 more rows, and 1 more variable: ## # arrestLocation &lt;chr&gt;</code></pre> <p>We can also use the attribute descriptor arguments to <em>drop</em> attributes. 
For instance using descriptor <code>-age</code> returns the arrest data frame with all but the <code>age</code> attribute included:</p> <div class="sourceCode" id="cb34"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb34-1" data-line-number="1"><span class="kw">select</span>(arrest_tab, <span class="op">-</span>age)</a></code></pre></div> <pre><code>## # A tibble: 104,528 x 14 ## arrest race sex arrestDate arrestTime ## &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; &lt;time&gt; ## 1 1.11e7 B M 01/01/2011 00&#39;00&quot; ## 2 1.11e7 B M 01/01/2011 01&#39;00&quot; ## 3 1.11e7 B M 01/01/2011 01&#39;00&quot; ## 4 1.11e7 B M 01/01/2011 04&#39;00&quot; ## 5 1.11e7 B M 01/01/2011 05&#39;00&quot; ## 6 1.11e7 B M 01/01/2011 05&#39;00&quot; ## 7 1.11e7 B M 01/01/2011 05&#39;00&quot; ## 8 1.11e7 W M 01/01/2011 05&#39;00&quot; ## 9 1.11e7 B M 01/01/2011 07&#39;00&quot; ## 10 1.11e7 B M 01/01/2011 15&#39;00&quot; ## # … with 104,518 more rows, and 9 more ## # variables: arrestLocation &lt;chr&gt;, ## # incidentOffense &lt;chr&gt;, ## # incidentLocation &lt;chr&gt;, charge &lt;chr&gt;, ## # chargeDescription &lt;chr&gt;, district &lt;chr&gt;, ## # post &lt;dbl&gt;, neighborhood &lt;chr&gt;, `Location ## # 1` &lt;chr&gt;</code></pre> </div> <div id="rename" class="section level3"> <h3><span class="header-section-number">6.1.2</span> <code>rename</code></h3> <p>To improve interpretability during an analysis we may want to rename attributes. 
We use the <code>rename</code> function for this:</p> <div class="sourceCode" id="cb36"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb36-1" data-line-number="1"><span class="kw">rename</span>(arrest_tab, <span class="dt">arrest_date=</span>arrestDate)</a></code></pre></div> <pre><code>## # A tibble: 104,528 x 15 ## arrest age race sex arrest_date ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; ## 1 1.11e7 23 B M 01/01/2011 ## 2 1.11e7 37 B M 01/01/2011 ## 3 1.11e7 46 B M 01/01/2011 ## 4 1.11e7 50 B M 01/01/2011 ## 5 1.11e7 33 B M 01/01/2011 ## 6 1.11e7 41 B M 01/01/2011 ## 7 1.11e7 29 B M 01/01/2011 ## 8 1.11e7 20 W M 01/01/2011 ## 9 1.11e7 24 B M 01/01/2011 ## 10 1.11e7 53 B M 01/01/2011 ## # … with 104,518 more rows, and 10 more ## # variables: arrestTime &lt;time&gt;, ## # arrestLocation &lt;chr&gt;, incidentOffense &lt;chr&gt;, ## # incidentLocation &lt;chr&gt;, charge &lt;chr&gt;, ## # chargeDescription &lt;chr&gt;, district &lt;chr&gt;, ## # post &lt;dbl&gt;, neighborhood &lt;chr&gt;, `Location ## # 1` &lt;chr&gt;</code></pre> <p>Like <code>select</code>, the first argument to the function is the data frame we are operating on. The remaining arguments specify attributes to rename and the name they will have in the resulting data frame. Note that arguments in this case are <em>named</em> (have the form <code>lhs=rhs</code>). 
We can have selection <em>and</em> renaming by using named arguments in <code>select</code>:</p> <div class="sourceCode" id="cb38"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb38-1" data-line-number="1"><span class="kw">select</span>(arrest_tab, age, sex, <span class="dt">arrest_date=</span>arrestDate)</a></code></pre></div> <pre><code>## # A tibble: 104,528 x 3 ## age sex arrest_date ## &lt;dbl&gt; &lt;fct&gt; &lt;chr&gt; ## 1 23 M 01/01/2011 ## 2 37 M 01/01/2011 ## 3 46 M 01/01/2011 ## 4 50 M 01/01/2011 ## 5 33 M 01/01/2011 ## 6 41 M 01/01/2011 ## 7 29 M 01/01/2011 ## 8 20 M 01/01/2011 ## 9 24 M 01/01/2011 ## 10 53 M 01/01/2011 ## # … with 104,518 more rows</code></pre> <p>Also like <code>select</code>, the result of calling <code>rename</code> is a data frame. In fact, this will be the case for almost all operations in the <code>tidyverse</code>: they operate on data frames (specified as the first argument in the function call) and return data frames.</p> </div> </div> <div id="operations-that-subset-entities" class="section level2"> <h2><span class="header-section-number">6.2</span> Operations that subset entities</h2> <p>Next, we look at operations that select entities from a data frame. We will see a few operations to do this: selecting specific entities (rows) by position, selecting them based on attribute properties, and random sampling.</p> <p><img src="img/subset.png" /></p> <div id="slice" class="section level3"> <h3><span class="header-section-number">6.2.1</span> <code>slice</code></h3> <p>We can choose specific entities by their row position. 
For instance, to choose entities in rows 1,3 and 10, we would use the following:</p> <div class="sourceCode" id="cb40"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb40-1" data-line-number="1"><span class="kw">slice</span>(arrest_tab, <span class="kw">c</span>(<span class="dv">1</span>, <span class="dv">3</span>, <span class="dv">10</span>))</a></code></pre></div> <pre><code>## # A tibble: 3 x 15 ## arrest age race sex arrestDate arrestTime ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; &lt;time&gt; ## 1 1.11e7 23 B M 01/01/2011 00&#39;00&quot; ## 2 1.11e7 46 B M 01/01/2011 01&#39;00&quot; ## 3 1.11e7 53 B M 01/01/2011 15&#39;00&quot; ## # … with 9 more variables: arrestLocation &lt;chr&gt;, ## # incidentOffense &lt;chr&gt;, ## # incidentLocation &lt;chr&gt;, charge &lt;chr&gt;, ## # chargeDescription &lt;chr&gt;, district &lt;chr&gt;, ## # post &lt;dbl&gt;, neighborhood &lt;chr&gt;, `Location ## # 1` &lt;chr&gt;</code></pre> <p>As before, the first argument is the data frame to operate on. The second argument is a <em>vector</em> of indices. 
We used the <code>c</code> function (for concatenate) to create a vector of indices.</p> <p>We can also use the range operator here:</p> <div class="sourceCode" id="cb42"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb42-1" data-line-number="1"><span class="kw">slice</span>(arrest_tab, <span class="dv">1</span><span class="op">:</span><span class="dv">5</span>)</a></code></pre></div> <pre><code>## # A tibble: 5 x 15 ## arrest age race sex arrestDate arrestTime ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; &lt;time&gt; ## 1 1.11e7 23 B M 01/01/2011 00&#39;00&quot; ## 2 1.11e7 37 B M 01/01/2011 01&#39;00&quot; ## 3 1.11e7 46 B M 01/01/2011 01&#39;00&quot; ## 4 1.11e7 50 B M 01/01/2011 04&#39;00&quot; ## 5 1.11e7 33 B M 01/01/2011 05&#39;00&quot; ## # … with 9 more variables: arrestLocation &lt;chr&gt;, ## # incidentOffense &lt;chr&gt;, ## # incidentLocation &lt;chr&gt;, charge &lt;chr&gt;, ## # chargeDescription &lt;chr&gt;, district &lt;chr&gt;, ## # post &lt;dbl&gt;, neighborhood &lt;chr&gt;, `Location ## # 1` &lt;chr&gt;</code></pre> <p>To create general sequences of indices we would use the <code>seq</code> function. 
For example, to select entities in even positions we would use the following:</p> <div class="sourceCode" id="cb44"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb44-1" data-line-number="1"><span class="kw">slice</span>(arrest_tab, <span class="kw">seq</span>(<span class="dv">2</span>, <span class="kw">nrow</span>(arrest_tab), <span class="dt">by=</span><span class="dv">2</span>))</a></code></pre></div> <pre><code>## # A tibble: 52,264 x 15 ## arrest age race sex arrestDate arrestTime ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; &lt;time&gt; ## 1 1.11e7 37 B M 01/01/2011 01&#39;00&quot; ## 2 1.11e7 50 B M 01/01/2011 04&#39;00&quot; ## 3 1.11e7 41 B M 01/01/2011 05&#39;00&quot; ## 4 1.11e7 20 W M 01/01/2011 05&#39;00&quot; ## 5 1.11e7 53 B M 01/01/2011 15&#39;00&quot; ## 6 1.11e7 25 B M 01/01/2011 20&#39;00&quot; ## 7 1.11e7 50 B M 01/01/2011 40&#39;00&quot; ## 8 1.11e7 40 B M 01/01/2011 40&#39;00&quot; ## 9 1.11e7 30 B M 01/01/2011 40&#39;00&quot; ## 10 1.11e7 53 B M 01/01/2011 40&#39;00&quot; ## # … with 52,254 more rows, and 9 more variables: ## # arrestLocation &lt;chr&gt;, incidentOffense &lt;chr&gt;, ## # incidentLocation &lt;chr&gt;, charge &lt;chr&gt;, ## # chargeDescription &lt;chr&gt;, district &lt;chr&gt;, ## # post &lt;dbl&gt;, neighborhood &lt;chr&gt;, `Location ## # 1` &lt;chr&gt;</code></pre> </div> <div id="filter" class="section level3"> <h3><span class="header-section-number">6.2.2</span> <code>filter</code></h3> <p>We can also select entities based on attribute properties. 
For example, to select arrests where age is less than 18 years old, we would use the following:</p> <div class="sourceCode" id="cb46"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb46-1" data-line-number="1"><span class="kw">filter</span>(arrest_tab, age <span class="op">&lt;</span><span class="st"> </span><span class="dv">18</span>)</a></code></pre></div> <pre><code>## # A tibble: 463 x 15 ## arrest age race sex arrestDate arrestTime ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; &lt;time&gt; ## 1 1.11e7 17 B M 01/03/2011 15:00 ## 2 1.11e7 17 B M 01/07/2011 18:40 ## 3 1.11e7 17 A M 01/10/2011 22:00 ## 4 1.11e7 17 B M 01/13/2011 01:00 ## 5 1.11e7 17 B F 01/13/2011 13:40 ## 6 1.11e7 17 B M 01/13/2011 18:40 ## 7 1.11e7 14 B M 01/17/2011 21:57 ## 8 1.11e7 17 B M 01/18/2011 15:00 ## 9 1.11e7 17 B M 01/18/2011 15:26 ## 10 1.11e7 16 B M 01/18/2011 16:00 ## # … with 453 more rows, and 9 more variables: ## # arrestLocation &lt;chr&gt;, incidentOffense &lt;chr&gt;, ## # incidentLocation &lt;chr&gt;, charge &lt;chr&gt;, ## # chargeDescription &lt;chr&gt;, district &lt;chr&gt;, ## # post &lt;dbl&gt;, neighborhood &lt;chr&gt;, `Location ## # 1` &lt;chr&gt;</code></pre> <p>You know by now what the first argument is…</p> <p>The second argument is an expression that evaluates to a logical value (<code>TRUE</code> or <code>FALSE</code>), if the expression evaluates to TRUE for a given entity (row) then that entity (row) is part of the resulting data frame. 
Operators used frequently include:</p> <p><code>==</code>, <code>!=</code>: tests equality and inequality respectively (categorical, numerical, datetimes, etc.)<br /> <code>&lt;</code>, <code>&gt;</code>, <code>&lt;=</code>, <code>&gt;=</code>: tests order relationships for ordered data types (not categorical)<br /> <code>!</code>, <code>&amp;</code>, <code>|</code>: not, and, or, logical operators</p> <p>To select arrests with ages between 18 and 25 we can use</p> <div class="sourceCode" id="cb48"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb48-1" data-line-number="1"><span class="kw">filter</span>(arrest_tab, age <span class="op">&gt;=</span><span class="st"> </span><span class="dv">18</span> <span class="op">&amp;</span><span class="st"> </span>age <span class="op">&lt;=</span><span class="st"> </span><span class="dv">25</span>)</a></code></pre></div> <pre><code>## # A tibble: 35,770 x 15 ## arrest age race sex arrestDate arrestTime ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; &lt;time&gt; ## 1 1.11e7 23 B M 01/01/2011 00:00 ## 2 1.11e7 20 W M 01/01/2011 00:05 ## 3 1.11e7 24 B M 01/01/2011 00:07 ## 4 1.11e7 25 B M 01/01/2011 00:20 ## 5 1.11e7 24 B M 01/01/2011 00:40 ## 6 1.11e7 20 B M 01/01/2011 01:22 ## 7 1.11e7 23 B M 01/01/2011 01:30 ## 8 1.11e7 22 A M 01/01/2011 01:40 ## 9 1.11e7 20 W M 01/01/2011 02:00 ## 10 1.11e7 20 B M 01/01/2011 02:20 ## # … with 35,760 more rows, and 9 more variables: ## # arrestLocation &lt;chr&gt;, incidentOffense &lt;chr&gt;, ## # incidentLocation &lt;chr&gt;, charge &lt;chr&gt;, ## # chargeDescription &lt;chr&gt;, district &lt;chr&gt;, ## # post &lt;dbl&gt;, neighborhood &lt;chr&gt;, `Location ## # 1` &lt;chr&gt;</code></pre> <p>The filter function can take multiple logical expressions. In this case they are combined with <code>&amp;</code>. 
So the above is equivalent to</p> <div class="sourceCode" id="cb50"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb50-1" data-line-number="1"><span class="kw">filter</span>(arrest_tab, age <span class="op">&gt;=</span><span class="st"> </span><span class="dv">18</span>, age <span class="op">&lt;=</span><span class="st"> </span><span class="dv">25</span>)</a></code></pre></div> <pre><code>## # A tibble: 35,770 x 15 ## arrest age race sex arrestDate arrestTime ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; &lt;time&gt; ## 1 1.11e7 23 B M 01/01/2011 00:00 ## 2 1.11e7 20 W M 01/01/2011 00:05 ## 3 1.11e7 24 B M 01/01/2011 00:07 ## 4 1.11e7 25 B M 01/01/2011 00:20 ## 5 1.11e7 24 B M 01/01/2011 00:40 ## 6 1.11e7 20 B M 01/01/2011 01:22 ## 7 1.11e7 23 B M 01/01/2011 01:30 ## 8 1.11e7 22 A M 01/01/2011 01:40 ## 9 1.11e7 20 W M 01/01/2011 02:00 ## 10 1.11e7 20 B M 01/01/2011 02:20 ## # … with 35,760 more rows, and 9 more variables: ## # arrestLocation &lt;chr&gt;, incidentOffense &lt;chr&gt;, ## # incidentLocation &lt;chr&gt;, charge &lt;chr&gt;, ## # chargeDescription &lt;chr&gt;, district &lt;chr&gt;, ## # post &lt;dbl&gt;, neighborhood &lt;chr&gt;, `Location ## # 1` &lt;chr&gt;</code></pre> </div> <div id="sample_n-and-sample_frac" class="section level3"> <h3><span class="header-section-number">6.2.3</span> <code>sample_n</code> and <code>sample_frac</code></h3> <p>Frequently we will want to choose entities from a data frame at random. 
The <code>sample_n</code> function selects a specific number of entities at random:</p> <div class="sourceCode" id="cb52"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb52-1" data-line-number="1"><span class="kw">sample_n</span>(arrest_tab, <span class="dv">10</span>)</a></code></pre></div> <pre><code>## # A tibble: 10 x 15 ## arrest age race sex arrestDate ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; ## 1 1.12e7 41 B M 06/08/2011 ## 2 1.26e7 28 B M 12/12/2012 ## 3 1.25e7 21 B M 07/31/2012 ## 4 1.14e7 26 B M 11/13/2011 ## 5 1.13e7 55 B M 08/31/2011 ## 6 1.14e7 25 B M 11/12/2011 ## 7 NA 22 B F 03/09/2011 ## 8 1.26e7 24 B M 10/31/2012 ## 9 NA 37 B M 05/27/2011 ## 10 1.26e7 40 B M 11/13/2012 ## # … with 10 more variables: arrestTime &lt;time&gt;, ## # arrestLocation &lt;chr&gt;, incidentOffense &lt;chr&gt;, ## # incidentLocation &lt;chr&gt;, charge &lt;chr&gt;, ## # chargeDescription &lt;chr&gt;, district &lt;chr&gt;, ## # post &lt;dbl&gt;, neighborhood &lt;chr&gt;, `Location ## # 1` &lt;chr&gt;</code></pre> <p>The <code>sample_frac</code> function selects a fraction of entitites at random:</p> <div class="sourceCode" id="cb54"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb54-1" data-line-number="1"><span class="kw">sample_frac</span>(arrest_tab, <span class="fl">.1</span>)</a></code></pre></div> <pre><code>## # A tibble: 10,453 x 15 ## arrest age race sex arrestDate arrestTime ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; &lt;time&gt; ## 1 1.13e7 53 B M 07/27/2011 09:12 ## 2 1.12e7 25 B M 04/06/2011 19:45 ## 3 1.26e7 20 B M 10/08/2012 17:45 ## 4 1.14e7 19 B M 11/30/2011 19:30 ## 5 1.25e7 25 B M 08/11/2012 18:00 ## 6 1.12e7 25 B M 05/24/2011 18:45 ## 7 1.12e7 20 B M 02/26/2011 12:16 ## 8 1.24e7 44 B M 01/10/2012 13:30 ## 9 1.25e7 20 B M 06/19/2012 23:53 ## 10 1.12e7 41 B M 03/19/2011 15:00 ## # … with 10,443 more rows, and 9 more variables: ## # arrestLocation 
&lt;chr&gt;, incidentOffense &lt;chr&gt;, ## # incidentLocation &lt;chr&gt;, charge &lt;chr&gt;, ## # chargeDescription &lt;chr&gt;, district &lt;chr&gt;, ## # post &lt;dbl&gt;, neighborhood &lt;chr&gt;, `Location ## # 1` &lt;chr&gt;</code></pre> </div> </div> <div id="pipelines-of-operations" class="section level2"> <h2><span class="header-section-number">6.3</span> Pipelines of operations</h2> <p>All of the functions implementing our first set of operations have the same argument/value structure. They take a data frame as a first argument and return a data frame. We refer to this as the <em>data–&gt;transform–&gt;data</em> pattern. This is the core a lot of what we will do in class as part of data analyses. Specifically, we will combine operations into <em>pipelines</em> that manipulate data frames.</p> <p>The <code>dplyr</code> package introduces <em>syntactic sugar</em> to make this pattern explicit. For instance, we can rewrite the <code>sample_frac</code> example using the “pipe” operator <code>%&gt;%</code>:</p> <div class="sourceCode" id="cb56"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb56-1" data-line-number="1">arrest_tab <span class="op">%&gt;%</span></a> <a class="sourceLine" id="cb56-2" data-line-number="2"><span class="st"> </span><span class="kw">sample_frac</span>(.<span class="dv">1</span>)</a></code></pre></div> <pre><code>## # A tibble: 10,453 x 15 ## arrest age race sex arrestDate ## &lt;dbl&gt; &lt;dbl&gt; &lt;chr&gt; &lt;fct&gt; &lt;chr&gt; ## 1 1.12e7 22 B M 02/18/2011 ## 2 1.26e7 58 B M 11/30/2012 ## 3 NA 28 W F 08/22/2012 ## 4 1.24e7 38 B F 01/08/2012 ## 5 1.24e7 22 B M 03/22/2012 ## 6 1.25e7 43 B F 04/06/2012 ## 7 1.12e7 21 B F 05/11/2011 ## 8 1.25e7 25 B M 07/20/2012 ## 9 1.13e7 18 B M 10/07/2011 ## 10 1.25e7 19 B M 09/01/2012 ## # … with 10,443 more rows, and 10 more ## # variables: arrestTime &lt;time&gt;, ## # arrestLocation &lt;chr&gt;, incidentOffense &lt;chr&gt;, ## # incidentLocation 
&lt;chr&gt;, charge &lt;chr&gt;, ## # chargeDescription &lt;chr&gt;, district &lt;chr&gt;, ## # post &lt;dbl&gt;, neighborhood &lt;chr&gt;, `Location ## # 1` &lt;chr&gt;</code></pre> <p>The <code>%&gt;%</code> binary operator takes the value to its <strong>left</strong> and inserts it as the first argument of the function call to its <strong>right</strong>. So the expression <code>LHS %&gt;% f(another_argument)</code> is <strong>equivalent</strong> to the expression <code>f(LHS, another_argument)</code>.</p> <p>Using the <code>%&gt;%</code> operator and the <em>data–&gt;transform–&gt;data</em> pattern of the functions we’ve seen so far, we can create pipelines. For example, let’s create a pipeline that:</p> <ol style="list-style-type: decimal"> <li>filters our dataset to arrests between the ages of 18 and 25</li> <li>selects attributes <code>sex</code>, <code>district</code> and <code>arrestDate</code> (renamed as <code>arrest_date</code>)</li> <li>samples 50% of those arrests at random</li> </ol> <p>We will assign the result to variable <code>analysis_tab</code></p> <div class="sourceCode" id="cb58"><pre class="sourceCode r"><code class="sourceCode r"><a class="sourceLine" id="cb58-1" data-line-number="1">analysis_tab &lt;-<span class="st"> </span>arrest_tab <span class="op">%&gt;%</span></a> <a class="sourceLine" id="cb58-2" data-line-number="2"><span class="st"> </span><span class="kw">filter</span>(age <span class="op">&gt;=</span><span class="st"> </span><span class="dv">18</span>, age <span class="op">&lt;=</span><span class="st"> </span><span class="dv">25</span>) <span class="op">%&gt;%</span></a> <a class="sourceLine" id="cb58-3" data-line-number="3"><span class="st"> </span><span class="kw">select</span>(sex, district, <span class="dt">arrest_date=</span>arrestDate) <span class="op">%&gt;%</span></a> <a class="sourceLine" id="cb58-4" data-line-number="4"><span class="st"> </span><span class="kw">sample_frac</span>(.<span class="dv">5</span>)</a> <a 
class="sourceLine" id="cb58-5" data-line-number="5">analysis_tab</a></code></pre></div> <pre><code>## # A tibble: 17,885 x 3 ## sex district arrest_date ## &lt;fct&gt; &lt;chr&gt; &lt;chr&gt; ## 1 F EASTERN 11/02/2011 ## 2 F SOUTHWESTERN 11/02/2011 ## 3 M EASTERN 11/21/2012 ## 4 M &lt;NA&gt; 10/26/2011 ## 5 M SOUTHEASTERN 05/24/2012 ## 6 M EASTERN 03/20/2012 ## 7 M WESTERN 09/13/2011 ## 8 M EASTERN 06/24/2011 ## 9 F &lt;NA&gt; 10/16/2012 ## 10 M &lt;NA&gt; 02/21/2011 ## # … with 17,875 more rows</code></pre> <p><strong>Exercise</strong>: Create a pipeline that:</p> <ol style="list-style-type: decimal"> <li>filters dataset to arrests from the “SOUTHERN” district occurring before “12:00” (<code>arrestTime</code>)</li> <li>selects attributes, <code>sex</code>, <code>age</code></li> <li>samples 10 entities at random</li> </ol> </div> </div> </section> </div> </div> </div> <a href="measurements-and-data-types.html" class="navigation navigation-prev " aria-label="Previous page"><i class="fa fa-angle-left"></i></a> <a href="principles-more-operations.html" class="navigation navigation-next " aria-label="Next page"><i class="fa fa-angle-right"></i></a> </div> </div> <script src="libs/gitbook-2.6.7/js/app.min.js"></script> <script src="libs/gitbook-2.6.7/js/lunr.js"></script> <script src="libs/gitbook-2.6.7/js/clipboard.min.js"></script> <script src="libs/gitbook-2.6.7/js/plugin-search.js"></script> <script src="libs/gitbook-2.6.7/js/plugin-sharing.js"></script> <script src="libs/gitbook-2.6.7/js/plugin-fontsettings.js"></script> <script src="libs/gitbook-2.6.7/js/plugin-bookdown.js"></script> <script src="libs/gitbook-2.6.7/js/jquery.highlight.js"></script> <script src="libs/gitbook-2.6.7/js/plugin-clipboard.js"></script> <script> gitbook.require(["gitbook"], function(gitbook) { gitbook.start({ "sharing": { "github": false, "facebook": false, "twitter": false, "linkedin": false, "weibo": false, "instapaper": false, "vk": false, "all": ["facebook", "twitter", "linkedin", 
"weibo", "instapaper"] }, "fontsettings": { "theme": "white", "family": "sans", "size": 2 }, "edit": { "link": null, "text": null }, "history": { "link": null, "text": null }, "view": { "link": null, "text": null }, "download": null, "toc": { "collapse": "section", "scroll_highlight": true } }); }); </script> <!-- dynamically load mathjax for compatibility with self-contained --> <script> (function () { var script = document.createElement("script"); script.type = "text/javascript"; var src = "true"; if (src === "" || src === "true") src = "https://mathjax.rstudio.com/latest/MathJax.js?config=TeX-MML-AM_CHTML"; if (location.protocol !== "file:") if (/^https?:/.test(src)) src = src.replace(/^https?:/, ''); script.src = src; document.getElementsByTagName("head")[0].appendChild(script); })(); </script> </body> </html> <file_sep>/materials/quizzes/regression_exercise.md --- title: Regression analysis quiz author: CSMC320 fontfamily: utopia geometry: margin=1in --- Name(s): UID(s): Suppose you have data for CMSC undergraduate students including $X_1=$ `grade in 351` (categorical: A,B,C,D, or F) and (numeric) $X_2=$ hours spent learning Java. You want to build a linear regression model of response $Y=$ value of stock options at maturation in first job out of school. a. Since `grade in 351` is categorical, you need to define a set of dummy predictors to include in the linear regression model. How would you do that? How many dummy predictors would you add, and what would be their value be for students with A, B, C, D, or, F in 351. b. Write out the full model (without interactions) in the form $Y=\beta_0+\beta_1 X_1 + \cdots$. How do you interpret the estimate derived for $\beta_0$ and each of the parameters corresponding to the dummy predictors you added in part a. 
<file_sep>/materials/classroom-scripts/cmsc320_class-script_20190328.R library(tidyverse) data(Auto) # faceting and discretization example Auto %>% mutate(discrete_mpg = cut(mpg, breaks=4)) %>% ggplot(aes(x=weight, y=horsepower)) + geom_point() + facet_wrap(~discrete_mpg) # imputing flights %>% mutate(mean_delay = mean(dep_delay, na.rm=TRUE)) %>% mutate(imputed_delay = ifelse(is.na(dep_delay), mean_delay, dep_delay)) %>% ggplot(aes(x=log(imputed_delay+10))) + geom_histogram() <file_sep>/materials/quizzes/prediction_metrics.md --- title: Classification Quiz author: CMSC320 geometry: margin=1in fontfamily: utopia --- Name(s): UID(s): 1. Suppose an individual has a 25% chance of defaulting on her credit card payment. What are the odds that he will default? 2. Suppose we collect data for a group of students in a programming languages class with variables X1 = hours studied (continuous), X2 = undergrad GPA (continuous), and Y = receive an A (binary, "yes/no"). We fit a logistic regression and $Y \approx \beta_0 + \beta_1 X1 + \beta_2 X2$ and get estimates $\hat{\beta}_0=-6, \hat{\beta}_1=0.05,\hat{\beta}_2=1$. Estimate the probability that a student who studies for 30h and has an undergraduate GPA of 3.5 gets an A in the class. 3. With estimated parameters from previous question, and GPA of 3.5 as before, how many hours would the student need to study to have a 50% chance of getting an A in the class? 4. Consider the following confusion matrix. | | Observed + | Observed - | Total | |--------------|------------|------------|-------| | Predicted + | 80 | 20 | 100 | | Predicted - | 30 | 70 | 100 | | Total | 110 | 90 | | a. How many True Positives are there? b. How many False Negatives are there? c. What is the recall? d. What is the precision? e. What is the True Positive Rate? f. What is the False Positive Rate? <file_sep>/content/homeworks/er_sql.md --- title: "Homework 2: ER Model and SQL" date: "2018-02-20" --- Practice making an ER diagram and writing SQL queries. 
**DUE**: Wednesday Feb. 19, 11:59pm <!--more--> ## Setting up: The Lahman baseball database You will be using data from a very useful database on baseball teams, players and seasons curated by <NAME> available at [http://www.seanlahman.com/baseball-archive/statistics/](http://www.seanlahman.com/baseball-archive/statistics/). The database has been made available as a `sqlite` database [https://github.com/jknecht/baseball-archive-sqlite](https://github.com/jknecht/baseball-archive-sqlite). `sqlite` is a light-weight, file-based database management system that is well suited for small projects and prototypes. You can read more about the dataset here: http://seanlahman.com/files/database/readme2016.txt ### Download Database Create a directory for this homework and download the sqlite file into it. Download the file here: https://github.com/jknecht/baseball-archive-sqlite/raw/master/lahman2016.sqlite ### Setup R Make sure the `RSQLite` package is installed, if not install it. Check you can connect to the database from R by evaluating the following code: ```r db <- DBI::dbConnect(RSQLite::SQLite(), "lahman2016.sqlite") DBI::dbListTables(db) DBI::dbDisconnect(db) ``` You should see the list of tables in the Lahman database. Note that this assumes the working directory in the R console contains the SQLite file. Otherwise, you need to provide the full path to the file. #### Download shell file Download the Rmarkdown shell file here: [HW2 Rmarkdown shell](/misc/hw2_er-sql.Rmd) and fill in with your answers. ### Setup Python Make sure the `sqlite3` package is installed, if not install it. Check you can connect to the database from Python by evaluating the following code: ```python con = sqlite3.connect('r/data/lahman2016.sqlite') con.cursor() con.close() ``` #### Download shell notebook Download the jupyter notebook shell here: [HW2 ipynb shell](/misc/hw2_er-sql.ipynb) and fill in with your answers. 
### (Optional) SQLite Command Line Interface You may find working with the SQLite command line interface useful as well. You can download here: https://sqlite.org/download.html ## ER Diagram Consider the following subset of tables from the Lahman dataset: - _TeamFranchises_: these are the corporate team entities. Attributes: `franchID`, `franchName`, `active`. - _Teams_: specific teams fielded by a franchise in a given season. Attributes: `yearID`, `teamID`, `lgID`, `franchID`, `G`, `W`, `L`. - _Master_: overall information about the people who play the game. Attributes: `playerID`, `birthYear`, `birthMonth`, `birthDay`, `birthCountry`, `birthState`, `birthCity`, `nameFirst`, `nameLast`, `nameGiven`, `weight`, `height`. - _Batting_: statistics of player performance on offense. Attributes: `playerID`, `yearID`, `teamID`, `lgID`, `G`,`AB`, `H`, `2B`, `3B`, `HR`. - _Salaries_: salary paid by team to player. Attributes: `yearID`, `teamID`, `lgID`, `playerID`, `salary`. Draw an ER diagram describing this Schema. Indicate primary keys as appropriate. ## SQL Exercise Write a SQL query to answer each of the following questions: 1) How many franchises are listed in the database (see [`count`](https://sqlite.org/lang_aggfunc.html#count))? 2) How many franchises are currently active? 3) Which teams won more than 100 games in one season between 2000 and 2015? Order result by descending number of wins. (Attribute `W` of the Teams table contains the number of wins) 4) What is the franchise name of the team with the most total wins in the database? 5) What is the franchise name of the team with the highest winning percentage in a season in the database? (Win percentage is `W/G`) 6) What is the franchise name of the team with the highest single-year payroll between 2000 and 2015? 
7) (BONUS from [MDSR book](https://mdsr-book.github.io/)): Identify players (by first and last name) that have attained through their career either a) 500 or more HRs or b) 3000 or more hits (H) _and_ have not been inducted to the Hall of Fame (see `HallOfFame` table). ## Submitting Enter your answers in the shell Rmarkdown file or Jupyter notebook linked above. Export your work to PDF and submit to ELMS. <file_sep>/content/projects/project4/index.md --- date: 2016-11-22T13:04:21-05:00 title: "Project 4: Classification" --- An experiment on classification: [R instructions here](classification_zillow/) [Python instructions here](model_selection.ipynb) ## Submission Follow directions for submission in each of the instructions linked above. All axes in plots should be labeled in an informative manner. Your answers to any question that refers to a plot should include both (a) a text description of your plot, and (b) a sentence or two of interpretation as it relates to the question asked. <file_sep>/materials/lecture-notes/05-basic_operations.Rmd # Principles: Basic Operations Now that we have a data frame describing our data, let's learn a few fundamental operations we perform on data frames on almost any analysis. We divide these first set of operations into two groups: operations on _attributes_ and operations on _entitites_. These operations are defined in the `dplyr` package, part of the `tidyverse`, and are described in more detail in the "R for Data Science" textbook available in the course logistics page: http://r4ds.had.co.nz/transform.html. ## Operations that subset attributes ### `select` In our data set we have a large number of attributes describing each arrest. Now, suppose we only want to study patterns in these arrests based on a smaller number of attributes for purposes of efficiency, since we would operate over less data, or interpretability. In that case we would like to create a data frame that contains only those attributes of interest. 
We use the `select` function for this. ![](img/select.png) Let's create a data frame containing only the `age`, `sex` and `district` attributes ```{r select_example} select(arrest_tab, age, sex, district) ``` The first argument to the `select` function is the data frame we want to operate on, the remaining arguments describe the attributes we want to include in the resulting data frame. Note a few other things: 1) The first argument to `select` is a data frame, and the value returned by `select` is also a data frame 2) As always you can learn more about the function using `?select` Attribute descriptor arguments can be fairly sophisticated. For example, we can use positive integers to indicate attribute (column) indices: ```{r select_index} select(arrest_tab, 1, 3, 4) ``` R includes a useful operator to describe ranges. E.g., `1:5` would be attributes 1 through 5: ```{r select_range} select(arrest_tab, 1:5) ``` We can also use other helper functions to create attribute descriptors. For example, to choose all attributes that begin with the letter `a` we can use the `starts_with` function which uses partial string matching: ```{r select_starts_with} select(arrest_tab, starts_with("a")) ``` We can also use the attribute descriptor arguments to _drop_ attributes. For instance using descriptor `-age` returns the arrest data frame with all but the `age` attribute included: ```{r drop_age} select(arrest_tab, -age) ``` ### `rename` To improve interpretability during an analysis we may want to rename attributes. We use the `rename` function for this: ```{r rename} rename(arrest_tab, arrest_date=arrestDate) ``` Like `select`, the first argument to the function is the data frame we are operating on. The remaining arguments specify attributes to rename and the name they will have in the resulting data frame. Note that arguments in this case are _named_ (have the form `lhs=rhs`). 
We can have selection _and_ renaming by using named arguments in `select`: ```{r rename_select} select(arrest_tab, age, sex, arrest_date=arrestDate) ``` Also like `select`, the result of calling `rename` is a data frame. In fact, this will be the case for almost all operations in the `tidyverse`: they operate on data frames (specified as the first argument in the function call) and return data frames. ## Operations that subset entities Next, we look at operations that select entities from a data frame. We will see a few operations to do this: selecting specific entities (rows) by position, selecting them based on attribute properties, and random sampling. ![](img/subset.png) ### `slice` We can choose specific entities by their row position. For instance, to choose entities in rows 1,3 and 10, we would use the following: ```{r slice} slice(arrest_tab, c(1, 3, 10)) ``` As before, the first argument is the data frame to operate on. The second argument is a _vector_ of indices. We used the `c` function (for concatenate) to create a vector of indices. We can also use the range operator here: ```{r slice_range} slice(arrest_tab, 1:5) ``` To create general sequences of indices we would use the `seq` function. For example, to select entities in even positions we would use the following: ```{r slice_even} slice(arrest_tab, seq(2, nrow(arrest_tab), by=2)) ``` ### `filter` We can also select entities based on attribute properties. For example, to select arrests where age is less than 18 years old, we would use the following: ```{r filter} filter(arrest_tab, age < 18) ``` You know by now what the first argument is... The second argument is an expression that evaluates to a logical value (`TRUE` or `FALSE`), if the expression evaluates to TRUE for a given entity (row) then that entity (row) is part of the resulting data frame. Operators used frequently include: `==`, `!=`: tests equality and inequality respectively (categorical, numerical, datetimes, etc.) 
`<`, `>`, `<=`, `>=`: tests order relationships for ordered data types (not categorical) `!`, `&`, `|`: not, and, or, logical operators To select arrests with ages between 18 and 25 we can use ```{r filter_and} filter(arrest_tab, age >= 18 & age <= 25) ``` The filter function can take multiple logical expressions. In this case they are combined with `&`. So the above is equivalent to ```{r filter_and2} filter(arrest_tab, age >= 18, age <= 25) ``` ### `sample_n` and `sample_frac` Frequently we will want to choose entities from a data frame at random. The `sample_n` function selects a specific number of entities at random: ```{r sample_n} sample_n(arrest_tab, 10) ``` The `sample_frac` function selects a fraction of entities at random: ```{r sample_frac} sample_frac(arrest_tab, .1) ``` ## Pipelines of operations All of the functions implementing our first set of operations have the same argument/value structure. They take a data frame as a first argument and return a data frame. We refer to this as the _data-->transform-->data_ pattern. This is the core of a lot of what we will do in class as part of data analyses. Specifically, we will combine operations into _pipelines_ that manipulate data frames. The `dplyr` package introduces _syntactic sugar_ to make this pattern explicit. For instance, we can rewrite the `sample_frac` example using the "pipe" operator `%>%`: ```{r sample_frac_pipe} arrest_tab %>% sample_frac(.1) ``` The `%>%` binary operator takes the value to its **left** and inserts it as the first argument of the function call to its **right**. So the expression `LHS %>% f(another_argument)` is **equivalent** to the expression `f(LHS, another_argument)`. Using the `%>%` operator and the _data-->transform-->data_ pattern of the functions we've seen so far, we can create pipelines. 
For example, let's create a pipeline that: 1) filters our dataset to arrests between the ages of 18 and 25 2) selects attributes `sex`, `district` and `arrestDate` (renamed as `arrest_date`) 3) samples 50% of those arrests at random We will assign the result to variable `analysis_tab` ```{r pipeline} analysis_tab <- arrest_tab %>% filter(age >= 18, age <= 25) %>% select(sex, district, arrest_date=arrestDate) %>% sample_frac(.5) analysis_tab ``` **Exercise**: Create a pipeline that: 1) filters dataset to arrests from the "SOUTHERN" district occurring before "12:00" (`arrestTime`) 2) selects attributes, `sex`, `age` 3) samples 10 entities at random <file_sep>/materials/slides/principles/principles.Rmd --- title: "Principles" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: chakra: libs/remark-0.14.0.min.js lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Principles] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] --- ```{r setup1, include=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) ``` ## Measurements and Data Types ```{r setup, echo=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` ### A data analysis to get us going Analysis of Baltimore crime data. Downloaded from Baltimore City's awesome open data site (this was downloaded a couple of years ago so if you download now, you will get different results). The repository for this particular data is here. 
[https://data.baltimorecity.gov/Crime/BPD-Arrests/3i3v-ibrt](https://data.baltimorecity.gov/Crime/BPD-Arrests/3i3v-ibrt) --- layout: true ## Getting data --- We've prepared the data previously into a comma-separated value file (`.csv` file): - each column defines attributes that describe arrests - each line contains attribute values (separated by commas) describing specific arrests. --- **Note:** To download this dataset to follow along you can use the following code: ```{r, eval=FALSE} if (!dir.exists("data")) dir.create("data") download.file("https://www.hcbravo.org/IntroDataSci/misc/BPD_Arrests.csv", destfile="data/BPD_Arrests.csv") ``` --- To make use of this dataset we want to assign the result of calling `read_csv` (i.e., the dataset) to a variable: ```{r vars1, message=FALSE} library(tidyverse) arrest_tab <- read_csv("data/BPD_Arrests.csv") arrest_tab ``` ```{r echo=FALSE, eval=FALSE} arrest_tab$race <- factor(arrest_tab$race) arrest_tab$sex <- factor(arrest_tab$sex) arrest_tab$incidentOffense <- factor(arrest_tab$incidentOffense) ``` --- Now we can ask what _type_ of value is stored in the `arrest_tab` variable: ```{r type} class(arrest_tab) ``` --- The `data.frame` is a workhorse data structure in R. It encapsulates the idea of _entities_ (in rows) and _attribute values_ (in columns). We call these _rectangular datasets_. The other types `tbl_df` and `tbl` are added by `tidyverse` for improved functionality. -- Later, we will see how the `pandas` Python package provides the same semantics. --- We can ask other features of this dataset: ```{r questions} # This is a comment in R, by the way # How many rows (entities) does this dataset contain? nrow(arrest_tab) # How many columns (attributes)? ncol(arrest_tab) # What are the names of those columns? colnames(arrest_tab) ``` --- Now, in Rstudio you can view the data frame using `View(arrest_tab)`. 
---
layout: true
exclude: true

## Names, values and functions

---
exclude: true

Let's review the concepts of names, values and functions again. In the console, we've now written a few instructions, e.g. `View(arrest_tab)`. Let's take a closer look at how these instructions are put together.

**_expressions_**: first of all, we call these instructions _expressions_, which are just text that R can evaluate into a value. `View(arrest_tab)` is an expression.

**_values_**: so, what's a value? They are numbers, strings, data frames, etc. This is the data we will be working with. The number `2` is a value. So is the string `"Hector"`. So, what value is produced when R evaluates the expression `View(arrest_tab)`? Nothing, which we also treat as a value. That wasn't very interesting, but it does have a side effect: it shows the `arrest_tab` dataset in the Data viewer. How about a simpler expression: `arrest_tab`, what value is produced when R evaluates the expression `arrest_tab`? The data.frame containing that data. Try it out in the console.

**_names_**: so if `arrest_tab` isn't a value, what is it? It is a _name_. We use these to refer to values. So, when we write the expression `arrest_tab`, we tell R we want the _value_ referenced by the name `arrest_tab`, that is, the data itself!

![](img/names_values.png)

**_functions_**: Besides numbers, strings, data frames, etc. another important type of value is the _function_. Functions are a series of instructions that take some input value and produce a different value. The name `View` refers to the function that takes a data frame as input, and displays it in the Data viewer. Functions are called using the parentheses we saw before: `View(arrest_tab)`, the parentheses say that you are passing input `arrest_tab` to the function `View`. We'll see later how we can write our own functions.

---
layout: true

## Entities and attributes

---

We use the term _entities_ to refer to the objects represented in a dataset.
In our example dataset each arrest is an _entity_.

--

In a rectangular dataset (a data frame) this corresponds to rows in a table.

---

A dataset contains _attributes_ for each entity. Attributes of each arrest would be: the person's _age_, the type of offense, the location, etc.

--

In a rectangular dataset, this corresponds to the columns in a table.

---

This language of _entities_ and _attributes_ is commonly used in the database literature. In statistics you may see _experimental units_ or _samples_ for _entities_ and _covariates_ for _attributes_. In other instances _observations_ for _entities_ and _variables_ for _attributes_. In Machine Learning you may see _example_ for _entities_ and _features_ for _attributes_. For the most part, all of these are exchangeable.

---

This table summarizes the terminology:

| Field | Entities | Attributes |
|-------|----------|------------|
| Databases | Entities | Attributes |
| Machine Learning | Examples | Features |
| Statistics | Observations/Samples | Variables/Covariates |

---
layout: false

## Categorical attributes

A categorical attribute for a given entity can take only one of a finite set of values.

For example, the `sex` variable can only have value `M`, `F`, or \`\` (we'll talk about missing data later in the semester).

---
layout: true

## Categorical attributes

---

The result of a coin flip is categorical

The outcome of rolling an 8-sided die, is also categorical

Can you think of other examples?

---

Categorical data may be _ordered_ or _unordered_

In our example, all categorical data is unordered.

--

Examples of _ordered categorical data_ are grades in a class, Likert scale categories, e.g., `strongly agree`, `agree`, `neutral`, `disagree`, `strongly disagree`, etc

---
layout: true

## Discrete numeric attributes

---

These are attributes that can take specific values from elements of ordered, discrete (possibly infinite) sets.

The most common set in this case would be the non-negative integers.
-- This data is commonly the result of counting processes. In our example dataset, age, measured in years, is a discrete attribute. --- Frequently, we obtain datasets as the result of summarizing, or aggregating other underlying data. In our case, we could construct a new dataset containing the number of arrests per neighborhood (we will see how to do this later) --- ```{r, echo=FALSE} library(dplyr) arrest_tab %>% group_by(neighborhood) %>% summarize(number_of_arrests=n()) %>% head() ``` --- layout: true ## Discrete Numeric Attributes --- In this new dataset, the _entities_ are each neighborhood, the `number_of_arrests` attribute is a _discrete numeric_ attribute. --- Other examples: - the number of students in a class is discrete, - the number of friends for a specific Facebook user. Can you think of other examples? --- Distinctions between ordered categorical and discrete numerical data: _ordered categorical data do not have magnitude_ --- For instance, is an 'A' in a class twice as good as a 'C'? Is a 'C' twice as good as a 'D'? -- Not necessarily. Grades don't have an inherent magnitude. --- However, if we _encode_ grades as 'F=0,D=1,C=2,B=3,A=4', etc. they do have magnitude. In that case, an 'A' _is_ twice as good as a 'C', and a 'C' _is_ twice as good as a 'D'. --- In summary, if ordered data has magnitude, then _discrete numeric_ if not, _ordered categorical_. --- layout: true ## Continuous numeric data --- Attributes that can take any value in a continuous set. For example, a person's height, in say inches, can take any number (within the range of human heights). 
--- class: split-40 .column[ Different dataset: entities are cars and we look at continuous numeric attributes `speed` and `stopping distance` ] .column[ ```{r f04_cars_plot, echo=FALSE} data(cars) cars %>% ggplot(aes(x=speed, y=dist)) + geom_point(size=3) + theme_bw() + labs(x="speed (mph)", y="stopping distance (ft)") #plot(cars$speed, cars$dist, pch=19, xlab="speed (mph)", #ylab="stopping distance (ft)") ``` ] --- layout: true ## Continuous Numeric Attributes --- The distinction between _continuous_ and _discrete_ can be tricky: measurements that have finite precision are, in a sense, discrete. -- Remember, continuity is _not_ a property of the specific dataset you have in hand, It _is_ a property of the process you are measuring. --- The number of arrests in a neighborhood cannot be fractional, regardless of the precision at which we measure this. -- On the other hand, if we had the appropriate tool, we could measure a person's height with infinite precision. --- This distinction is very important when we build statistical models of datasets for analysis. For now, think of discrete data as the result of counting, and continuous data the result of some physical measurement. -- Here's a question: is `age` in our dataset a continuous or discrete numeric value? --- layout: true ## Other examples --- [MNIST dataset of handwritten digits](https://www.kaggle.com/c/digit-recognizer). Each image is an _entity_. Each image has a _label_ attribute which states which of the digits 0,1,...9 is represented by the image. What type of data is this (categorical, continuous numeric, or discrete numeric)? --- ![](img/mnist_example.png) --- Each image is represented by grayscale values in a 28x28 grid. That's 784 attributes, one for each square in the grid, containing a grayscale value. What type of data are these other 784 attributes? --- layout: true ## Other important datatypes --- - Text: Arbitrary strings that do not encode a categorical attribute. 
- Datetime: Date and time of some event or observation (e.g., `arrestDate`, `arrestTime`) - Geolocation: Latitude and Longitude of some event or observation (e.g., `Location.`) - Relationships: links between entities, with links having their own attributes (e.g., social network, how long have two people followed each other) --- layout: true ## Units --- Something that we tend to forget but is **extremely** important for the modeling and interpretation of data. Attributes are _measurements_ and that they have _units_. For example, age of a person can be measured in different units: _years_, _months_, etc. --- These can be converted to one another, but nonetheless in a given dataset, that _attribute_ or measurement will be recorded in some specific units. Similar arguments go for distances and times, for example. --- In other cases, we may have unitless measurements (we will see later an example of this when we do _dimensionality reduction_). In these cases, it is worth thinking about _why_ your measurements are unit-less. --- When performing analyses that try to summarize the effect of some measurement or attribute on another, units matter a lot! We will see the importance of this in our _regression_ section. For now, make sure you make a mental note of units for each measurement you come across. Important when modeling and interpreting the results of these models. 
<file_sep>/content/lecture-note/evaluation/index.md --- date: 2016-11-16T08:09:08-05:00 title: Evaluation --- How to use sampling methods to evaluate statistical models [Lecture notes](Evaluation/) <file_sep>/materials/slides/eda/eda.Rmd --- title: "Exploratory Data Analysis" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Exploratory Data Analysis] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] ```{r setup1, echo=FALSE, message=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) library(tidyverse) theme_set(theme_bw()) ``` --- layout: true ## Exploratory Data Analysis --- What to do with a dataset before modeling using Statistics or Machine Learning. - Better understand the data at hand, - help us make decisions about appropriate modeling methods, - helpful data transformations that may be helpful to do. --- There are many instances where statistical data modeling is not required to tell a clear and convincing story with data. Many times an effective visualization can lead to convincing conclusions. --- **Goal** Perform an initial exploration of attributes/variables across entities/observations. We will concentrate on exploration of single or pairs of variables. Later on in the course we will see _dimensionality reduction_ methods that are useful in exploration of more than two variables at a time. --- Computing summary statistics - how to interpret them - understand properties of attributes. Data transformations - change properties of variables to help in visualization or modeling. 
First, how to use visualization for exploratory data analysis. --- Ultimately, the purpose of EDA is to spot problems in data (as part of data wrangling) and understand variable properties like: - central trends (mean) - spread (variance) - skew - outliers This will help us think of possible modeling strategies (e.g., probability distributions) --- layout: true ## Visualization of single variables --- class: split-50 .column[ ```r flights %>% sample_frac(.1) %>% rowid_to_column() %>% ggplot(aes(x=rowid, y=dep_delay)) + geom_point() ``` ] .column[ ```{r, warning=FALSE, dev="png", echo=FALSE, fig.align="center", fig.width=6.5} library(nycflights13) flights %>% sample_frac(.1) %>% rowid_to_column() %>% ggplot(aes(x=rowid, y=dep_delay)) + geom_point() ``` ] --- class: split-50 .column[ ```r flights %>% sample_frac(.1) %>% arrange(dep_delay) %>% rowid_to_column() %>% ggplot(aes(x=rowid, y=dep_delay)) + geom_point() ``` ] .column[ ```{r, warning=FALSE, dev="png",fig.align="center", fig.width=6.5,echo=FALSE} flights %>% sample_frac(.1) %>% arrange(dep_delay) %>% rowid_to_column() %>% ggplot(aes(x=rowid, y=dep_delay)) + geom_point() ``` ] --- What can we make of that plot now? Start thinking of _central tendency_, _spread_ and _skew_ as you look at that plot. Let's now create a graphical summary of that variable to incorporate observations made from this initial plot. Let's start with a _histogram_: it divides the _range_ of the `dep_delay` attribute into **equal-sized** bins, then plots the number of observations within each bin. --- class: split-50 .column[ ```r flights %>% ggplot(aes(x=dep_delay)) + geom_histogram() ``` ] .column[ ```{r, echo=FALSE, fig.align="center", fig.width=6.5, warning=FALSE, message=FALSE} flights %>% ggplot(aes(x=dep_delay)) + geom_histogram() ``` ] --- **Density plot** We can (conceptually) make the bins as small as possible and get a smooth curve that describes the _distribution_ of values of the `dep_delay` variable. 
--- class: split-50 .column[ ```r flights %>% ggplot(aes(x=dep_delay)) + geom_density() ``` ] .column[ ```{r, echo=FALSE, fig.width=6.5, fig.align="center", message=FALSE, warning=FALSE} flights %>% ggplot(aes(x=dep_delay)) + geom_density() ``` ] --- **Boxplot** Succint graphical summary of the distribution of a variable. --- class: split-50 .column[ ```r flights %>% ggplot(aes(x='',y=dep_delay)) + geom_boxplot() ``` ] .column[ ```{r, dev="png", echo=FALSE, fig.width=6.5,fig.align="center",warning=FALSE} flights %>% ggplot(aes(x='',y=dep_delay)) + geom_boxplot() ``` ] --- That's not very clear to see, so let's do a _logarithmic_ transformation of this data to see distribution better. --- class: split-50 .column[ ```r flights %>% mutate(min_delay=min(dep_delay, na.rm=TRUE)) %>% mutate(log_dep_delay = log(dep_delay - min_delay)) %>% ggplot(aes(x='', y=log_dep_delay)) + geom_boxplot() ``` ] .column[ ```{r, dev="png", echo=FALSE, fig.align='center',fig.width=6.5,warning=FALSE} flights %>% mutate(min_delay=min(dep_delay, na.rm=TRUE)) %>% mutate(log_dep_delay = log(dep_delay - min_delay)) %>% ggplot(aes(x='', y=log_dep_delay)) + geom_boxplot() ``` ] --- So what does this represent? (a) central tendency (using the median) is represented by the black line within the box, (b) spread (using inter-quartile range) is represented by the box and whiskers. (c) outliers (data that is _unusually_ outside the spread of the data) --- layout: true ## Visualization of pairs of variables --- How do each of the distributional properties we care about (central trend, spread and skew) of the values of an attribute change based on the value of a different attribute? Suppose we want to see the relationship between `dep_delay`, a _numeric_ variable, and `origin`, a _categorical_ variable. --- Previously, we saw used `group_by`-`summarize` operations to compute attribute summaries based on the value of another attribute. We also called this _conditioning_. 
In visualization we can start thinking about conditioning as we saw before. Here is how we can see a plot of the distribution of departure delays _conditioned_ on origin airport. --- class: split-50 .column[ ```r flights %>% mutate(min_delay = min(dep_delay, na.rm=TRUE)) %>% mutate(log_dep_delay = log(dep_delay - min_delay)) %>% ggplot(aes(x=origin, y=log_dep_delay)) + geom_boxplot() ``` ] .column[ ```{r, dev="png", echo=FALSE, warning=FALSE,fig.align="center",fig.width=6.5} flights %>% mutate(min_delay = min(dep_delay, na.rm=TRUE)) %>% mutate(log_dep_delay = log(dep_delay - min_delay)) %>% ggplot(aes(x=origin, y=log_dep_delay)) + geom_boxplot() ``` ] --- For pairs of continuous variables, the most useful visualization is the scatter plot. This gives an idea of how one variable varies (in terms of central trend, variance and skew) conditioned on another variable. --- class: split-50 .column[ ```r flights %>% sample_frac(.1) %>% ggplot(aes(x=dep_delay, y=arr_delay)) + geom_point() ``` ] .column[ ```{r, eval=TRUE, dev="png", echo=FALSE, fig.width=6.5, fig.align="center", warning=FALSE} flights %>% sample_frac(.1) %>% ggplot(aes(x=dep_delay, y=arr_delay)) + geom_point() ``` ] --- layout: true ## EDA with the grammar of graphics --- While we have seen a basic repertoire of graphics it's easier to proceed if we have a bit more formal way of thinking about graphics and plots. The central premise is to characterize the building pieces behind plots: 1. The data that goes into a plot, works best when data is tidy 2. The mapping between data and *aesthetic* attributes 3. 
The *geometric* representation of these attributes --- class: split-50 .column[ ```r batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=R)) + geom_point() ``` ] .column[ ```{r, echo=FALSE, fig.align="center", fig.width=6.5} batting <- tbl_df(Batting) batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=R)) + geom_point() ``` ] --- **Data**: Batting table filtering for year **Aesthetic attributes**: - x-axis mapped to variables `AB` - y-axis mapped to variable `R` **Geometric Representation**: points! Now, you can cleanly distinguish the constituent parts of the plot. --- class: split-50 E.g., change the geometric representation .column[ ```r batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=R, label=teamID)) + geom_text() ``` ] .column[ ```{r, echo=FALSE, fig.align="center", fig.width=6.5, fig.height=6} # scatter plot of at bats vs. runs for 2010 batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=R, label=teamID)) + geom_text() ``` ] --- class: split-50 E.g., change the data. .column[ ```r # scatter plot of at bats vs. runs for 1995 batting %>% filter(yearID == "1995") %>% ggplot(aes(x=AB, y=R)) + geom_point() ``` ] .column[ ```{r, echo=FALSE, fig.align="center", fig.width=6.5, fig.height=6} # scatter plot of at bats vs. runs for 1995 batting %>% filter(yearID == "1995") %>% ggplot(aes(x=AB, y=R)) + geom_point() ``` ] --- class: split-50 E.g., change the aesthetic. .column[ ```r # scatter plot of at bats vs. hits for 2010 batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=H)) + geom_point() ``` ] .column[ ```{r, echo=FALSE, fig.align="center", fig.width=6.5, fig.height=6} # scatter plot of at bats vs. hits for 2010 batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=H)) + geom_point() ``` ] --- Let's make a line plot What do we change? (data, aesthetic or geometry?) 
--- class: split-50 .column[ ```r batting %>% filter(yearID == "2010") %>% sample_n(100) %>% ggplot(aes(x=AB, y=H)) + geom_line() ``` ] .column[ ```{r, echo=FALSE, fig.align="center", fig.width=6.5} batting %>% filter(yearID == "2010") %>% sample_n(100) %>% ggplot(aes(x=AB, y=H)) + geom_line() ``` ] --- Let's add a regression line What do we add? (data, aesthetic or geometry?) --- class: split-50 What can we see about central trend, variation and skew with this plot? .column[ ```r batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=H)) + geom_point() + geom_smooth(method=lm) ``` ] .column[ ```{r, echo=FALSE, fig.align="center", fig.width=6.5, fig.height=5.5} batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=H)) + geom_point() + geom_smooth(method=lm) ``` ] --- class: split-50 Using other aesthetics we can incorporate information from other variables. .column[ Color: color by categorical variable ```r batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=H, color=lgID)) + geom_point() + geom_smooth(method=lm) ``` ] .column[ ```{r, echo=FALSE, fig.align="center", fig.width=6.5, fig.height=5.5} batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=H, color=lgID)) + geom_point() + geom_smooth(method=lm) ``` ] --- class: split-50 .column[ Size: size by (continuous) numeric variable ```r batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=R, size=HR)) + geom_point() + geom_smooth(method=lm) ``` ] .column[ ```{r, echo=FALSE, fig.align="center", fig.width=6.5, fig.height=5.5} batting %>% filter(yearID == "2010") %>% ggplot(aes(x=AB, y=R, size=HR)) + geom_point() + geom_smooth(method=lm) ``` ] --- ### Faceting The last major component of exploratory analysis called `faceting` in visualization, corresponds to `conditioning` in statistical modeling, we've seen it as the motivation of `grouping` when wrangling data. 
--- class: split-50 .column[ ```r batting %>% filter(yearID %in% c("1995", "2000", "2010")) %>% ggplot(aes(x=AB, y=R, size=HR)) + facet_grid(lgID~yearID) + geom_point() + geom_smooth(method=lm) ``` ] .column[ ```{r, echo=FALSE, fig.align="center", fig.width=6.5} batting %>% filter(yearID %in% c("1995", "2000", "2010")) %>% ggplot(aes(x=AB, y=R, size=HR)) + facet_grid(lgID~yearID) + geom_point() + geom_smooth(method=lm) ``` ] --- layout: true ## Exploratory Data Analysis: Summary Statistics --- Let's continue our discussion of Exploratory Data Analysis. In the previous section we saw ways of visualizing attributes (variables) using plots to start understanding properties of how data is distributed. In this section, we start discussing statistical summaries of data to quantify properties that we observed using visual summaries and representations. --- Remember that one purpose of EDA is to spot problems in data (as part of data wrangling) and understand variable properties like: - central trends (mean) - spread (variance) - skew - suggest possible modeling strategies (e.g., probability distributions) --- One last note on EDA. <NAME> was an exceptional scientist/mathematician, who had profound impact on statistics and Computer Science. A lot of what we cover in EDA is based on his groundbreaking work. [https://www.stat.berkeley.edu/~brill/Papers/life.pdf](https://www.stat.berkeley.edu/~brill/Papers/life.pdf). --- ## Range Part of our goal is to understand how variables are distributed in a given dataset. Note, again, that we are not using _distributed_ in a formal mathematical (or probabilistic) sense. All statements we are making here are based on data at hand, so we could refer to this as the _empirical distribution_ of data. --- class: split-50 Let's use a dataset on diamond characteristics as an example. 
```{r,echo=FALSE, fig.align="center", fig.height=5.5,fig.width=6.5}
data(diamonds)
diamonds %>%
  ggplot(aes(x=depth)) +
  geom_histogram(bins=100)
```

---

### Notation

We assume that we have data across $n$ entities (or observational units) for $p$ attributes.

In this dataset $n=`r nrow(diamonds)`$ and $p=`r ncol(diamonds)`$.

However, let's consider a single attribute, and denote the data for that attribute (or variable) as $x_1, x_2, \ldots, x_n$.

---

Since we want to understand how data is distributed across a _range_, we should first define the range.

```{r}
diamonds %>%
  summarize(min_depth = min(depth), max_depth = max(depth))
```

---

We use notation $x_{(1)}$ and $x_{(n)}$ to denote the minimum and maximum statistics.

In general, we use notation $x_{(q)}$ for the rank statistics, e.g., the $q$th largest value in the data.

---

### Central Tendency

Now that we know the range over which data is distributed, we can figure out a first summary of how data is distributed across this range.

Let's start with the _center_ of the data: the _median_ is a statistic defined such that half of the data has a smaller value. We can use notation $x_{(n/2)}$ (a rank statistic) to represent the median.

---

```{r, echo=FALSE}
diamonds %>%
  ggplot(aes(x=depth)) +
  geom_histogram(bins=100) +
  geom_vline(aes(xintercept=median(depth)), color="red")
```

---

### Derivation of the mean as central tendency statistic

Best known statistic for central tendency is the _mean_, or average of the data: $\overline{x} = \frac{1}{n} \sum_{i=1}^n x_i$.

It turns out that in this case, we can be a bit more formal about what "center" means.

Let's say that the _center_ of a dataset is a point in the range of the data that is _close_ to the data.

To say that something is _close_ we need a measure of _distance_.

---

So for two points $x_1$ and $x_2$ what should we use for distance?

The distance between data point $x_1$ and $x_2$ is $(x_1 - x_2)^2$.
---

So, to define the _center_, let's build a criterion based on this distance by adding this distance across all points in our dataset:

$$
RSS(\mu) = \frac{1}{2} \sum_{i=1}^n (x_i - \mu)^2
$$

Here RSS means _residual sum of squares_, and we use $\mu$ to stand for candidate values of _center_.

---

We can plot RSS for different values of $\mu$:

```{r, echo=FALSE, fig.align="center",fig.height=5.5}
rss <- function(mu) { 0.5 * sum((diamonds$depth - mu)^2)}
mu_candidates <- seq(min(diamonds$depth), max(diamonds$depth), len=1000)
plot(mu_candidates, sapply(mu_candidates, rss),
     xlab="Depth", ylab="RSS", type="l", lwd=2,
     main="Residual Sum of Squares")
```

---

Now, what should our "center" estimate be?

We want a value that is _close_ to the data based on RSS!

So we need to find the value in the range that minimizes RSS.

---

From calculus, we know that a necessary condition for the minimizer $\hat{\mu}$ of RSS is that the derivative of RSS is zero at that point.

So, the strategy to minimize RSS is to compute its derivative, and find the value of $\mu$ where it equals zero.
--- \begin{align} \frac{\partial}{\partial \mu} \frac{1}{2} \sum_{i=1}^n (x_i - \mu)^2 & = & \frac{1}{2} \sum_{i=1}^n \frac{\partial}{\partial \mu} (x_i - \mu)^2 \; \textrm{(sum rule)}\\ {} & = & \sum_{i=1}^n \mu - \sum_{i=1}^n x_i \\ {} & = & n\mu - \sum_{i=1}^n x_i \end{align} --- ```{r, echo=FALSE, fig.align="center"} rss_deriv <- function(mu) { nrow(diamonds)*mu - sum(diamonds$depth)} plot(mu_candidates, sapply(mu_candidates, rss_deriv), xlab="Depth", ylab="RSS Derivative", type="l", lwd=2, main="Derivative of RSS") abline(h=0,lty=2,lwd=1.6) ``` --- Next, we set that equal to zero and find the value of $\mu$ that solves that equation: \begin{align} \frac{\partial}{\partial \mu} & = & 0 & \Rightarrow \\ n\mu & = & \sum_{i=1}^n x_i & \Rightarrow \\ \mu & = & \frac{1}{n} \sum_{i=1}^n x_i & {} \end{align} --- The fact you should remember: **The mean is the value that minimizes RSS for a vector of attribute values** --- It equals the value where the derivative of RSS is 0: ```{r, echo=FALSE, fig.align="center", fig.height=6} our_mean <- sum(diamonds$depth) / nrow(diamonds) plot(mu_candidates, sapply(mu_candidates, rss_deriv), xlab="Depth", ylab="RSS Derivative", type="l", lwd=1.3) abline(v=our_mean, lwd=2.3, col="blue") abline(h=0, lty=2, lwd=1.6) ``` --- It is the value that minimizes RSS: ```{r, echo=FALSE, fig.align="center", fig.height=6} plot(mu_candidates, sapply(mu_candidates, rss), xlab="Depth", ylab="RSS Derivative", type="l", lwd=1.3) abline(v=our_mean, lwd=2.3, col="blue") ``` --- And it serves as an estimate of central tendency of the dataset: ```{r, echo=FALSE,fig.align="center",fig.height=6} diamonds %>% ggplot(aes(x=depth)) + geom_histogram(bins=100) + geom_vline(aes(xintercept=median(depth)), color="red", size=1.3) + geom_vline(aes(xintercept=mean(depth)), color="blue", size=1.3) ``` --- Note that in this dataset the mean and median are not exactly equal, but are very close: ```{r} diamonds %>% summarize(mean_depth = mean(depth), median_depth = 
median(depth)) ``` --- There is a similar argument to define the median as a measure of _center_. In this case, instead of using RSS we use a different criterion: the sum of absolute deviations $$ SAD(m) = \sum_{i=1}^n |x_i - m|. $$ The median is the minimizer of this criterion. --- ```{r, echo=FALSE,fig.align="center",fig.height=6} sad <- function(m) sum(abs(diamonds$depth - m)) plot(mu_candidates, sapply(mu_candidates, sad), xlab="Depth", ylab="Sum of Absolute Deviations", type="l", lwd=1.3) abline(v=median(diamonds$depth), lwd=2.3, col="red") ``` --- ## Spread Now that we have a measure of center, we can now discuss how data is _spread_ around that center. --- ### Variance For the mean, we have a convenient way of describing this: the average distance (using squared difference) from the mean. We call this the _variance_ of the data: $$ \mathrm{var}(x) = \frac{1}{n} \sum_{i=1}^n (x_i - \overline{x})^2 $$ --- You will also see it with a slightly different constant in the front for technical reasons that we may discuss later on: $$ \mathrm{var}(x) = \frac{1}{n-1} \sum_{i=1}^n (x_i - \overline{x})^2 $$ --- Variance is a commonly used statistic for spread but it has the disadvantage that its units are not easy to conceptualize (e.g., squared diamond depth). 
A spread statistic that is in the same units as the data is the _standard deviation_, which is just the squared root of variance: $$ \mathrm{sd}(x) = \sqrt{\frac{1}{n}\sum_{i=1}^n (x_i - \overline{x})^2} $$ --- We can also use _standard deviations_ as an interpretable unit of how far a given data point is from the mean: ```{r, echo=FALSE, fig.align="center", fig.height=6} # create a df with standard deviation values to plot sds_to_plot <- seq(-6,6) sd_df <- diamonds %>% summarize(mean_depth = mean(depth), sd_depth = sd(depth)) %>% slice(rep_along(sds_to_plot, 1)) %>% mutate(sd_to_plot=sds_to_plot) %>% mutate(sd_val = mean_depth + sd_to_plot * sd_depth) diamonds %>% ggplot(aes(x=depth)) + geom_histogram(bins=100) + geom_vline(aes(xintercept=mean(depth)), col="blue", size=1.5) + geom_vline(aes(xintercept = sd_val), data=sd_df, linetype=2, size=1.2 - abs(seq(-1,1, len=13))) ``` --- As a rough guide, we can use "standard deviations away from the mean" as a measure of spread as follows: | SDs | proportion | Interpretation | |-----|------------|----------------| | 1 | `r round(1-2*pnorm(-1),2)` | `r 100*round(1-2*pnorm(-1),2)`% of the data is within $\pm$ 1 sds | | 2 | `r round(1-2*pnorm(-2),2)` | `r 100*round(1-2*pnorm(-2),2)`% of the data is within $\pm$ 2 sds | | 3 | `r round(1-2*pnorm(-3),4)` | `r 100*round(1-2*pnorm(-3),4)`% of the data is within $\pm$ 3 sds | | 4 | `r round(1-2*pnorm(-4),6)` | `r 100*round(1-2*pnorm(-4),6)`% of the data is within $\pm$ 4 sds | | 5 | `r round(1-2*pnorm(-5),8)` | `r 100*round(1-2*pnorm(-5),8)`% of the data is within $\pm$ 5 sds | | 6 | `r round(1-2*pnorm(-6),10)` | `r 100*round(1-2*pnorm(-6),10)`% of the data is within $\pm$ 6 sds | --- ### Spread estimates using rank statistics Just like we saw how the median is a rank statistic used to describe central tendency, we can also use rank statistics to describe spread. For this we use two more rank statistics: the first and third _quartiles_, $x_{(n/4)}$ and $x_{(3n/4)}$ respectively. 
---

```{r, warning=FALSE, echo=FALSE, fig.align="center", fig.height=6.5}
quartile_df <- diamonds %>%
  summarize(first=quantile(diamonds$depth, p=1/4),
            third=quantile(diamonds$depth, p=3/4)) %>%
  tidyr::gather(quartile, value)

diamonds %>%
  ggplot(aes(x=depth)) +
    geom_histogram(bins=100) +
    geom_vline(aes(xintercept=median(depth)), size=1.3, color="red") +
    geom_vline(aes(xintercept=value), data=quartile_df, size=1,color="red", linetype=2)
```

---

Note, the five order statistics we have seen so far: minimum, maximum, median and first and third quartiles are so frequently used that this is exactly what `R` uses by default as a `summary` of a numeric vector of data (along with the mean):

```{r}
summary(diamonds$depth)
```

---

This five-number summary also comprises all of the statistics used to construct a boxplot to summarize data distribution.

In particular, the _inter-quartile range_, which is defined as the difference between the third and first quartile: $\mathrm{IQR}(x) = x_{(3n/4)} - x_{(n/4)}$ gives a measure of spread.

---

The interpretation here is that half the data is within the IQR around the median.

```{r}
diamonds %>%
  summarize(sd_depth = sd(depth), iqr_depth = IQR(depth))
```

---

## Outliers

We can use estimates of spread to identify outlier values in a dataset.

Given an estimate of spread based on the techniques we've just seen, we can identify values that are _unusually_ far away from the center of the distribution.

---

One often-cited rule of thumb is based on using standard deviation estimates. We can identify outliers as the set

$$
\mathrm{outliers_{sd}}(x) = \\{x_j \, | \, |x_j - \overline{x}| > k \times \mathrm{sd}(x) \\}
$$

where $\overline{x}$ is the sample mean of the data and $\mathrm{sd}(x)$ its standard deviation. Multiplier $k$ determines if we are identifying (in Tukey's nomenclature) _outliers_ or points that are _far out_.
--- ```{r, warnings=FALSE, echo=FALSE,fig.align="center",fig.height=6.5} outlier_df <- diamonds %>% summarize(mean_depth=mean(depth), sd_depth=sd(depth)) %>% slice(rep(1, 4)) %>% mutate(multiplier = c(-3, -1.5, 1.5, 3)) %>% mutate(outlier_limit = mean_depth + multiplier * sd_depth) diamonds %>% ggplot(aes(x=depth)) + geom_histogram(bins=100) + geom_vline(aes(xintercept=outlier_limit), data=outlier_df, color="blue") ``` --- While this method works relatively well in practice, it presents a fundamental problem. Severe outliers can significantly affect spread estimates based on standard deviation. Specifically, spread estimates will be inflated in the presence of severe outliers. --- To circumvent this problem, we use rank-based estimates of spread to identify outliers as: $$\mathrm{outliers_{IQR}}(x) = \{x_j \, | \\ \, x_j < x_{(1/4)} - k \times \mathrm{IQR}(x) \; \mathrm{ or } \\ \; x_j > x_{(3/4)} + k \times \mathrm{IQR}(x)\}$$ This is usually referred to as the _Tukey outlier rule_, with multiplier $k$ serving the same role as before. --- We use the IQR here because it is less susceptible to be inflated by severe outliers in the dataset. It also works better for skewed data than the method based on standard deviation. --- ```{r, warnings=FALSE, echo=FALSE, fig.align="center", fig.height=6.5} outlier_df <- diamonds %>% summarize(q1=quantile(depth, 1/4), q3=quantile(depth, 3/4), iqr=IQR(depth)) %>% slice(rep(1, 2)) %>% mutate(multiplier = c(1.5, 3)) %>% mutate(lower_outlier_limit = q1 - multiplier * iqr) %>% mutate(upper_outlier_limit = q3 + multiplier * iqr) diamonds %>% ggplot(aes(x=depth)) + geom_histogram(bins=100) + geom_vline(aes(xintercept=lower_outlier_limit), data=outlier_df, color="red") + geom_vline(aes(xintercept=upper_outlier_limit), data=outlier_df, color="red") ``` --- ### Skew The five-number summary can be used to understand if data is skewed. Consider the differences between the first and third quartiles to the median. 
--- ```{r} diamonds %>% summarize(med_depth = median(depth), q1_depth = quantile(depth, 1/4), q3_depth = quantile(depth, 3/4)) %>% mutate(d1_depth = med_depth - q1_depth, d2_depth = q3_depth - med_depth) %>% select(d1_depth, d2_depth) ``` --- If one of these differences is larger than the other, then that indicates that this dataset might be skewed. The range of data on one side of the median is longer (or shorter) than the range of data on the other side of the median. --- ### Covariance and correlation The scatter plot is a visual way of observing relationships between pairs of variables. Like descriptions of distributions of single variables, we would like to construct statistics that summarize the relationship between two variables quantitatively. To do this we will extend our notion of _spread_ (or variation of data around the mean) to the notion of _co-variation_: do pairs of variables vary around the mean in the same way. --- Consider now data for two variables over the same $n$ entities: $(x_1,y_1), (x_2,y_2), \ldots, (x_n,y_n)$. For example, for each diamond, we have `carat` and `price` as two variables. --- ```{r, echo=FALSE, fig.align="center", fig.height=6} diamonds %>% ggplot(aes(x=carat, y=price)) + geom_point() + geom_hline(aes(yintercept = mean(price)), color="blue", lty=2) + geom_vline(aes(xintercept = mean(carat)), color="blue", lty=2) ``` --- We want to capture the relationship: does $x_i$ vary in the same direction and scale away from its mean as $y_i$? This leads to _covariance_ $$ cov(x,y) = \frac{1}{n} \sum_{i=1}^n (x_i - \overline{x})(y_i - \overline{y}) $$ --- Just like variance, we have an issue with units and interpretation for covariance, so we introduce _correlation_ (formally, Pearson's correlation coefficient) to summarize this relationship in a _unit-less_ way: $$ cor(x,y) = \frac{cov(x,y)}{sd(x) sd(y)} $$ --- As before, we can also use rank statistics to define a measure of how two variables are associated. 
One of these, _Spearman correlation_ is commonly used. It is defined as the Pearson correlation coefficient of the ranks (rather than actual values) of pairs of variables. --- ### Summary EDA: visual and computational methods to describe the distribution of data attributes over a range of values Grammar of graphics as effective tool for visual EDA Statistical summaries that directly establish properties of data distribution <file_sep>/materials/lectures/Regression/multiple_linear_regression.Rmd --- title: "Multiple Linear Regression" author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` Now that we've seen regression using a single predictor we'll move on to regression using multiple predictors. In this case, we use models of conditional expectation represented as linear functions of multiple variables: $$ \mathbb{E}[Y|X_1=x_1,X_2=x_2,\ldots,X_p=x_p] = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \cdots \beta_3 x_3 $$ In the case of our advertising example, this would be a model: $$ \mathtt{sales} = \beta_0 + \beta_1 \times \mathtt{TV} + \beta_2 \times \mathtt{newspaper} + \beta_3 \times \mathtt{facebook} $$ These models let us make statements of the type: "holding everything else constant, sales increased on average by 1000 per dollar spent on Facebook advertising" (this would be given by parameter $\beta_3$ in the example model). ### Estimation in multivariate regression Generalizing simple regression, we estimate $\beta$'s by minimizing an objective function that represents the difference between observed data and our expectation based on the linear model: $$ \begin{align} RSS & = \frac{1}{2} \sum_{i=1}^n (y_i - \hat{y}_i)^2 \\ {} & = \frac{1}{2} \sum_{i=1}^n (y_i - (\beta_0 + \beta_1 x_1 + \cdots + \beta_p x_p))^2 \end{align} $$ ![](multiple_rss.png) The minimizer is found using numerical algorithms to solve this type of _least squares_ problems. 
print_confint <- function(fit_df, term, digits=2) {
  # Format an approximate 95% confidence interval for coefficient `term`
  # from a broom::tidy() data frame as "{}_{lower} estimate_{upper}"
  # (LaTeX subscript notation used inline in the text).
  i <- match(term, fit_df$term)
  # 1.96 is the 0.975 quantile of the standard normal distribution
  # (the original 1.95 was a typo that slightly narrowed the interval).
  confint_offset <- 1.96 * fit_df$std.error[i]
  confint <- round(c(fit_df$estimate[i] - confint_offset,
                     fit_df$estimate[i],
                     fit_df$estimate[i] + confint_offset), digits)
  paste0("{}_{", confint[1], "} ", confint[2], "_{", confint[3], "}")
}

print_pval <- function(fit_df, term) {
  # Format the p-value for coefficient `term` from a broom::tidy() data
  # frame, flooring extremely small values at "<1e-16".
  # NOTE(review): the return value already carries the "P-value" prefix;
  # prose that writes "(P-value=`r ... print_pval(...)`)" will render the
  # prefix twice -- confirm intended usage at the call sites.
  i <- match(term, fit_df$term)
  pval <- fit_df$p.value[i]
  out <- ifelse(pval<1e-16, "<1e-16", paste0("=", pval))
  paste0("P-value", out)
}
### The F-test We can make additional statements for multivariate regression: "is there a relationship between _any_ of the predictors and the response?". Mathematically, we write this as $\beta_1 = \beta_2 = \cdots = \beta_p = 0$. Under the null, our model for $y$ would be estimated by the sample mean $\overline{y}$, and the error for that estimate is by total sum of squared error $TSS$. As before, we can compare this to the residual sum of squared error $RSS$ using the $F$ statistic: $$ \frac{(\mathrm{TSS}-\mathrm{RSS})/p}{\mathrm{RSS}/(n-p-1)} $$ If this statistic is greater (enough) than 1, then we reject hypothesis that there is no relationship between response and predictors. Back to our example, we use the `glance` function to compute this type of summary: ```{r} auto_fit %>% glance() %>% select(r.squared, sigma, statistic, df, p.value) %>% knitr::kable() ``` In comparison with the linear model only using `weight`, this multivariate model explains _more of the variance_ of `mpg`, but using more predictors. This is where the notion of _degrees of freedom_ comes in: we now have a model with expanded _representational_ ability. However, the bigger the model, we are conditioning more and more, and intuitively, given a fixed dataset, have fewer data points to estimate conditional expectation for each value of the predictors. That means, that are estimated conditional expectation is less _precise_. To capture this phenomenon, we want statistics that tradeoff how well the model fits the data, and the "complexity" of the model. Now, we can look at the full output of the `glance` function: ```{r} auto_fit %>% glance() %>% knitr::kable() ``` Columns `AIC` and `BIC` display statistics that penalize model fit with model size. The smaller this value, the better. Let's now compare a model only using `weight`, a model only using `weight` and `year` and the full multiple regression model we saw before. 
```{r} lm(mpg~weight, data=Auto) %>% glance() %>% knitr::kable() ``` ```{r} lm(mpg~weight+year, data=Auto) %>% glance() %>% knitr::kable() ``` In this case, using more predictors beyond `weight` and `year` doesn't help. ### Categorical predictors (cont'd) We saw transformations for categorical predictors with only two values, and deferred our discussion of categorical predictors with more than two values. In our example we have the `origin` predictor, corresponding to where the car was manufactured, which has multiple values ```{r} Auto <- Auto %>% mutate(origin=factor(origin)) levels(Auto$origin) ``` As before, we can only use numerical predictors in linear regression models. The most common way of doing this is to create new dummy predictors to _encode_ the value of the categorical predictor. Let's take a categorical variable `major` that can take values `CS`, `MATH`, `BUS`. We can encode these values using variables $x_1$ and $x_2$ $$ x_1 = \left\{ \begin{align} 1 & \textrm{ if MATH} \\ 0 & \textrm{ o.w.} \end{align} \right. $$ $$ x_2 = \left\{ \begin{align} 1 & \textrm{ if BUS} \\ 0 & \textrm{ o.w.} \end{align} \right. $$ Now let's build a model to capture the relationship between `salary` and `major`: $$ \mathtt{salary} = \beta_0 + \beta_1 x_1 + \beta_2 x_2 $$ What is the expected salary for a CS major? $\beta_0$. For a MATH major? $\beta_0 + \beta_1$. For a BUS major? $\beta_0 + \beta_2$. So, $\beta_1$ is the average difference in salary between MATH and CS majors. How can we calculate the average difference in salary between MATH and BUS majors? $\beta_1 - \beta_2$. The `lm` function in R does this transformation by default when a variable has class `factor`. 
We can see what the underlying numerical predictors look like by using the `model_matrix` function and passing it the model formula we build: ```{r} extended_df <- model.matrix(~origin, data=Auto) %>% as.data.frame() %>% mutate(origin = Auto$origin) extended_df %>% filter(origin == "1") %>% head() ``` ```{r} extended_df %>% filter(origin == "2") %>% head() ``` ```{r} extended_df %>% filter(origin == "3") %>% head() ``` ## Interactions in linear models The linear models so far include _additive_ terms for a single predictor. That let us made statemnts of the type "holding everything else constant...". But what if we think that a pair of predictors _together_ have a relationship with the outcome. We can add these _interaction_ terms to our linear models as products: $$ \mathbb{E} Y|X_1=x_1,X_2=x2 = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \beta_{12} x_1 x_2 $$ Consider the advertising example: $$ \mathtt{sales} = \beta_0 + \beta_1 \times \mathtt{TV} + \beta_2 \times \mathtt{facebook} + \beta_3 \times (\mathtt{TV} \times \mathtt{facebook}) $$ If $\beta_3$ is positive, then the effect of increasing TV advertising money is increased if facebook advertising is also increased. When using categorical variables, interactions have an elegant interpretation. Consider our car example, and suppose we build a model with an interaction between `weight` and `origin`. Let's look at what the numerical predictors look like: ```{r} extended_df <- model.matrix(~weight+origin+weight:origin, data=Auto) %>% as.data.frame() %>% mutate(origin = Auto$origin) extended_df %>% filter(origin == "1") %>% head() ``` ```{r} extended_df %>% filter(origin == "2") %>% head() ``` ```{r} extended_df %>% filter(origin == "3") %>% head() ``` So what is the expected miles per gallon for a car with `origin == 1` as a function of weight? $$ \mathtt{mpg} = \beta_0 + \beta_1 \times \mathtt{weight} $$ Now how about a car with `origin == 2`? 
$$ \mathtt{mpg} = \beta_0 + \beta_1 \times \mathtt{weight} + \beta_2 + \beta_4 \times \mathtt{weight} $$ Now think of the graphical representation of these lines. For `origin == 1` the intercept of the regression line is $\beta_0$ and its slope is $\beta_1$. For `origin == 2` the intercept of the regression line is $\beta_0 + \beta_2$ and its slope is $\beta_1+\beta_4$. `ggplot` does this when we map a factor variable to a aesthetic, say color, and use the `geom_smooth` method: ```{r} Auto %>% ggplot(aes(x=weight, y=mpg, color=origin)) + geom_point() + geom_smooth(method=lm) ``` The intercept of the three lines seem to be different, but the slope of `origin == 3` looks different (decreases faster) than the slopes of `origin == 1` and `origin == 2` that look very similar to each other. Let's fit the model and see how much statistical confidence we can give to those observations: ```{r} auto_fit <- lm(mpg~weight*origin, data=Auto) auto_fit_stats <- auto_fit %>% tidy() auto_fit_stats %>% knitr::kable() ``` So we can say that for `origin == 3` the relationship between `mpg` and `weight` is different but not for the other two values of `origin`. Now, there is still an issue here because this could be the result of a poor fit from a linear model, it seems none of these lines do a very good job of modeling the data we have. We can again check this for this model: ```{r} auto_fit %>% augment() %>% ggplot(aes(x=.fitted, y=.resid)) + geom_point() ``` The fact that residuals are not centered around zero suggests that a linear fit does not work well in this case. ### Additional issues with linear regression We saw previously some issues with linear regression that we should take into account when using this method for modeling. Multiple linear regression introduces an additional issue that is extremely important to consider when interpreting the results of these analyses: collinearity. 
![](collinearity.png) In this example, you have two predictors that are very closely related. In that case, the set of $\beta$'s that minimize RSS may not be unique, and therefore our interpretation is invalid. You can identify this potential problem by regressing predictors onto each other. The usual solution is to fit models only including one of the colinear variables. <file_sep>/materials/lecture-notes/13-sql_system_constructs.Rmd # SQL System Constructs Database management systems are software applications designed for very efficient manipulation of data that target a relatively small number of operations. Since they are also defined to operate over a fairly restrictive data model, they are extremely useful in situations where data consistency and safety are required. Here are some examples of capabilities found in DBMS that help in that regard: - Transactions - A transaction is a sequence of queries and update statements executed as a single unit - For example, transferring money from one account to another - Both the *deduction* from one account and *credit* to the other account should happen, or neither should - Triggers - A trigger is a statement that is executed automatically by the system as a side effect of a modification to the database - Integrity Constraints - Predicates on the database that must always hold - Key Constraints: Specifiying something is a primary key or unique ## SQL as a Data Definition Language The Structured Query Language (SQL) is both a _Data Definition Language_ and a _Data Manipulation Language_ ```sql CREATE TABLE <name> ( <field> <domain>, ... 
) INSERT INTO <name> (<field names>) VALUES (<field values>) DELETE FROM <name> WHERE <condition> UPDATE <name> SET <field name> = <value> WHERE <condition> SELECT <fields> FROM <name> WHERE <condition> ``` We can create tables and specify primary key attributes which enforce integrity constraints at the system level ```sql CREATE TABLE customer ( ssn CHAR(9) PRIMARY KEY, cname CHAR(15), address CHAR(30), city CHAR(10), UNIQUE (cname, address, city)); ``` Attribute constraints: Constraints on the values of attributes `bname char(15) not null` `balance int not null, check (balance >= 0)` - Referential integrity: prevent dangling tuples ```sql CREATE TABLE branch(bname CHAR(15) PRIMARY KEY, ...); CREATE TABLE loan(..., FOREIGN KEY bname REFERENCES branch); ``` - Can tell the system what to do if a referenced tuple is being deleted - Global Constraints - Single-table ```sql CREATE TABLE branch (..., bcity CHAR(15), assets INT, CHECK (NOT(bcity = ‘Bkln’) OR assets > 5M)) ``` - Multi-table ```sql CREATE ASSERTION loan-constraint CHECK (NOT EXISTS ( SELECT * FROM loan AS L WHERE NOT EXISTS( SELECT * FROM borrower B, depositor D, account A WHERE B.cname = D.cname AND D.acct_no = A.acct_no AND L.lno = B.lno))) ``` ## Set Operations and Comparisons - Set operations ```sql select name from movieExec union/intersect/minus select name from movieStar ``` - Set Comparisons ```sql select * from movies where year in [1990, 1995, 2000]; select * from movies where year not in ( select extract(year from birthdate) from MovieStar ); ``` ## Views ```sql create view DisneyMovies select * from movie m where m.studioname = 'disney'; ``` Can use it in any place where a tablename is used. Views are used quite extensively to: (1) simplify queries, (2) hide data (by giving users access only to specific views). Views may be *materialized* or not. ## NULLs Value of any attribute can be NULL if value is unknown, or it is not applicable, or hidden, etc. It can lead to counterintuitive behavior. 
For example, the following query does not return movies where `length = NULL` ```sql select * from movies where length >= 120 or length <= 120` ``` Aggregate operations can be especially tricky when NULLs are present. <file_sep>/materials/lectures/Classification/linear_classification.Rmd --- title: "Linear models for classification" author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- ```{r, include=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` The general classification setting is: can we predict categorical response/output $Y$, from set of predictors $X_1,X_2,\ldots,X_p$? As in the regression case, we assume training data $(\mathbf{x}_1, y_1), \ldots, (\mathbf{x}_n, y_n)$. In this case, however, responses $y_i$ are categorical and take one of a fixed set of values. ![](4_1a.png) ![](4_1b.png) ### An example classification problem An individual's choice of transportation mode to commute to work. Predictors: income, cost and time required for each of the alternatives: driving/carpooling, biking, taking a bus, taking the train. Response: whether the individual makes their commute by car, bike, bus or train. From a classification model based on this data we could perform an inference task: how do people value price and time when considering their transportation choice. ### Why not linear regression? In our previous unit we learned about linear regression. Why can't we use linear regression in the classification setting. 
For categorical responses with more than two values, if order and scale (units) don't make sense, then it's not a regression problem $$ Y = \begin{cases} 1 & \textrm{if } \mathtt{stroke} \\ 2 & \textrm{if } \mathtt{drug overdose} \\ 3 & \textrm{if } \mathtt{epileptic seizure} \end{cases} $$ For **binary** responses, it's a little better: $$ Y = \begin{cases} 0 & \textrm{if } \mathtt{stroke} \\ 1 & \textrm{if } \mathtt{drug overdose} \\ \end{cases} $$ We could use linear regression in this setting and _interpret_ response $Y$ as a probability (e.g, if $\hat{y} > 0.5$ predict $\mathtt{drug overdose}$) ![](4_2.png) ### Classification as probability estimation problem This observation motivates how we will address the classification problem in general. Instead of modeling classes 0 or 1 directly, we will model the conditional class probability $p(Y=1|X=x)$, and classify based on this probability. In general, classification approaches use _discriminant_ (think of _scoring_) functions to do classification. 
_Logistic regression_ is **one** way of estimating the class probability $p(Y=1|X=x)$ (also denoted $p(x)$) ```{r,echo=FALSE} library(MASS) library(RColorBrewer) mycols <- brewer.pal(8, "Dark2")[c(3,2)] s <- sqrt(1/5) set.seed(30) makeX <- function(M, n=100, sigma=diag(2)*s) { z <- sample(1:nrow(M), n, replace=TRUE) m <- M[z,] return(t(apply(m,1,function(mu) mvrnorm(1,mu,sigma)))) } M0 <- mvrnorm(10, c(1,0), diag(2)) # generate 10 means x0 <- makeX(M0) ## the final values for y0=blue M1 <- mvrnorm(10, c(0,1), diag(2)) x1 <- makeX(M1) x <- rbind(x0, x1) y <- c(rep(0,100), rep(1,100)) cols <- mycols[y+1] GS <- 75 # put data in a Gs x Gs grid XLIM <- range(x[,1]) tmpx <- seq(XLIM[1], XLIM[2], len=GS) YLIM <- range(x[,2]) tmpy <- seq(YLIM[1], YLIM[2], len=GS) newx <- expand.grid(tmpx, tmpy) colnames(newx) <- c("X1","X2") ``` ```{r, echo=FALSE, fig.height=10, fig.width=10} layout(matrix(1:4, nr=2, byrow=FALSE)) plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n", main="Training Set") points(x, col=cols) # logistic regression dat <- data.frame(X1=x[,1], X2=x[,2]) fit <- glm(y~X1+X2, data=dat,family=binomial) yhat <- predict(fit, newdata=newx) yhat <- ifelse(yhat > 0, 2, 1) colshat <- mycols[yhat] coefs <- coef(fit) a <- -coefs[1] / coefs[3] b <- -coefs[2] / coefs[3] plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n",main="logistic regression") points(x, col=cols) points(newx, col=colshat, pch=".") abline(a=a,b=b) # KNN(15) library(class) yhat <- knn(x, newx, y, k=15) colshat <- mycols[as.numeric(yhat)] plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n",main="KNN(15)") points(x, col=cols) points(newx, col=colshat, pch=".") contour(tmpx, tmpy, matrix(as.numeric(yhat),GS,GS), levels=c(1,2), add=TRUE, drawlabels=FALSE) # KNN(1) yhat <- knn(x, newx, y, k=1) colshat <- mycols[as.numeric(yhat)] plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n",main="KNN(1)") points(x, col=cols) points(newx, 
col=colshat, pch=".") contour(tmpx, tmpy, matrix(as.numeric(yhat),GS,GS), levels=c(1,2), add=TRUE, drawlabels=FALSE) ``` ### Logistic regression The basic idea behind _logistic regression_ is to build a **linear** model _related_ to $p(x)$, since linear regression directly (i.e. $p(x) = \beta_0 + \beta_1 x$) doesn't work. Why? Instead we build a linear model of _log-odds_: $$ \log \frac{p(x)}{1-p(x)} = \beta_0 + \beta_1 x $$ Odds are equivalent to ratios of probabilities. For example, "two to one odds that <NAME> wins the French Open" means "the probability that <NAME> wins the French Open is double the probability he loses". So, if odds = 2, $p(x)=2/3$. If odds = 1/2, $p(x)=1/3$. In general odds = $\frac{p(x)}{1-p(x)}$. ### Exercises 1. Suppose an individual has a 16% chance of defaulting on their credit card payment. What are the odds that she will default? 2. On average, what fraction of people with an odds of 0.37 of defaulting on their credit card payment will in fact default? ```{r, echo=FALSE, results="hide", fig.width=10, fig.height=10} library(ISLR) data(Default) fit <- glm(default~balance, data=Default, family=binomial) ilogis <- function(theta) exp(theta) / (1 + exp(theta)) makeplot <- function(beta1) { with(Default, plot(balance, as.numeric(default)-1, ylab="Probability of default", main=substitute(list(hat(beta)[0]==beta0, hat(beta)[1]==beta1), list(beta0=round(coef(fit)[1], digits=2), beta1=round(beta1, digits=3))))) curve(ilogis(coef(fit)[1] + beta1 * x), add=TRUE, col="blue", lwd=1.3) abline(h=c(0,1), lty=2) } layout(matrix(1:4, nr=2, byrow=TRUE)) sapply(c(0.001, coef(fit)[2], 0.01, 0.1), makeplot) ``` Here is how we compute a logistic regression model in R ```{r, message=FALSE} library(ISLR) library(dplyr) library(broom) library(ggplot2) data(Default) default_fit <- glm(default ~ balance, data=Default, family=binomial) default_fit %>% tidy() %>% knitr::kable(digits=4) ``` Interpretation of logistic regression models is slightly different than the 
linear regression model we looked at. In this case, the **odds** that a person defaults increase by $e^{0.05} \approx `r round(exp(0.05),3)`$ for every dollar in their account balance. As before, the **accuracy** of $\hat{\beta}_1$ as an estimate of the **population** parameter is given its standard error. We can again construct a confidence interval for this estimate as we've done before. As before, we can do hypothesis testing of a relationship between account balance and the probability of default. In this case, we use a $Z$-statistic $\frac{\hat{\beta}_1}{\mathrm{SE}(\hat{\beta}_1)}$ which plays the role of the t-statistic in linear regression: a scaled measure of our estimate (signal / noise). As before, the P-value is the probability of seeing a Z-value as large (e.g., 24.95) under the null hypothesis that **there is no relationship between balance and the probability of defaulting**, i.e., $\beta_1=0$ in the population. In accordance to the "inverse problem" view we've been developing in class, we require an algorithm required to _estimate_ parameters $\beta_0$ and $\beta_1$ according to a data fit criterion. In logistic regression we use the **bernoulli** probability model we saw previously (think of flipping a coin weighted by $p(x)$), and _estimate_ parameters to **maximize** the _likelihood_ of the observed training data under this coin flipping (binomial) model. I.e.: solve the following optimization problem $$ \max_{\beta_0, \beta_1} \sum_{i:\, y_i=1} log(p(x_i)) + \sum_{i: y_i=0} log(1-p(x_i)) $$ This is a non-linear (but convex) optimization problem. You can learn algorithms to solve it in "Computational Methods" class (CMSC 460) ### Making predictions We can use a learned logistic regression model to make predictions. 
E.g., "on average, the probability that a person with a balance of $1,000 defaults is": $$ \hat{p}(1000) = \frac{e^{\hat{\beta}_0 + \hat{\beta}_1 \times 1000}}{1+e^{\beta_0 + \beta_1 \times 1000}} \approx \frac{e^{-10.6514 + 0.0055 \times 1000}}{1+e^{-10.6514 + 0.0055 \times 1000}} \\ \approx 0.00576 $$ ### Multiple logistic regression This is a classification analog to linear regression: $$ \log \frac{p(\mathbf{x})}{1-p(\mathbf{x})} = \beta_0 + \beta_1 x_1 + \cdots + \beta_p x_p $$ ```{r} fit <- glm(default ~ balance + income + student, data=Default, family="binomial") fit %>% tidy() %>% knitr::kable(digits=4) ``` As in multiple linear regression it is essential to avoid **confounding!**. Consider an example of single logistic regression of default vs. student status: ```{r} fit1 <- glm(default ~ student, data=Default, family="binomial") fit1 %>% tidy() %>% knitr::kable(digits=4) ``` and a multiple logistic regression: ```{r} fit2 <- glm(default ~ balance + income + student, data=Default, family="binomial") fit2 %>% tidy() %>% knitr::kable(digits=4) ``` ```{r, echo=FALSE} bal_range <- range(Default$balance) plot(0,0,xlim=bal_range,ylim=c(0,1),xlab="Credit Card Balance", ylab="Default Rate", type="n") curve(predict(fit1, newdata=data.frame(student="Yes", balance=x), type="response"), add=TRUE, lty=2, col="orange", lwd=1.6) curve(predict(fit1, newdata=data.frame(student="No", balance=x), type="response"), add=TRUE, lty=2, col="blue", lwd=1.6) curve(predict(fit2, newdata=data.frame(student="Yes", balance=x, income=mean(Default$income)), type="response"), add=TRUE, lty=1, col="orange", lwd=1.6) curve(predict(fit2, newdata=data.frame(student="No", balance=x, income=mean(Default$income)), type="response"), add=TRUE, lty=1, col="blue", lwd=1.6) ``` ```{r, echo=FALSE} boxplot(balance~student, data=Default, col=c("blue", "orange"), xlab="Student Status", ylab="Credit Card Balance") ``` ### Exercise 1. 
Suppose we collect data for a group of students in a statistics class with variables X1 = hours studied, X2 = undergrad GPA, and Y = receive an A. We fit a logistic regression and produce estimated coefficients, $\hat{\beta}_0=-6, \hat{\beta}_1=0.05,\hat{\beta}_2=1$. Estimate the probability that a student who studies for 40h and has an undergraduate GPA of 3.5 gets an A in the class. 2. With estimated parameters from previous question, and GPA of 3.5 as before, how many hours would the student need to study to have a 50% chance of getting an A in the class? ## Linear Discriminant Analysis Linear Discriminant Analysis (LDA) is a different linear method to estimate a probability model used for classification. Recall that we want to partition data based on **class probability**: e.g., _find the $\mathbf{X}$ for which_ $P(\mathrm{default=Yes}|X) > P(\mathrm{default=No}|X)$. In logistic regression, **we made no assumption about $\mathbf{X}$**. In other cases, we **can** make assumptions about $\mathbf{X}$ that improve prediction performance (if assumptions hold, obviously) ```{r, echo=FALSE, fig.width=10, fig.height=10} layout(matrix(1:2,nr=1)) hist(Default$balance[Default$default=="Yes"], main="Balance distribution if defaulting") hist(Default$balance[Default$default=="No"], main = "Balance distribution if not defaulting") ``` This suggests we can model `balance` for each of the classes with a normal distribution. WARNING, BIG ASSUMPTION: We will assume `balance` has the same *variance* for both classes (this is what makes LDA linear). 
So, we estimate average `balance` for people who _do not_ default: $$ \hat{\mu}_0 = \frac{1}{n_0} \sum_{i:\, y_i=0} x_i $$ for people who do default: $$ \hat{\mu}_1 = \frac{1}{n_1} \sum_{i:\, y_i=1} x_i $$ and estimate variance for both classes as $$ \hat{\sigma}^2 = \frac{1}{n-2} \sum_{k=1,2} \sum_{i:\, y_i=k} (x_i - \hat{\mu}_k)^2 $$ ```{r, echo=FALSE} library(dplyr) balance_means <- Default %>% group_by(default) %>% summarize(balance_mean=mean(balance)) balance_means balance_sd <- Default %>% group_by(default) %>% mutate(balance_mean = mean(balance)) %>% mutate(squared_centered_balance = (balance - balance_mean)^2) %>% summarize(rss=sum(squared_centered_balance), n=n()) %>% summarize(balance_sd=sqrt(sum(rss) / (sum(n)-2))) balance_sd ``` Linear Discriminant Analysis ============================== ```{r, echo=FALSE, fig.width=15, fig.height=10} q <- Default %>% ggplot(aes(x=balance)) q <- q + geom_histogram(data=subset(Default, default=="No"), fill="red", alpha=.3, binwidth=50) + geom_histogram(data=subset(Default, default=="Yes"), fill="blue", alpha=.3, binwidth=50) + geom_vline(xintercept=balance_means$balance_mean, size=1.5, linetype=2) + theme(axis.title=element_text(size=24), axis.text=element_text(size=18)) q ``` We can "score" values of `balance` based on these estimates: $$ f_k(x) = \frac{1}{\sqrt{2\pi}\sigma} \exp \left(-\frac{1}{2\sigma^2} (x-\mu_k)^2 \right) $$ Remember, what we want is **posterior class probability** $p(Y=k|X)$, for that we need to include the probability that we _observe_ class $k$. This is called **prior class probability**, denoted $\pi_k$, means the proportion of times you expect people to default regardless of any other attribute. We can estimate from training data as the proportion of observations with label $k$. 
Bayes' Rule (or Theorem) gives us a way of computing $P(Y=k|X)$ using score $f_k(x)$ (from the class normal assumption) and prior $\pi_k$: $$ P(Y=k|X) = \frac{f_k(x) \pi_k}{\sum_l f_l(x) \pi_l} $$ If data (conditioned by class) is distributed so that $f_k$ is the right probability function to use, then predicting the class that maximizes $P(Y=k|X)$ is the **optimal** thing to do. This is referred to the _Bayes classifier_ (aka the Holy Grail of classification) #### How to train LDA Compute class means and squared error based on class mean ```{r} lda_stats <- Default %>% group_by(default) %>% mutate(class_mean=mean(balance), squared_error=(balance-class_mean)^2) ``` Compute class sizes and sum of squared errors ```{r} lda_stats <- lda_stats %>% summarize(class_mean=first(class_mean), class_size=n(), sum_squares=sum(squared_error)) ``` Compute class prior and variance (note same variance for both classes) ```{r, results="as.is"} lda_stats <- lda_stats %>% mutate(class_prior=class_size/sum(class_size), sigma2=sum(sum_squares) / (sum(class_size) - 2)) %>% select(default, class_mean, class_prior, sigma2) knitr::kable(lda_stats) ``` How do we predict with LDA? Predict `Yes` if $P(Y=1|X) > P(Y=0|X)$ Equivalently: $$ \log{\frac{P(Y=1|X)}{P(Y=0|X)}} > 0 \Rightarrow \\ \log f_1(x) + \log \pi_1 > \log f_0(x) + \log \pi_0 $$ This turns out to be a linear function of $x$! 
```{r} lda_log_ratio <- function(balance, lda_stats) { n <- length(balance) # subtract class mean centered_balance <- rep(balance, 2) - rep(lda_stats$class_mean, each=n) # scale by standard deviation scaled_balance <- centered_balance / sqrt(lda_stats$sigma2[1]) # compute log normal density and add log class prior lprobs <- dnorm(scaled_balance, log=TRUE) + log(rep(lda_stats$class_prior, each=n)) # compute log ratio of class probabilities lprobs <- matrix(lprobs, nc=2) colnames(lprobs) <- lda_stats$default lprobs[,"Yes"] - lprobs[,"No"] } ``` ```{r, fig.width=12} test_balance <- seq(0, 3000, len=100) plot(test_balance, lda_log_ratio(test_balance, lda_stats), type="l", xlab="Balance", ylab="Log Probability Ratio", cex=1.4) ``` ### Classifier evaluation How do we determine how well classifiers are performing? One way is to compute the _error rate_ of the classifier, the percent of mistakes it makes when predicting class ```{r} library(MASS) lda_fit <- lda(default ~ balance, data=Default) lda_pred <- predict(lda_fit, data=Default) print(table(predicted=lda_pred$class, observed=Default$default)) # error rate mean(Default$default != lda_pred$class) * 100 # dummy error rate mean(Default$default != "No") * 100 ``` In this case, it would seem that LDA performs well. But in fact, we can get similar error rate by always predicting "no default". We can see from this table that LDA errors are not symmetric. It's most common error is that _it misses true defaults_. 
We need a more precise language to describe classification mistakes: | | True Class + | True Class - | Total | |------------------:|:--------------------|---------------------|-------| | Predicted Class + | True Positive (TP) | False Positive (FP) | P* | | Predicted Class - | False Negative (FN) | True Negative (TN) | N* | | Total | P | N | | Using these we can define statistics that describe classifier performance | Name | Definition | Synonyms | |--------------------------------:|:-----------|---------------------------------------------------| | False Positive Rate (FPR) | FP / N | Type-I error, 1-Specificity | | True Positive Rate (TPR) | TP / P | 1 - Type-II error, power, sensitivity, **recall** | | Positive Predictive Value (PPV) | TP / P* | **precision**, 1-false discovery proportion | | Negative Predicitve Value (NPV) | FN / N* | | In the credit default case we may want to increase **TPR** (recall, make sure we catch all defaults) at the expense of **FPR** (1-Specificity, clients we lose because we think they will default) This leads to a natural question: Can we adjust our classifiers TPR and FPR? Remember we are classifying `Yes` if $$ \log \frac{P(Y=\mathtt{Yes}|X)}{P(Y=\mathtt{No}|X)} > 0 \Rightarrow \\ P(Y=\mathtt{Yes}|X) > 0.5 $$ What would happen if we use $P(Y=\mathtt{Yes}|X) > 0.2$? 
```{r, echo=FALSE} test_balance <- seq(0, 3000, len=100) plot(test_balance, lda_log_ratio(test_balance, lda_stats), type="l", xlab="Balance", ylab="Log Probability Ratio", cex=1.4) ``` ```{r, fig.width=12, message=FALSE} library(ROCR) pred <- prediction(lda_pred$posterior[,"Yes"], Default$default) layout(cbind(1,2)) plot(performance(pred, "tpr")) plot(performance(pred, "fpr")) ``` A way of describing the TPR and FPR tradeoff is by using the **ROC curve** (Receiver Operating Characteristic) and the **AUROC** (area under the ROC) ```{r, fig.width=12} auc <- unlist(performance(pred, "auc")@y.values) plot(performance(pred, "tpr", "fpr"), main=paste("LDA AUROC=", round(auc, 2)), lwd=1.4, cex.lab=1.7, cex.main=1.5) ``` Consider comparing an LDA model using all predictors in the dataset. ```{r} full_lda <- lda(default~., data=Default) full_lda_preds <- predict(full_lda, Default) pred_list <- list( balance_lda = lda_pred$posterior[,"Yes"], full_lda = full_lda_preds$posterior[,"Yes"], dummy = rep(0, nrow(Default))) pred_objs <- lapply(pred_list, prediction, Default$default) aucs <- sapply(pred_objs, function(x) unlist( performance(x, "auc")@y.values)) roc_objs <- lapply(pred_objs, performance, "tpr", "fpr") ``` ```{r, echo=FALSE} library(RColorBrewer) palette(brewer.pal(8,"Dark2")) ``` ```{r} for (i in seq(along=roc_objs)) { plot(roc_objs[[i]], add = i != 1, col=i, lwd=3, cex.lab=1.5) } legend("bottomright", legend=paste(gsub("_", " ", names(pred_list)), "AUROC=",round(aucs, 2)), col=1:3, lwd=3, cex=2) ``` Another metric that is frequently used to understand classification errors and tradeoffs is the precision-recall curve: ```{r, fig.width=10, fig.height=9} library(caTools) pr_objs <- lapply(pred_objs, performance, "prec", "rec") for (i in seq(along=pr_objs)) { plot(pr_objs[[i]], add = i != 1, col=i, lwd=3, cex.lab=1.5) } legend("bottomleft", legend=paste(gsub("_", " ", names(pred_list))), col=1:3, lwd=3, cex=2) ``` ### Summary We approach classification as a class 
probability estimation problem. Logistic regression and LDA partition predictor space with linear functions. Logistic regression learns parameter using Maximum Likelihood (numerical optimization), while LDA learns parameter using means and variances (and assuming normal distribution) Error and accuracy statistics are not enough to understand classifier performance. Classifications can be done using probability cutoffs to trade, e.g., TPR-FPR (ROC curve), or precision-recall (PR curve). Area under ROC or PR curve summarize classifier performance across different cutoffs. <file_sep>/materials/slides/presentation/index.rmd --- title: "Communicating Results of Data Analysis" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: ratio: "16:9" --- class: title-slide, center, middle count: false ```{r cowplot_setup, echo=FALSE, message=FALSE} library(cowplot) ``` .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Communicating Results of Data Analysis] .author[<NAME>] .other-info[ University of Maryland, College Park, USA CMSC320: `r Sys.Date()` ] .logo[![](img/logo.png)] --- ## For Today Data Analysis Deliverables: - Written analyses - R packages --- layout: true ## Written analyses --- Components: 1. Title 2. Introduction and motivation 3. Description of dataset 4. Description of statistical and machine learning models used (Methods) 5. Results (including measures of uncertainty) 6. Conclusions (including potential problems) 7. References .source[https://leanpub.com/datastyle] --- ### Introduction and Motivation Always lead with the question (task) you are addressing. E.g.: "Can we use tweets about stocks to predict stock prices?" Not: "Can we use the Random Forest algorithm to learn a classifier that predicts stock prices" E.g: "What are good predictors of student performance?" 
Not: "Can we use linear regression to predict student performance" --- ### Description of dataset Size: entities and attributes Important: describe what you did to 1) obtain, 2) tidy the dataset. --- ### Description of data analysis methods Be specific, use equations when appropriate: $$ W = a + b H + e $$ where $W$ is _weight_, $H$ is _height_ and $e$ is an error term. When appropriate mention distributional assumptions on $e$. --- ### Description of data analysis methods When using ML methods, describe: - preprocessing (e.g., feature selection, transformations) - algorithm choice (why is it appropriate) - model selection and assessment (e.g., which classification metric and why) --- ### Results - Report estimates in the appropriate units - Report estimates with uncertainty We saw confidence intervals on our previous lectures with specific advise regarding their presentation. (_Note_: this also applies to prediction metrics) --- ### Results **Important**: Summarize importance of estimate (i.e., refer to the question you originally posed in introduction) _Why does this estimate address your question?_ --- ### End matter - Include potential problems with the analysis you carried out. - Include references to the analysis methods used. --- layout: true ## Graphics --- <NAME>'s presentation on effective graphics: http://tinyurl.com/graphs2017 --- A few other notes on style: - Make titles legible - Annotate in plot if possible (see example data analysis early in semester) - Include units in axis titles when appropriate - E.g., not appropriate in PC scatterplot --- layout: true ## R packages Case study: suppose you used data to create a classifier for diagnostic purposes. How do you share? 
R packages is a reproducible, high-visibility way of publishing these types of results --- - Consistent organization - Standardized deployment --- Hadley's presentation on R packages [http://www.slideshare.net/hadley/r-packages](http://www.slideshare.net/hadley/r-packages) [The book](http://r-pkgs.had.co.nz/) <file_sep>/materials/lecture-notes/scratch/sql_flights.Rmd ### Exercise The `dplyr` package has a nice interface to database systems as well. As you may imagine, mapping the `dplyr` single and two-table verbs to SQL can be pretty straight forward. However, it also allows you to run SQL queries on these databases directly. See more information [in this introduction](https://cran.r-project.org/web/packages/dplyr/vignettes/databases.html). As an exercise you should try to answer our `dplyr` exercise questions using SQL queries and compare the results. As a reminder, here are the six questions: *Question 1*: Filter flights to include only flights with an arrival delay greater than 2 hours (delays are recorded in minutes). *Question 2*: Select columns in the flights table that contain delay times (note that the function `matches` can be used to select columns with names that partially match a given string. See `?dplyr::select`) *Question 3*: Find the minimum arrival delay (this can be a negative number) in the flights table. Use `summarize`. *Question 4*: Find minimum arrival delay (again, can be a negative number) in the flights table for each _destination_ airport. Use `group_by` and `summarize`. *Question 5*: List the name of **all** airlines and the number of flights for each airline in flights table. The `airlines` table contains airline names, so you will have to perform a join operation. *Question 6*: (a) Create a new column (use `mutate`) with total delay time for each flight. (b) Replace any missing _total delay times_ with the average (`mean`) total delay. 
(c) Same as (b) but now replace any missing _total delay times_ with the average (`mean`) total delay for the flight's route (i.e., origin/destination combination) <file_sep>/materials/slides/tidy-data/tidy-data.Rmd --- title: "Common operations in data tyding" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: chakra: libs/remark-0.14.0.min.js lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Common operations for data tidying] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] --- layout: true ## Tidying data --- ```{r setup_ch16, echo=FALSE, message=FALSE, cache=FALSE} library(tidyverse) select <- dplyr::select data_dir <- "data/vignettes" knitr::opts_chunk$set(cache=TRUE, message=FALSE) ``` - Common problems in data preparation: - Use cases commonly found in raw datasets that need to be addressed to turn messy data into tidy data. - We derive many of our ideas from the paper [Tidy Data](http://www.jstatsoft.org/v59/i10/paper) by <NAME>. --- Here we assume we are working with a data model based on rectangular data structures where 1. Each attribute (or variable) forms a column 2. Each entity (or observation) forms a row 3. Each type of entity (observational unit) forms a table --- Here is an example of a tidy dataset: ```{r} library(nycflights13) head(flights) ``` --- layout: true ## Common problems in messy data --- The set of common operations we will study are based on these common problems found in datasets. 
- Column headers are values, not variable names (gather) - Multiple variables stored in one column (split) - Variables stored in both rows and column (rotate) - Multiple types of observational units are stored in the same table (normalize) --- ### Headers as values The first problem we'll see is the case where a table header contains values. ```{r, echo=FALSE} library(tidyverse) pew <- read_csv(file.path(data_dir, "pew.csv")) pew ``` --- A tidy version of this table would consider the *variables* of each observation to be `religion, income, frequency` where `frequency` has the number of respondents for each religion and income range. --- The function to use in the `tidyr` package is `gather`: ```{r} tidy_pew <- gather(pew, income, frequency, -religion) tidy_pew ``` --- ### Multiple variables in one column ```{r} tb <- read_csv(file.path(data_dir, "tb.csv")) tb ``` --- - We need to `gather` the tabulation columns into a `demo` and `n` columns (for demographic and number of cases): ```{r} tidy_tb <- gather(tb, demo, n, -iso2, -year) tidy_tb ``` --- Need to `separate` the values in the `demo` column into two variables `sex` and `age` ```{r} tidy_tb <- separate(tidy_tb, demo, c("sex", "age"), sep=1) tidy_tb ``` --- We can put these two commands together in a pipeline: ```{r} tidy_tb <- tb %>% gather(demo, n, -iso2, -year) %>% separate(demo, c("sex", "age"), sep=1) tidy_tb ``` --- ### Variables stored in both rows and columns This is the messiest, commonly found type of data. ```{r} weather <- read_csv(file.path(data_dir, "weather.csv")) weather ``` --- We have two rows for each month: - one with maximum daily temperature - one with minimum daily temperature - the columns starting with `d` correspond to the day in the where the measurements were made. --- ```{r} weather %>% gather(day, value, d1:d31, na.rm=TRUE) %>% spread(element, value) ``` The new function we've used here is `spread`. 
It does the inverse of `gather`: it spreads the `element` and `value` columns into separate columns.
## Data generation We will simulate two dataframes containing a datetime attribute, a categorical and a numeric attribute. Let's first define a function to generate dataframes. ```{r, message=FALSE} library(lubridate) library(tidyverse) generate_df <- function(dummy_name, n=10, min_date=ymd("2018/01/01"), max_date=ymd("2018/02/15"), cat_levels=letters[1:5], min_num=-10, max_num=10) { date_range <- seq(min_date, max_date, by=1) df <- tibble(date = sample(date_range, n, replace=TRUE), cat = factor(sample(cat_levels, n, replace=TRUE), levels=cat_levels), num = runif(n, min_num, max_num), dummy=runif(n, min_num, max_num)) colnames(df)[ncol(df)] <- dummy_name df } ``` Now, generate the dataframes. We use the function `rowid_to_column` to keep track of row indices as we operate on the dataframes. ```{r} set.seed(1234) df1 <- generate_df("a", n=5) %>% rowid_to_column() df2 <- generate_df("b", n=10) %>% rowid_to_column() df1 df2 ``` ```{r, echo=FALSE} write_csv(df1, path="two_tables_df1.csv") write_csv(df2, path="two_tables_df2.csv") ``` ## Similarity functions We will define a similarity function between rows of the two dataframes. For dates we will compute the squared difference in days between dates. For numeric values we also calculate the squared difference $d$. In both cases we use transformation $\exp{-d}$ to turn difference $d$ into a similarity. For the categorical attribute, we set similarity equal to 10 if values are equal, and 0 otherwise. ```{r} diff_to_similarity <- function(d) { exp(-d) } date_similarity <- function(d1, d2) { d <- (as.integer(d1 - d2))^2 diff_to_similarity(d) } cat_similarity <- function(v1, v2) { ifelse(v1 == v2, 10, 0) } num_similarity <- function(v1, v2) { d <- (v1 - v2)^2 diff_to_similarity(d) } ``` ## Version 1: Using matrices In the first version we create a similarity matrix and iterate over rows of the two tables to fill in values of the matrix. 
```{r} sim_matrix <- matrix(NA, nrow(df1), nrow(df2)) for (i in seq(1, nrow(df1))) { for (j in seq(1, nrow(df2))) { s <- date_similarity(df1$date[i], df2$date[j]) s <- s + cat_similarity(df1$cat[i], df2$cat[j]) s <- s + num_similarity(df1$num[i], df2$num[j]) sim_matrix[i,j] <- s } } round(sim_matrix,2) ``` We can then use this similarity matrix as needed. To turn this into a tidy data frame we can use in subsequent analysis we could use something like this: ```{r} similarity_df <- sim_matrix %>% magrittr::set_colnames(seq(1,ncol(.))) %>% as_tibble() %>% rowid_to_column("df1_id") %>% tidyr::gather(df2_id, similarity, -df1_id) %>% mutate(df2_id = as.integer(df2_id)) similarity_df ``` To find out which row in `df2` have highest similarity for each row of `df1`, we can use the group_by and summarize construct as we have done previously. ```{r} similarity_df %>% group_by(df1_id) %>% summarize(max_sim = max(similarity), df2_match_id=df2_id[which.max(similarity)]) ``` ## Version 2: Using data frames We can also use operations that work within a pipeline for this task. However, it is a bit more convoluted, and I include it here for reference (and because it's fun to figure out how to do it). First we create a data frame with all pairwise combinations of row indices from each of the two data frames. ```{r} index_df <- df1 %>% select(df1_id="rowid") %>% mutate(df2_id=NA) %>% bind_rows(df2 %>% select(df2_id="rowid") %>% mutate(df1_id = NA)) %>% tidyr::expand(df1_id, df2_id) %>% tidyr::drop_na() index_df ``` Next, we populate that data frame with attributes from the two dataframes by using joins on the row indices. ```{r} similarity_df <- index_df %>% inner_join(df1 %>% select(rowid, date.df1=date, cat.df1=cat, num.df1=num), by=c(df1_id = "rowid")) %>% inner_join(df2 %>% select(rowid, date.df2=date, cat.df2=cat, num.df2=num), by=c(df2_id= "rowid"), suffix=c(".ind", ".df2")) ``` With all relevant attributes in one data frame we can compute similarity as before. 
```{r} similarity_df <- similarity_df %>% mutate(date_sim = date_similarity(date.df1, date.df2)) %>% mutate(cat_sim = cat_similarity(cat.df1, cat.df2)) %>% mutate(num_sim = num_similarity(num.df1, num.df2)) %>% mutate(similarity = date_sim + cat_sim + num_sim) %>% select(df1_id, df2_id, similarity) ``` As in the first option, we can use the group_by and summarize construct. ```{r} similarity_df %>% group_by(df1_id) %>% summarize(max_sim = max(similarity), df2_match_id=df2_id[which.max(similarity)]) ``` <file_sep>/content/lecture-note/pca/index.md --- date: 2016-11-28T06:12:58-05:00 title: Dimensionality Reduction --- Exploratory Data Analysis of high-dimensional data using dimensionality reduction. [Lecture Notes](pca/) <file_sep>/materials/slides/operations/operations.Rmd --- title: "Operations" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: chakra: libs/remark-0.14.0.min.js lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Operations] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] --- ```{r setup1, include=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) ``` ## Principles: Basic Operations Now that we have a data frame describing our data, let's learn a few fundamental operations we perform on data frames on almost any analysis. We divide these first set of operations into two groups: - operations on _attributes_ - operations on _entitites_. 
```{r vars1, message=FALSE, echo=FALSE, result="hide"} library(tidyverse) arrest_tab <- read_csv("data/BPD_Arrests.csv") ``` ```{r echo=FALSE, eval=FALSE} arrest_tab$race <- factor(arrest_tab$race) arrest_tab$sex <- factor(arrest_tab$sex) arrest_tab$incidentOffense <- factor(arrest_tab$incidentOffense) ``` --- layout: true ## Operations that subset attributes --- class: split-60 ### `select` .column[ Suppose we only want to study patterns in these arrests based on a smaller number of attributes. In that case we would like to create a data frame that contains only those attributes of interest. ] .column[ ```{r select_img, echo=FALSE, fig.align="center", out.width="30%"} knitr::include_graphics("img/select.png") ``` ] --- Let's create a data frame containing only the `age`, `sex` and `district` attributes ```{r select_example} select(arrest_tab, age, sex, district) ``` The first argument to `select` is a data frame, and the value returned by `select` is also a data frame --- We can use an operator to describe ranges. E.g., `1:5` would be attributes 1 through 5: ```{r select_range} select(arrest_tab, 1:5) ``` --- layout: true ## Operations that subset entities --- class: split-60 ### `slice` .column[ We can choose specific entities by their row position. For instance, to choose entities in rows 1,3 and 10, we would use the following: ```{r slice, eval=FALSE} slice(arrest_tab, c(1, 3, 10)) ``` ] .column[ ```{r slice_fig, echo=FALSE, fig.align="center", out.width="40%"} knitr::include_graphics("img/subset.png") ``` ] --- As before, the first argument is the data frame to operate on. The second argument is a _vector_ of indices. We used the `c` function (for concatenate) to create a vector of indices. --- We can also use the range operator here: ```{r slice_range} slice(arrest_tab, 1:5) ``` --- To create general sequences of indices we would use the `seq` function. 
For example, to select entities in even positions we would use the following: ```{r slice_even} slice(arrest_tab, seq(2, nrow(arrest_tab), by=2)) ``` --- ### `filter` We can also select entities based on attribute properties. For example, to select arrests where age is less than 18 years old, we would use the following: ```{r filter} filter(arrest_tab, age < 18) ``` --- The second argument is an expression that evaluates to a vector of logical values (`TRUE` or `FALSE`), if the expression evaluates to TRUE for a given entity (row) then that entity (row) is part of the resulting data frame. --- Operators used frequently include: `==`, `!=`: tests equality and inequality respectively (categorical, numerical, datetimes, etc.) `<`, `>`, `<=`, `>=`: tests order relationships for ordered data types (not categorical) `!`, `&`, `|`: not, and, or, logical operators --- To select arrests with ages between 18 and 25 we can use ```{r filter_and} filter(arrest_tab, age >= 18 & age <= 25) ``` --- The filter function can take multiple logical expressions. In this case they are combined with `&`. So the above is equivalent to ```{r filter_and2} filter(arrest_tab, age >= 18, age <= 25) ``` --- ### `sample_n` and `sample_frac` Frequently we will want to choose entities from a data frame at random. The `sample_n` function selects a specific number of entities at random: ```{r sample_n} sample_n(arrest_tab, 10) ``` --- The `sample_frac` function selects a fraction of entitites at random: ```{r sample_frac} sample_frac(arrest_tab, .1) ``` --- layout: false ## Pipelines of operations All of the functions implementing our first set of operations have the same argument/value structure. They take a data frame as a first argument and return a data frame. We refer to this as the _data-->transform-->data_ pattern. This is the core a lot of what we will do in class as part of data analyses. Specifically, we will combine operations into _pipelines_ that manipulate data frames. 
--- In R, the `dplyr` package introduces _syntactic sugar_ to make this pattern explicit. ```{r sample_frac_pipe} arrest_tab %>% sample_frac(.1) ``` --- The `%>%` binary operator takes the value to its **left** and inserts it as the first argument of the function call to its **right**. So the expression `LHS %>% f(another_argument)` is **equivalent** to the expression `f(LHS, another_argument)`. In `pandas`, you can chain `.` calls. --- Using the `%>%` operator and the _data-->transform-->data_ pattern of the functions we've seen so far, we can create pipelines. --- For example, let's create a pipeline that: 1) filters our dataset to arrests between the ages of 18 and 25 2) selects attributes `sex`, `district` and `arrestDate` (renamed as `arrest_date`) 3) samples 50% of those arrests at random We will assign the result to variable `analysis_tab` --- ```{r pipeline} analysis_tab <- arrest_tab %>% filter(age >= 18, age <= 25) %>% select(sex, district, arrest_date=arrestDate) %>% sample_frac(.5) analysis_tab ``` --- **Exercise**: Create a pipeline that: 1) filters dataset to arrests from the "SOUTHERN" district occurring before "12:00" (`arrestTime`) 2) selects attributes, `sex`, `age` 3) samples 10 entities at random --- # Principles: More Operations ```{r sec06_setup, echo=FALSE, message=FALSE} knitr::opts_chunk$set(cache=TRUE) library(tidyverse) arrest_tab <- read_csv("data/BPD_Arrests.csv") ``` Next, we learn a few more fundamental data operations: sorting, creating new attributes, summarizing and grouping. Finally we will take a short detour through a discussion on vectors. --- layout: true ## Operations that sort entities --- Re-order entities based on the value of their `age` attribute, and then `slice` to create a data frame with just the entities of interest ```{r arrange_slice} arrest_tab %>% arrange(age) %>% slice(1:10) ``` --- The `arrange` operation sorts entities by increasing attribute values. Use `desc` helper to sort by decreasing value. 
E.g., find the arrests with the 10 _oldest_ subjects: ```{r arrange_desc_slice} arrest_tab %>% arrange(desc(age)) %>% slice(1:10) ``` --- layout: true ## Operations that create new attributes --- We will often see that for many analyses we will create new attributes based on existing attributes in a dataset. - This is helpful for interpretation, visualization and/or statistical modeling. ```{r mutate_fig, echo=FALSE, fig.align="center", out.width="50%"} knitr::include_graphics("img/mutate.png") ``` --- Suppose I want to represent age in months rather than years in our dataset. To do so I would multiply 12 to the existing age attribute. The function `mutate` creates new attributes based on the result of a given expression: ```{r mutate_age} arrest_tab %>% mutate(age_months = 12 * age) %>% select(arrest, age, age_months) ``` --- layout: true ## Operations that summarize (aggregate) attribute values over entities --- Collapse a data frame to a single row containing the desired attribute summaries. ```{r summarize_fig, echo=FALSE, fig.align="center", out.width="60%"} knitr::include_graphics("img/summarize.png") ``` --- Find minmum, maximum and average age in the dataset: ```{r summarize_mean} summarize(arrest_tab, min_age=min(age), mean_age=mean(age), max_age=max(age)) ``` --- | Operation(s) | Result | |-----------|-------------| | `mean`, `median` | average and median attribute value | | `sd` | standard deviation of attribute values | | `min`, `max` | minimum and maximum attribute values | | `n`, `n_distinct` | number of attribute values and number of _distinct_ attribute values | | `any`, `all` | is `any` attribute value TRUE, or are `all` attribute values TRUE | --- Let's see the number of distinct districts in our dataset: ```{r count_district} summarize(arrest_tab, n_distinct(district)) ``` We may also refer to these summarization operation as **aggregation** since we are computing _aggregates_ of attribute values. 
--- layout: true ## Operations that group entities --- Summarization (aggregation) goes hand in hand with data grouping, where summaries are computed _conditioned_ on other attributes. The notion of _conditioning_ is fundamental to data analysis and we will see it very frequently through the course. It is the basis of statistical analysis and Machine Learning models and it is essential in understanding the design of effective visualizations. --- The goal is to group entities with the same value of one or more attributes. The `group_by` function in essence annotates the rows of a data frame as belonging to a specific group based on the value of some chosen attributes. ```{r groupby_fig, echo=FALSE, fig.align="center", out.width="60%"} knitr::include_graphics("img/groupby.png") ``` --- Group entities by the value of the `district` attribute. ```{r groupby} group_by(arrest_tab, district) ``` --- Subsequent operations are then performed **for each group independently**. For example, when `summarize` is applied to a grouped data frame, summaries are computed for each group of entities, rather than the whole set of entities. --- Calculate minimum, maximum and average age for each district: ```{r groupby_summarize} arrest_tab %>% group_by(district) %>% summarize(min_age=min(age), max_age=max(age), mean_age=mean(age)) ``` --- `group_by/summarize` defines new entities. The entities in our original dataset are arrests. The entities for the result of the last example are the districts. This is a general property of group_by and summarize: it defines a data set where entities are defined by distinct values of the attributes we use for grouping. 
---

Another example: average age for subjects 21 years or older grouped by district and sex:

```{r groupby_example}
arrest_tab %>%
  filter(age >= 21) %>%
  group_by(district, sex) %>%
  summarize(mean_age=mean(age))
```

---

**Exercise**: Write a data operation pipeline that

1) filters records to the southern district and ages between 18 and 25
2) computes mean arrest age for each sex

---
layout: true

## Vectors

---

We previously saw operators to create vectors in R. For instance, we can use `seq` to create a vector that consists of a sequence of integers:

```{r seq_example}
multiples_of_three <- seq(3, 30, by=3)
multiples_of_three
```

---

Let's see how this is represented in R (the `str` function is very handy to do this type of digging around):

```{r class_seq, cache=FALSE}
str(multiples_of_three)
```

---

Like many other languages we use square brackets `[]` to index vectors:

```{r indexing}
multiples_of_three[1]
```

---

We can use ranges as before:

```{r index_range}
multiples_of_three[1:4]
```

---

We can use vectors of non-negative integers for indexing:

```{r index_vec}
multiples_of_three[c(1,3,5)]
```

---

Or even logical vectors:

```{r index_logical}
multiples_of_three[c(TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE)]
```

---

In R, most operations are designed to work with vectors directly (we call that _vectorized_). For example, if I want to add two vectors together I would write (look, no `for` loop!):

```{r sum_vec}
multiples_of_three + multiples_of_three
```

This also works for other arithmetic and logical operations (e.g., `-`, `*`, `/`, `&`, `|`).

---

In data analysis the _vector_ is probably the most fundamental data type (other than basic numbers, strings, etc.). Why? Consider getting data about one attribute, say height, for a group of people. What do you get? A vector of numbers, all in the same unit (say feet, inches or centimeters). How about their name? Then you get a vector of strings.
Abstractly, we think of vectors as arrays of values, all of the same _class_ or datatype. --- layout: true ## Attributes as vectors --- Each column, corresponding to an attribute, is a vector. We use the `pull` function to extract a vector from a data frame. We can then operate index them, or operate on them as vectors ```{r pull_age} age_vec <- arrest_tab %>% pull(age) age_vec[1:10] ``` --- Or, ```{r add_age} 12 * age_vec[1:10] ``` --- The `$` operator serves the same function. ```{r dollar} age_vec <- arrest_tab$age age_vec[1:10] ``` --- The `pull` function however, can be used as part of a pipeline (using operator `%>%`): ```{r mean_age_vec} arrest_tab %>% pull(age) %>% mean() ``` --- ## Functions How to abstract pipelines? Factor into reusable functions that we can apply in other analyses. E.g., a function that executes the age by district/sex summarization we created before: ```{r func_example} summarize_district <- function(df) { df %>% filter(age >= 21) %>% group_by(district, sex) %>% summarize(mean_age=mean(age)) } ``` --- You can include multiple expressions in the function definition (inside brackets `{}`). Notice there is no `return` statement in this function. When a function is called, it returns the value of the last expression in the function definition. In this example, it would be the data frame we get from applying the pipeline of operations. --- You can find more information about vectors, functions and other programming matters we might run into in class in Chapters 17-21 of [R for Data Science](http://r4ds.had.co.nz/program-intro.html) --- **Exercise** Abstract the pipeline you wrote in the previous unit into a function that works for arbitrary districts. The function should take arguments `df` and `district`. 
<file_sep>/materials/lecture-notes/index.rmd --- title: "Lecture Notes: Introduction to Data Science" subtitle: "CMSC320, University of Maryland, College Park" author: "<NAME>" date: "`r Sys.Date()`" fontsize: 12pt --- # Preamble These are lecture notes for [CMSC320, Introduction to Data Science](/IntroDataSci) at the University of Maryland, College Park. Refer to the [Course Web Page](/IntroDataSci) for further information. <file_sep>/materials/slides/clustering/index.rmd --- title: "Introduction to Data Science: Clustering Analysis" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: ratio: "16:9" --- class: title-slide, center, middle count: false ```{r cowplot_setup, echo=FALSE, message=FALSE} library(cowplot) theme_set(theme_cowplot()) ``` .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Clustering] .author[<NAME>] .other-info[ University of Maryland, College Park, USA CMSC320: `r Sys.Date()` ] .logo[![](img/logo.png)] --- ## Unsupervised Learning So far we have seen "Supervised Methods" where our goal is to analyze a _response_ (or outcome) based on various _predictors_. In many cases, especially for Exploratory Data Analysis, we want methods to extract patterns on variables without analyzing a specific _response_. Methods for the latter case are called "Unsupervised Methods". Examples are _Principal Component Analysis_ and _Clustering_. --- ## Unsupervised Learning Interpretation of these methods is much more _subjective_ than in Supervised Learning. For example: if we want to know if a given _predictor_ is related to _response_, we can perform statistical inference using hypothesis testing. --- ## Unsupervised Learning If we want to know which predictors are useful for prediction: use cross-validation to do model selection. 
Finally, if we want to see how well we can predict a specific response, we can use cross-validation to report on test error. --- ## Unsupervised Learning In unsupervised methods, there is no similar clean evaluation methodology. Nonetheless, they can be very useful methods to understand data at hand. --- ## Motivating Example Time series dataset of mortgage affordability as calculated and distributed by Zillow: https://www.zillow.com/research/data/. ```{r clustering_setup, echo=FALSE, message=FALSE, warning=FALSE} library(tidyverse) library(readr) library(lubridate) datadir <- "data" url <- "http://files.zillowstatic.com/research/public/Affordability_Wide_2017Q4_Public.csv" filename <- basename(url) datafile <- file.path(datadir, filename) if (!file.exists(datafile)) { download.file(url, file.path(datadir, filename)) } afford_data <- read_csv(datafile) ``` ```{r tidy_zillow, echo=FALSE, cache=TRUE} tidy_afford <- afford_data %>% filter(Index == "Mortgage Affordability") %>% drop_na() %>% filter(RegionID != 0) %>% dplyr::select(RegionID, matches("^[1|2]")) %>% gather(time, affordability, matches("^[1|2]")) %>% type_convert(col_types=cols(time=col_date(format="%Y-%m"))) wide_afford_df <- tidy_afford %>% dplyr::select(RegionID, time, affordability) %>% spread(time, affordability) value_mat <- wide_afford_df %>% dplyr::select(-RegionID) %>% as.matrix() ``` ```{r zillow_stats, echo=FALSE} ncounties <- nrow(wide_afford_df) year_range <- range(year(tidy_afford$time)) ``` The dataset consists of monthly mortgage affordability values for `r ncounties` counties with data from `r min(year_range)` to `r max(year_range)`. 
--- ## Motivating Example > "To calculate mortgage affordability, we first calculate the mortgage payment for the median-valued home in a metropolitan area by using the metro-level Zillow Home Value Index for a given quarter and the 30-year fixed mortgage interest rate during that time period, provided by the Freddie Mac Primary Mortgage Market Survey (based on a 20 percent down payment)." --- ## Motivating Example > "Then, we consider what portion of the monthly median household income (U.S. Census) goes toward this monthly mortgage payment. Median household income is available with a lag. " --- class: split-60 ## Motivating Example .column[ ```{r zillow_plot1, echo=FALSE, cache=FALSE, warning=FALSE, fig.align="center"} tidy_afford %>% ggplot(aes(x=time,y=affordability,group=factor(RegionID))) + geom_line(color="GRAY", alpha=3/4, size=1/2) + labs(title="County-Level Mortgage Affordability over Time", x="Date", y="Mortgage Affordability") ``` ] .column[Can we partition counties into groups of counties with similar value trends across time?] --- ## Preliminaries In "Supervised Learning" we were concerned with estimates that minimize some error function relative to the outcome of interest $Y$: $$\mu(x) = \arg \min_{\beta} E_{Y|X} L(Y, \beta)$$ --- ## Preliminaries In order to do this, explicitly or not, the methods we were using would be concerned with properties of the conditional probability distribution $p(Y|X)$, without concerning itself with probability distribution $p(X)$ of the predictors themselves. --- ## Preliminaries In unsupervised learning, we are interested in properties of $p(X)$. In our example, what can we say about the distribution of home value time series? Since the dimensionality of $p(X)$ can be large, unsupervised learning methods seek to find structured representations of $p(X)$ that would be possible to estimate. 
--- ## Preliminaries In _clustering_ we assume that predictor space is partitioned and that $p(X)$ is defined over those partitions. In _dimensionality reduction_ we assume that $p(X)$ is really defined over a space (manifold) of smaller dimension. We will start studying clustering first. --- ## Cluster Analysis The high-level goal of cluster analysis is to organize objects (observations) that are _similar_ to each other into groups. We want objects within a group to be more _similar_ to each other than objects in different groups. -- Central to this high-level goal is how to measure the degree of _similarity_ between objects. A clustering method then uses the _similarity_ measure provided to it to group objects into clusters. ```{r setup_kmeans, echo=FALSE, warning=FALSE, message=FALSE} library(broom) library(stringr) ``` ```{r kmeans1, cache=FALSE, echo=FALSE} set.seed(1234) kmeans_res <- kmeans(value_mat, centers=9) augmented_data <- kmeans_res %>% broom::augment(wide_afford_df) %>% gather(time, affordability, matches("^X")) %>% mutate(time=stringr::str_replace(time, "X", "")) %>% type_convert(col_types=cols(time=col_date(format="%Y.%m.%d"))) %>% rename(cluster=.cluster) kmeans_centers <- kmeans_res %>% broom::tidy(col.names=colnames(value_mat)) %>% dplyr::select(cluster, matches("^[1|2]")) %>% gather(time, affordability, -cluster) %>% type_convert(col_types=cols(time=col_date(format="%Y-%m-%d"))) ``` --- class: split-50 ## Cluster Analysis .column[ ```{r kmeans_plot, echo=FALSE, cache=TRUE, fig.height=6} augmented_data %>% ggplot(aes(x=time, y=affordability)) + geom_line(aes(group=RegionID), color="GRAY", alpha=1/2, size=1/2) + facet_wrap(~cluster) + geom_line(data=kmeans_centers, color="BLACK", alpha=1/2, size=1/2) + labs(main="Kmeans Clustering (k=9)", xlab="Date", ylab="affordability") + theme(axis.text.x=element_text(angle=45, hjust=1)) ``` ] .column[Result of the k-means algorithm partitioning the data into 9 clusters. 
The darker series within each cluster shows the average time series within the cluster. ] --- ## Dissimilarity-based Clustering For certain algorithms, instead of similarity we work with dissimilarity, often represented as distances. When we have observations defined over attributes, or predictors, we define dissimilarity based on these attributes. --- ## Dissimilarity-based Clustering Given measurements $x_{ij}$ for $i=1,\ldots,N$ observations over $j=1,\ldots,p$ predictors. Suppose we define a dissimilarity $d_j(x_{ij}, x_{i'j})$, we can then define a dissimilarity between objects as $$d(x_i, x_{i'}) = \sum_{j=1}^p d_j(x_{ij},x_{i'j})$$ --- ## Dissimilarity-based Clustering In the k-means algorithm, and many other algorithms, the most common usage is squared distance $$d_j(x_{ij},x_{i'j}) = (x_{ij}-x_{i'j})^2$$ We can use different dissimilarities, for example $$d_j(x_{ij}, x_{i'j}) = |x_{ij}-x_{i'j}|$$ which may affect our choice of clustering algorithm later on. --- ## Dissimilarity-based Clustering For categorical variables, we could set $$d_j(x_{ij},x_{i'j}) = \begin{cases} 0 \; \textrm{if } x_{ij} = x_{i'j} \\\\ 1 \; \textrm{o.w.} \end{cases}$$ --- ## Dissimilarity-based Clustering If the values the categorical variable have an intrinsic similarity Generalize using symmetric matrix $L$ with elements $L_{rr'} = L_{r'r}$, $L_{rr}=0$ and $L_{rr'} \geq 0$ otherwise. This may of course lead to a dissimilarity that is not a proper distance. --- ## K-means Clustering A commonly used algorithm to perform clustering is the K-means algorithm. It is appropriate when using squared Euclidean distance as the measure of object dissimilarity. $$\begin{aligned} d(x_{i},x_{i'}) & = \sum_{j=1}^p (x_{ij}-x_{i'j})^2 \\\\ {} & = \|x_i - x_{i'}\|^2 \end{aligned}$$ --- ## K-means Clustering K-means partitions observations into $K$ clusters, with $K$ provided as a parameter. 
Given some clustering, or partition, $C$, denote cluster assignment of observation $x_i$ to cluster $k \in \{1,\ldots,K\}$ is denoted as $C(i)=k$. -- K-means minimizes this clustering criterion: $$W(C) = \frac{1}{2} \sum_{k=1}^K \sum_{i: \, C(i)=k} \sum_{i':\, C(i')=k} \|x_i - x_{i'}\|^2$$ --- ## K-means Clustering This is equivalent to minimizing $$W(C) = \frac{1}{2}\sum_{k=1}^K N_k \sum_{i:\,C(i)=k} \|x_i - \bar{x}_k\|^2$$ with: - $\bar{x}_k=(\bar{x}_{k1},\ldots,\bar{x}_{kp})$ - $\bar{x}_{kj}$ is the average of predictor $j$ over the observations assigned to cluster $k$, - $N_k$ is the number of observations assigned to cluster $k$ --- ## K-means Clustering $$W(C) = \frac{1}{2}\sum_{k=1}^K N_k \sum_{i:\,C(i)=k} \|x_i - \bar{x}_k\|^2$$ Minimize the total distance given by each observation to the mean (centroid) of the cluster to which the observation is assigned. --- ## K-means Clustering An iterative algorithm is used to minimize this criterion 0. Initialize by choosing $K$ observations as centroids $m_1,m_2,\ldots,m_k$ 1. Assign each observation $i$ to the cluster with the nearest centroid, i.e., set $C(i)=\arg\min_{1 \leq k \leq K} \|x_i - m_k\|^2$ 2. Update centroids $m_k=\bar{x}_k$ 3. Iterate steps 1 and 2 until convergence --- class: split-30 ## K-means Clustering .column[ Here we illustrate the k-means algorithm over four iterations on our example data with $K=4$. 
] ```{r kmeans_illustration, echo=FALSE, message=FALSE, cache=TRUE} set.seed(1234) K <- 4 nobs <- nrow(value_mat) centroid_indices <- sample(nobs, K) centroids <- value_mat[centroid_indices,] assign_cluster <- function(x, m) { # browser() xx <- rowSums(x^2) mm <- rowSums(m^2) xm <- tcrossprod(x, m) d <- xx - 2*xm d <- sweep(d, 2, mm, FUN="+") apply(d, 1, which.min) } get_centroids <- function(x, a) { inds <- split(seq_len(nobs), a) sapply(inds, function(i) colMeans(x[i,,drop=FALSE])) %>% t() } message("Iteration 1") assignments <- assign_cluster(value_mat, centroids) centroid_df <- as.data.frame(cbind(centroids, cluster=seq_len(K), iteration=1)) assignments_df <- cbind(wide_afford_df, cluster=assignments, iteration=1) for (it in seq(2,4)) { message("Iteration ", it) centroids <- get_centroids(value_mat, assignments) assignments <- assign_cluster(value_mat, centroids) tmp <- as.data.frame(cbind(centroids, cluster=seq_len(K), iteration=it)) centroid_df <- rbind(centroid_df, tmp) tmp <- cbind(wide_afford_df, cluster=assignments, iteration=it) assignments_df <- rbind(assignments_df, tmp) } ``` .column[ ```{r kmeans_illustration2, echo=FALSE, cache=TRUE} tall_assignments_df <- assignments_df %>% gather(time, affordability, matches("^[1|2]")) %>% type_convert(col_types=cols(time=col_date(format="%Y-%m-%d"))) tall_centroids_df <- centroid_df %>% gather(time,affordability,matches("^[1|2]")) %>% type_convert(col_types=cols(time=col_date(format="%Y-%m-%d"))) pl <- tall_assignments_df %>% ggplot(aes(x=time, y=affordability)) + geom_line(aes(group=RegionID), color="GRAY", alpha=1/2, size=1/2) + facet_grid(iteration~cluster) + geom_line(data=tall_centroids_df, color="BLACK") + labs(xlab="Date", ylab="mortgage affordability") + theme(axis.text.x=element_text(angle=45, hjust=1)) show(pl) ``` ] --- ## K-means Clustering Criterion $W(C)$ is reduced in each iteration so the algorithm is assured to converge. 
Not a convex criterion, the clustering we obtain may not be globally optimal.

In practice, the algorithm is run with multiple initializations (step 0) and the best clustering achieved is used.

---

## K-means Clustering

Also, selection of observations as centroids can be improved using the K-means++ algorithm:

0. Choose an observation as centroid $m_1$ uniformly at random
1. To choose centroid $m_k$, compute for each observation $i$ not chosen as a centroid the distance to the nearest centroid $d_i = \min_{1\leq l < k} \|x_i - m_l\|^2$
2. Set centroid $m_k$ to an observation randomly chosen with probability $\frac{e^{d_i}}{\sum_{i'} e^{d_{i'}}}$
3. Iterate steps 1 and 2 until $K$ centroids are chosen

---

## Choosing the number of clusters

The number of clusters $K$ must be determined before running the K-means algorithm.

There is no clean direct method for choosing the number of clusters to use in the K-means algorithm (e.g. no cross-validation method)

```{r gapstat, echo=FALSE, message=FALSE, warning=FALSE, cache=TRUE, results="hide"}
set.seed(1234)
gap_stat <- cluster::clusGap(value_mat, FUN=kmeans, nstart=15, B=100, K.max=9)
gap_stat_df <- gap_stat$Tab %>%
  as_tibble() %>%
  rowid_to_column("k")
```

---
class: split-30

## Choosing the number of clusters

.column[
Looking at criterion $W(C)$ alone is not sufficient as the criterion will become smaller as the value of $K$ is increased.
]

.column[
```{r logw_plot, echo=FALSE}
gap_stat_df %>%
  ggplot(aes(x=k, y=logW)) +
  geom_line() +
  geom_point()
```
]

---

## Choosing the number of clusters

We can use properties of this plot for ad-hoc selection.

Suppose there is a true underlying number $K^*$ of clusters in the data,

- improvement in the $W_K(C)$ statistic will be fast for values of $K \leq K^*$
- slower for values of $K > K^*$.
---

## Choosing the number of clusters

_Improvement in the $W_K(C)$ statistic will be fast for values of $K \leq K^*$_

In this case, there will be a cluster which will contain observations belonging to two of the true underlying clusters, and therefore will have poor within-cluster similarity.

As $K$ is increased, observations may then be separated into separate clusters, providing a sharp improvement in the $W_K(C)$ statistic.

---

## Choosing the number of clusters

_Improvement in the $W_K(C)$ statistic will be slower for values of $K > K^*$_

In this case, observations belonging to a single true cluster are split into multiple clusters, all with generally high within-cluster similarity. Splitting these clusters further will not improve the $W_K(C)$ statistic very sharply.

---
class: split-30

## Choosing the number of clusters

.column[The curve will therefore have an inflection point around $K^*$.
]

.column[
```{r logw_plot_again, echo=FALSE}
gap_stat_df %>%
  ggplot(aes(x=k, y=logW)) +
  geom_line() +
  geom_point()
```
]

---

## Choosing the number of clusters

The _gap statistic_ is used to identify the inflection point in the curve.

It compares the behavior of the $W_K(C)$ statistic based on the data with the behavior of the $W_K(C)$ statistic for data generated uniformly at random over the range of the data.

It chooses the $K$ that maximizes the gap between these two $W_K(C)$ curves.

---
class: split-30

## Choosing the number of clusters

.column[For this dataset, the gap statistic suggests there is no clear cluster structure and therefore $K=1$ is the best choice. A choice of $K=4$ is also appropriate.]

.column[
```{r gapstat_plot, echo=FALSE}
factoextra::fviz_gap_stat(gap_stat)
```
]

---

## Summary

Clustering methods are intuitive methods useful to understand structure within unlabeled observations.

K-means is a frequently used, easy to implement and interpret algorithm for clustering.
<file_sep>/materials/lecture-notes/07-basic_plotting.Rmd # Basic plotting with `ggplot` ```{r setup_cache_07, echo=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` ```{r setup_07, echo=FALSE, message=FALSE} library(tidyverse) arrest_tab <- read_csv("data/BPD_Arrests.csv") ``` We will spend a good amount of time in the course discussing data visualization. It serves many important roles in data analysis. We use it to gain understanding of dataset characteristics throughout analyses and it is a key element of communicating insights we have derived from data analyses with our target audience. In this section, we will introduce basic functionality of the `ggplot` package to start our discussion of visualization throughout the course. The `ggplot` package is designed to work well with the `tidyverse` set of packages. As such, it is designed around the Entity-Attribute data model. Also, it can be included as part of data frame operation pipelines. Let's start with a simple example. Let's create a _dot plot_ of the number of arrests per district in our dataset: ```{r district_dotplot} arrest_tab %>% group_by(district) %>% summarize(num_arrests=n()) %>% ggplot(mapping=aes(y=district, x=num_arrests)) + geom_point() ``` The `ggplot` design is very elegant, takes some thinking to get used to, but is extremely powerful. The central premise is to characterize the building pieces behind `ggplot` plots as follows: 1. The **data** that goes into a plot, a data frame of entities and attributes 2. The **mapping** between data attributes and graphical (aesthetic) characteristics 3. The *geometric* representation of these graphical characteristics So in our example we can fill in these three parts as follows: 1) **Data**: We pass a data frame to the `ggplot` function with the `%>%` operator at the end of the group_by-summarize pipeline. 2) **Mapping**: Here we map the `num_arrests` attribute to the `x` position in the plot and the `district` attribute to the `y` position in the plot. 
Every `ggplot` will contain one or more `aes` calls. 3) **Geometry**: Here we choose points as the _geometric_ representations of our chosen graphical characteristics using the `geom_point` function. In general, the `ggplot` call will have the following structure: ```{r ggplot_schema, eval=FALSE} <data_frame> %>% ggplot(mapping=aes(<graphical_characteristic>=<attribute>)) + geom_<representation>() ``` ## Plot Construction Details ### Mappings Some of the graphical characteristics we will commonly map attributes to include: | Argument | Definition | |----------|------------| | `x` | position along x axis | | `y` | position along y axis | | `color` | color | | `shape` | shape (applicable to e.g., points) | `size` | size | | `label` | string used as label (applicable to text) ### Representations Representations we will use frequently are | Function | Representation | |----------|----------------| | `geom_point` | points | | `geom_bar` | rectangles | | `geom_text` | strings | | `geom_smooth` | smoothed line (advanced) | | `geom_hex` | hexagonal binning | We can include multiple geometric representations in a single plot, for example points and text, by adding (`+`) multiple `geom_<representation>` functions. Also, we can include mappings inside a `geom_` call to map characteristics to attributes strictly for that specific representation. For example `geom_point(mapping=aes(color=<attribute>))` maps color to some attribute only for the point representation specified by that call. Mappings given in the `ggplot` call apply to _all_ representations added to the plot. This cheat sheet is very handy: https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf ## Frequently Used Plots We will look comprehensively at data visualization in more detail later in the course, but for now will list a few common plots we use in data analysis and how they are created using `ggplot`. 
Let's switch data frame to the `mpg` dataset for our examples: ```{r load_mpg} mpg ``` ### Scatter plot Used to visualize the relationship between two attributes. ```{r scatter_example} mpg %>% ggplot(mapping=aes(x=displ, y=hwy)) + geom_point(mapping=aes(color=cyl)) ``` ### Bar graph Used to visualize the relationship between a continuous variable to a categorical (or discrete) attribute ```{r bargraph_mpg} mpg %>% group_by(cyl) %>% summarize(mean_mpg=mean(hwy)) %>% ggplot(mapping=aes(x=cyl, y=mean_mpg)) + geom_bar(stat="identity") ``` ### Histogram Used to visualize the distribution of the values of a numeric attribute ```{r hist_mpg, message=FALSE} mpg %>% ggplot(mapping=aes(x=hwy)) + geom_histogram() ``` ### Boxplot Used to visualize the distribution of a numeric attribute based on a categorical attribute ```{r hist_dot} mpg %>% ggplot(mapping=aes(x=class, y=hwy)) + geom_boxplot() ``` **Exercise**: Make a box plot showing the distribution of ages for arrests for the SOUTHERN district conditioned on sex. <file_sep>/materials/slides/transform/transform.Rmd --- title: "Data Transformations" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Data Transformations] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] ```{r, echo=FALSE, warning=FALSE, message=FALSE} knitr::opts_chunk$set(cache=TRUE) library(tidyverse) theme_set(theme_bw()) ``` --- layout: true ## EDA: Data Transformations --- How is data distributed? 
- visual EDA - quantitative summaries -- Now consider transformations of attributes: - help interpretation of data analyses - help application statistical and machine learning models --- layout: true ## Centering and scaling --- A very common and important transformation is to scale data to a common unit-less scale. Transforming variables from whatever units they are measured (e.g., diamond depth percentage) into "standard deviations away from the mean" units (_standard units_, or $z$-score). --- Given data $x = x_1, x_2, \ldots, x_n$, the transformation applied to obtain centered and scaled variable $z$ is: $$ z_i = \frac{(x_i - \overline{x})}{\mathrm{sd}(x)} $$ where $\overline{x}$ is the mean of data $x$, and $\mathrm{sd}(x)$ is its standard deviation. --- class: split-50 .column[ ```r diamonds %>% mutate(scaled_depth = (depth - mean(depth)) / sd(depth)) %>% ggplot(aes(x=scaled_depth)) + geom_histogram(binwidth=.5) ``` ] ```{r, echo=FALSE, fig.align="center", fig.width=6.5} library(ggplot2) data(diamonds) diamonds %>% mutate(scaled_depth = (depth - mean(depth)) / sd(depth)) %>% ggplot(aes(x=scaled_depth)) + geom_histogram(binwidth=.5) ``` Question: what is the mean of $z$? What is it's standard deviation? --- Another name for this transformation is to _standardize_ a variable. -- After transformation: - all variables in a dataset are in the same, and thus comparable, units. - all variables have the same mean and variance -- This is very helpful for _multivariate_ statistical and ML analyses --- On occasion, you will have use to apply transformations that only _center_ (but not scale) data: $$ z_i = (x_i - \overline{x}) $$ --- Or, apply transformations that only _scale_ (but not center) data: $$ z_i = \frac{x_i}{\mathrm{sd}(x)} $$ --- layout: true ## Treating categorical variables as numeric --- Many modeling algorithms work strictly on numeric measurements. For example: - linear regression, or - support vector machines Strictly defined for numeric measurements. 
-- In this case, need to transform categorical variables into something that we can treat as numeric. --- Let's see a couple of important guidelines for _binary_ variables: categorical variables that only take two values, e.g. - `health_insurance` Yes/No - `cat_picture` Yes/No --- One option is to encode one value of the variable as 1 and the other as 0. For instance: ```{r, echo=FALSE} library(ISLR) data(Wage) ``` ```{r} Wage %>% mutate(numeric_insurance = ifelse(health_ins == "1. Yes", 1, 0)) %>% select(year, age, health_ins, numeric_insurance) %>% head() ``` --- Another option is to encode one value as 1 and the other as -1: ```{r} Wage %>% mutate(numeric_insurance = ifelse(health_ins == "1. Yes", 1, -1)) %>% select(year, age, health_ins, numeric_insurance) %>% head() ``` --- The decision of which of these two transformations to use is based on the method to use or the goal of your analysis. -- E.g, predict `wage` based on `health insurance status` (coded as 0/1) let's us make statements like: "on average, wage increases by $XX if a person has health insurance". -- But, to predict `health insurance status` based on other attributes, a Support Vector Machine requires `health insurance status` to be coded as +1/-1 --- For categorical attributes with more than two values, we extend this idea and encode _each_ value of the categorical variable as a 0/1 column. You will see this referred to as _one-hot-encoding_. --- ```{r} Wage %>% mutate(race_white = ifelse(race == "1. White", 1, 0), race_black = ifelse(race == "2. Black", 1, 0), race_asian = ifelse(race == "3. Asian", 1, 0), race_other = ifelse(race == "4. Other", 1, 0)) %>% select(starts_with("race")) %>% sample_n(5) ``` --- layout: true ### Discretizing continuous values. --- How about transforming data in the other direction, from continuous to discrete values. This can make it easier to compare differences related to continuous measurements: Do doctors prescribe a certain medication to older kids more often? 
Is there a difference in wage based on age? --- It is also a useful way of capturing non-linear relationships in data: we will see this in our regression and prediction unit. Two standard methods used for discretization are to use **equal-length** bins, where variable range is divided into bins _regardless_ of the data distribution. --- class: split-50 ```{r, eval=TRUE, echo=FALSE} library(nycflights13) ``` .column[ ```r flights %>% mutate(dep_delay_discrete = cut(dep_delay, breaks=100)) %>% ggplot(aes(x=dep_delay_discrete)) + geom_bar() ``` ] .column[ ```{r, fig.align="center", fig.width=6.5, echo=FALSE} flights %>% mutate(dep_delay_discrete = cut(dep_delay, breaks=100)) %>% ggplot(aes(x=dep_delay_discrete)) + geom_bar() ``` ] --- The second approach uses **equal-sized** bins, where the range is divided into bins _based_ on data distribution --- class: split-50 .column[ ```r flights %>% mutate(dep_delay_discrete = cut(dep_delay, breaks=quantile(dep_delay, probs=seq(0,1,len=11), na.rm=TRUE))) %>% ggplot(aes(x=dep_delay_discrete)) + geom_bar() ``` ] .column[ ```{r, eval=TRUE, echo=FALSE, fig.align="center", fig.width=6.5} flights %>% mutate(dep_delay_discrete = cut(dep_delay, breaks=quantile(dep_delay, probs=seq(0,1,len=11), na.rm=TRUE))) %>% ggplot(aes(x=dep_delay_discrete)) + geom_bar() ``` ] --- layout: true ## Skewed Data --- In many data analysis, variables will have a _skewed_ distribution over their range. In the last section we saw one way of defining skew using quartiles and median. Variables with skewed distributions can be hard to incorporate into some modeling procedures, especially in the presence of other variables that are not skewed. --- Skewed data may arise when measuring *multiplicative* processes. In this case, interpretation of data may be more intiuitive after a transformation. We have seen an example of skewed data previously when we looked at departure delays in our flights dataset. 
--- class: split-50 ```{r echo=FALSE} library(tidyverse) library(nycflights13) ``` .column[ ```r flights %>% ggplot(aes(x=dep_delay)) + geom_histogram(binwidth=30) ``` ] .column[ ```{r, fig.width=6.5, fig.align="center", echo=FALSE, warning=FALSE} flights %>% ggplot(aes(x=dep_delay)) + geom_histogram(binwidth=30) ``` ] --- In many cases a logarithmic transform is an appropriate transformation to reduce data skew: - If values are all positive: apply `log2` transform - If some values are negative, two options - Started Log: shift all values so they are positive, apply `log2` - Signed Log: $sign(x) \times log2(abs(x) + 1)$. --- class: split-50 Here is a signed log transformation of departure delay data: .column[ ```r transformed_flights <- flights %>% mutate(transformed_dep_delay = sign(dep_delay) * log2(abs(dep_delay) + 1)) transformed_flights %>% ggplot(aes(x=transformed_dep_delay)) + geom_histogram(binwidth=1) ``` ] .column[ ```{r, echo=FALSE, fig.align="center", fig.width=6, warning=FALSE, fig.height=5.5} transformed_flights <- flights %>% mutate(transformed_dep_delay = sign(dep_delay) * log2(abs(dep_delay) + 1)) transformed_flights %>% ggplot(aes(x=transformed_dep_delay)) + geom_histogram(binwidth=1) ``` ] --- layout: false ## Summary Given what we learn from EDA (visually and statistically), we can guide decisions on data transformations - Change data types continuous <-> numeric - Standardization - Log-transforms (reduce skew, also variance stabilization) <file_sep>/materials/lecture-notes/11-single_table_sql.Rmd # SQL I: Single Table Queries The Structured-Query-Language (SQL) is the predominant language used in database systems. It is tailored to the Relational data representation model. SQL is a declarative language, we don't write a _procedure_ to compute a relation, we _declare_ what the relation we want to compute looks like. The actual execution is determined and optimized by the database engine. 
However, there are clear mappings between parts of SQL queries and the operations we have defined so far as implemented in the `tidyverse`. The basic construct in SQL is the so-called `SFW` construct: _select-from-where_ which specifies: - _select_: which attributes you want the answer to have - _from_: which relation (table) you want the answer to be computed from - _where_: what conditions you want to be satisfied by the rows (tuples) of the answer E.g.: movies produced by disney in 1990: note the *rename* ```sql select m.title, m.year from movie m where m.studioname = 'disney' and m.year = 1990 ``` The **select** clause can contain expressions (this is paralleled by the `mutate` operation we saw previously) - `select title || ' (' || to_char(year) || ')' as titleyear` - `select 2014 - year` The **where** clause supports a large number of different predicates and combinations thereof (this is parallel to the `filter` operation) - `year between 1990 and 1995` - `title like 'star wars%'` or `title like 'star wars _'` We can include ordering, e.g., find distinct movies sorted by title ```sql select distinct title from movie where studioname = 'disney' and year = 1990 order by title; ``` ## Group-by and summarize SQL has an idiom for grouping and summarizing (_conditioning_ as we called it before). Remember this is a very important concept that shows up in many data processing platforms - What it does: Partition the tuples by the group attributes (*year* in this case), and do something (*compute avg* in this case) for each group - Number of resulting tuples == Number of groups E.g., compute the average movie length by year ```sql select year, avg(length) from movie group by year ``` ## Subqueries You can nest queries as an expression in an SFW query.
We refer to these "subqueries" as "nested subqueries": E.g., find the movie with the maximum length ```sql select title, year from movie where movie.length = (select max(length) from movie); ``` E.g., find movies with at least 5 stars: an example of a correlated subquery ```sql select * from movies m where 5 <= (select count(*) from starsIn si where si.title = m.title and si.year = m.year); ``` The nested subquery counts the number of actors for that movie. E.g., rank movies by their length. ```sql select title, year, (select count(*) from movies m2 where m1.length <= m2.length) as rank from movies m1; ``` Key insight: A movie is ranked 5th if there are exactly 4 movies with longer length. Most database systems support some sort of a *rank* keyword for doing this. Notice that the above query doesn't work in presence of ties etc.
The basic idea that Oakland (and other teams) used is to *redefine* what makes a player *good*. I.e., figure out what player characteristics translated into *wins*. Once they realized that teams were not really pricing players using these characteristics, they could exploit this to pay for undervalued players, players that were *good* according to their metrics, but were not recognized as such by other teams, and therefore not as expensive. You can get more information about this period in baseball history from: - [Wikipedia](http://en.wikipedia.org/wiki/Moneyball) - [The Moneyball book](http://www.amazon.com/Moneyball-The-Winning-Unfair-Game/dp/0393324818) - [The Moneyball movie](http://www.imdb.com/title/tt1210166/) BTW, you may find this post informative: https://fivethirtyeight.com/features/dont-be-fooled-by-baseballs-small-budget-success-stories/ # The Data We will be using the same dataset as HW2 (the SQL homework). Remember this is a useful database on baseball teams, players and seasons curated by Sean Lahman available at [http://www.seanlahman.com/baseball-archive/statistics/](http://www.seanlahman.com/baseball-archive/statistics/). The database has been made available as a `sqlite` database [https://github.com/jknecht/baseball-archive-sqlite](https://github.com/jknecht/baseball-archive-sqlite). You can check HW2 again for instructions setting up. # The question We want to understand how efficient teams have been historically at spending money and getting wins in return. In the case of Moneyball, one would expect that Oakland was not much more efficient than other teams in their spending before 2000, were much more efficient (they made a movie about it after all) between 2000 and 2005, and by then other teams may have caught up. Your job in this project is to see how this is reflected in the data we have. # Wrangling The data you need to answer these questions is in the `Salaries` and `Teams` tables of the database. 
**Problem 1** Using SQL, write a query to compute the total payroll and winning percentage (number of wins / number of games * 100) for each team (that is, for each `teamID` and `yearID` combination). You should include other columns that will help when performing EDA later on (e.g., franchise ids, number of wins, number of games). **Note:** We will only use data for years 1990-2014 (inclusive) for the project. Make sure your query includes a `WHERE` clause to capture this. As in HW2 you will write your query inside an Rmarkdown document or Jupyter notebook. In Rmarkdown you can use the following pattern to capture the result of the SQL query into a data frame for use later on in the Rmarkdown documnt (see the argument `output.var` in the `sql` query chunk: ```{r echo=FALSE, comment=""} cat(htmltools::includeText("project2_sql.Rmd")) ``` More info on using SQL within Rmarkdown here: https://rmarkdown.rstudio.com/authoring_knitr_engines.html#sql In a Jupyter notebook you saw in HW2 how to use `pandas` to perform a sql query. Follow the same pattern for this project. _Note_: This combination of SQL and R (or python) is a very common workflow in Data Science projects. For data in databases, you use SQL to extract data (aggregates for example) computed on the database server itself. Then use those results for exploratory and other downstream analysis in R or python. # Exploratory data analysis ## Payroll distribution **Problem 2**. Write code to produce a plot (or plots) that shows the distribution of payrolls across teams _conditioned_ on year (from 1990-2014). Note: you may create a single plot as long as the distributions for each year are **clearly** distinguishable (e.g., a single plot overlaying histograms is not OK). **Question 1**. What statements can you make about the distribution of payrolls conditioned on time based on these plots? Remember you can make statements in terms of central tendency, spread, etc. **Problem 3**. 
Write code to produce a plot (or plots) that specifically shows at least one of the statements you made in Question 1. For example, if you make a statement that there is a trend for payrolls to decrease over time, make a plot of a statistic for central tendency (e.g., mean payroll) vs. time to show that specifically. ## Correlation between payroll and winning percentage **Problem 4**. Write code to discretize year into five time periods (e.g., using the `cut` function with parameter `breaks=5` (in R, `bins=5` in python) and then make a scatterplot showing mean winning percentage (y-axis) vs. mean payroll (x-axis) for each of the five time periods. You could add a regression line (using `geom_smooth(method=lm)`) in each scatter plot to ease interpretation. Note: look at the discussion on _faceting_ in the visualization EDA lecture notes. **Question 2**. What can you say about team payrolls across these periods? Are there any teams that standout as being particularly good at paying for wins across these time periods? What can you say about the Oakland A's spending efficiency across these time periods (labeling some points in the scatterplot can help interpretation). # Data transformations ## Standardization across years It looks like comparing payrolls across years is problematic so let's do a transformation that will help with these comparisons. **Problem 5**. Write code to create a new variable in your dataset that standardizes payroll conditioned on year. So, this column for team $i$ in year $j$ should equal $$ \mathrm{standardized\_payroll}_{ij}=\frac{\mathrm{payroll}_{ij} - \overline{\mathrm{payroll}_{\cdot j}}}{s_{\cdot j}} $$ where $\overline{\mathrm{payroll}_{\cdot j}}$ is the average payroll for year $j$, and $s_{\cdot j}$ is the standard deviation of payroll for year $j$. **Problem 6**. Repeat the same plots as Problem 4, but use this new standardized payroll variable. **Question 3**. 
Discuss how the plots from Problem 4 and Problem 6 reflect the transformation you did on the payroll variable. Consider data range, center and spread along with observed correlation in your discussion. Some of these change after the transformation, others don't. ## Expected wins It's hard to see global trends across time periods using these multiple plots, but now that we have standardized payrolls across time, we can look at a single plot showing correlation between winning percentage and payroll across time. **Problem 7**. Make a _single_ scatter plot of winning percentage (y-axis) vs. standardized payroll (x-axis). Add a regression line to highlight the relationship (again using `geom_smooth(method=lm)`). The regression line gives you expected winning percentage as a function of standardized payroll. Looking at the regression line, it looks like teams that spend roughly the average payroll in a given year will win 50% of their games (i.e. `win_pct` is 50 when `standardized_payroll` is 0), and teams increase 5% wins for every 2 standard units of payroll (i.e., `win_pct` is 55 when `standardized_payroll` is 2). We will see how this is done in general using linear regression later in the course. From these observations we can calculate an _expected win percentage_ for team $i$ in year $j$ as $$ \mathrm{expected\_win\_pct}_{ij} = 50 + 2.5 \times \mathrm{standardized\_payroll}_{ij} $$ ## Spending efficiency Using this result, we can now create a single plot that makes it easier to compare teams efficiency. The idea is to create a new measurement unit for each team based on their winning percentage and their expected winning percentage that we can plot across time summarizing how efficient each team is in their spending. **Problem 8**. Write code to calculate spending efficiency for each team $$ \mathrm{efficiency}_{ij} = \mathrm{win\_pct}_{ij} - \mathrm{expected\_win\_pct}_{ij} $$ for team $i$ in year $j$, where `expected_win_pct` is given above. 
Make a line plot with year on the x-axis and efficiency on the y-axis. A good set of teams to plot are Oakland, the New York Yankees, Boston, Atlanta and Tampa Bay (teamIDs `OAK`, `NYA`, `BOS`, `ATL`, `TBA`). That plot can be hard to read since there is so much year to year variation for each team. One way to improve it is to use `geom_smooth` instead of `geom_line`. **Question 4**. What can you learn from this plot compared to the set of plots you looked at in Question 2 and 3? How good was Oakland's efficiency during the Moneyball period? ## Submission Prepare and knit to PDF an Rmarkdown file or Jupyter notebook that includes for each Problem: (a) code to carry out the step discussed, (b) output showing enough of the result of your code to understand the result, and (c) a short prose description of how your code works. Remember, the writeup you are preparing is intended to communicate your data analysis effectively. Thoughtlessly showing large amounts of output in your writeup defeats that purpose. All axes in plots should be labeled in an informative manner. Your answers to any question that refers to a plot should include both (a) a text description of your plot, and (b) a sentence or two of interpretation as it relates to the question asked.
<file_sep>/materials/lecture-notes/09-best_practices.Rmd # Best Practices for Data Science Projects See this slidedeck for discussion of reproducibility, bias, ethics and responsibility along with some practical tips: [Best Practices Slidedeck](/IntroDataSci/slides/cmsc320_best-practices.pdf) <file_sep>/content/lecture-note/statlearn/index.md --- date: 2016-10-10T09:42:48-04:00 title: Introduction to Statistical Learning --- Introduction to randomness, distributions, inference and testing [Lecture Notes: Distributions, Inference and Testing](IntroStatLearn/) ## Resources - [Intro Stat with Randomization and Simulation](https://www.openintro.org/stat/textbook.php?stat_book=isrs) - [OpenIntro Statistics](https://www.openintro.org/stat/textbook.php?stat_book=os) - [Cartoon guide to Statistics](http://www.amazon.com/Cartoon-Guide-Statistics-Larry-Gonick/dp/0062731025) <file_sep>/materials/lecture-notes/30-tree_methods.Rmd # Tree-Based Methods ```{r, echo=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` We saw in previous units the limitation of using linear methods for classification. In particular, the partition of predictor space into regions using a linear model like logistic regression is very limiting. In this unit, we look at a set of elegant and versatile methods that allow these regions to take more complex shapes, but still produce models that are interpretable. These are very popular, well-known and studied methods in Statistical Learning. We will concentrate on Regression and Decision Trees and their extension to Random Forests. ## Regression Trees Consider a task where we are trying to predict a car's fuel consumption in miles per gallon based on the car's weight. A linear model in this case is not a good fit. ```{r, echo=FALSE, cache=FALSE} library(tree) library(ISLR) library(RColorBrewer) palette(brewer.pal(8, "Dark2")) data(Auto) with(Auto, plot(weight, mpg, pch=19, cex=1.4)) ``` Let's take a look at what a regression tree estimates in this case. 
```{r} library(tree) tree <- tree(mpg~weight, data=Auto) plot(tree) text(tree, pretty=0, cex=1.3) ``` The decision trees partitions the `weight` predictor into regions based on its value. We can show this graphically as below. The idea behind the regression tree is that outcome $Y$ (`mpg` in this case) is estimated (or predicted) to be it's mean _within each of the data partitions_. Think of it as the conditional mean of $Y$ where conditioning is given by this region partitioning. ```{r, echo=FALSE, cache=FALSE, results="hide"} library(RColorBrewer) palette(brewer.pal(8, "Dark2")) with(Auto, plot(weight, mpg, pch=19, cex=1.4)) #abline(h=subset(tree$frame, grepl("leaf", tree$frame$var))$yval) abline(v=as.numeric(gsub("<", "", subset(tree$frame, !grepl("leaf", tree$frame$var))$splits[,"cutleft"]))) process_node <- function(i, left, right) { if (tree$frame$var[i] == "<leaf>") { val <- as.numeric(tree$frame$yval[i]) segments(left, val, right, val, col="red", lwd=5) } else { val <- as.numeric(gsub("<","",tree$frame$splits[i, "cutleft"])) i <- process_node(i+1, left, val) i <- process_node(i+1, val, right) } i } process_node(1, .85*min(Auto$weight), 1.05*max(Auto$weight)) ``` Regression and decision trees operate by prediction an outcome variable $Y$ by partitioning feature (predictor) space. The regression tree model then: 1. Partitions space into $J$ non-overlapping regions, $R_1, R_2, \ldots, R_J$. 2. For every observation that falls within region $R_j$, predict response as mean of response for training observations in $R_j$. The important observation is that **Regression Trees create partition recursively** For example, consider finding a good predictor $j$ to partition space its axis. A recursive algorithm would look like this: 1. 
Find predictor $j$ and value $s$ that minimize RSS: $$ \sum_{i:\, x_i \in R_1(j,s))} (y_i - \hat{y}_{R_1})^2 + \sum_{i:\, x_i \in R_2(j,s))} (y_i - \hat{y}_{R_2})^2 $$ Where $R_1$ and $R_2$ are regions resulting from splitting observations on predictor $j$ and value $s$: $$ R_1(j,s) = \{X|X_j < s\} \mathrm{ and } R_2(j,s) \{X|X_j \geq s\} $$ This is then applied recursively to regions $R_1$ and $R_2$. Within each region a prediction is made using $\hat{y}_{R_j}$ which is the mean of the response $Y$ of observations in $R_j$. ![](img/8.3.png) Consider building a model that used both `horsepower` and `weight`. In this plot the value of the response $Y$ is indicated by the size of the point. ```{r, echo=FALSE} with(Auto, { plot(horsepower, weight, cex=mpg/median(mpg), pch=19) qs <- quantile(mpg, p=seq(0,1, len=5)) legend("bottomright", pch=19, legend=qs, pt.cex=qs/median(mpg)) }) ``` This is what a decision tree would look like for these two predictors: ```{r} tree <- tree(mpg~horsepower+weight, data=Auto) plot(tree) text(tree, pretty=0) ``` ```{r, echo=FALSE, cache=FALSE} process_node <- function(i, j, left, right, bottom, top, dat) { var <- as.character(tree$frame$var[i]) is_leaf <- grepl("leaf", var) if (is_leaf) { val <- as.numeric(tree$frame$yval[i]) dat[j,] <- c(j, left, right, bottom, top, val) j <- j + 1 } else { val <- as.numeric(gsub("<","",tree$frame$splits[i, "cutleft"])) if (var == "horsepower") { res <- process_node(i+1, j, left, val, bottom, top, dat) i <- res$i; j <- res$j; dat <- res$dat res <- process_node(i+1, j, val, right, bottom, top, dat) i <- res$i; j <- res$j; dat <- res$dat } else { res <- process_node(i+1, j, left, right, bottom, val, dat) i <- res$i; j <- res$j; dat <- res$dat res <- process_node(i+1, j, left, right, val, top, dat) i <- res$i; j <- res$j; dat <- res$dat } } list(i=i, j=j, dat=dat) } nleaves <- sum(grepl("leaf", tree$frame$var)) region_dat <- data.frame(j=integer(nleaves), left=numeric(nleaves), right=numeric(nleaves), 
bottom=numeric(nleaves), top=numeric(nleaves), val=numeric(nleaves)) res <- process_node(1, 1, .85*min(Auto$horsepower), 1.05*max(Auto$horsepower), .85*min(Auto$weight), 1.05*max(Auto$weight), region_dat) region_dat <- res$dat with(Auto, { plot(horsepower, weight, cex=mpg/median(mpg), pch=19) qs <- quantile(mpg, p=seq(0,1, len=5)) legend("bottomright", pch=19, legend=qs, pt.cex=qs/median(mpg)) }) with(region_dat, { segments(left, bottom, right, bottom) segments(left, top, right, top) segments(left, bottom, left, top) segments(right, bottom, right, top) text(.5*(left+right), .5*(top+bottom), labels=j, cex=4, col="red") }) ``` ```{r, echo=FALSE, eval=FALSE} plot(tree) text(tree, pretty=0) ``` ## Classification (Decision) Trees Classification, or decision trees, are used in classification problems, where the outcome is categorical. The same partitioning principle holds, but now, each region predicts the majority class for training observations within region. The recursive partitioning method requires a score function to choose predictors (and values) to partition with. In classification we could use a naive approach of looking for partitions that minimize training error. However, better performing approaches use more sophisticated metrics, which we will see shortly. Let's look at how a classification tree performs on a credit card default dataset. ```{r, echo=FALSE, dev="png"} data(Default) with(Default, { plot(balance, income, pch=ifelse(student=="Yes", 19, 21), col=default) legend("topright", pch=c(19,21,19,19), col=c("black","black",1,2), legend=c("Student", "Not Student","Not Default","Default")) }) ``` ```{r} default_tree <- tree(default~student+balance+income, data=Default) plot(default_tree) text(default_tree, pretty=0) ``` ## Specifics of the partitioning algorithm ### The predictor space Suppose we have $p$ explanatory variables $X_1,\ldots,X_p$ and $N$ observations. 
Each of the $X_i$ can be a) a numeric variable: there are $n-1$ possible splits b) an ordered factor (categorical variable): there are $k-1$ possible splits c) an unordered factor: $2^{k-1}-1$ possible splits. ### Learning Strategy The general procedure for tree learning is the following: 1. **Grow** an overly large tree using forward selection as follows: at each step, find the *best* split among all attributes. Grow until all terminal nodes either (a) have $< m$ (perhaps $m=1$) data points (b) are "pure" (all points in a node have [almost] the same outcome). 2. **Prune** the tree back, creating a nested sequence of trees, decreasing in *complexity* ### Tree Growing The recursive partitioning algorithm is as follows: INITIALIZE All cases in the root node REPEAT Find optimal allowed split Partition leaf according to split STOP Stop when pre-defined criterion is met A problem in tree construction is how to use the training data to determine the binary splits of dataset $\mathcal{X}$ into smaller and smaller pieces. The fundamental idea is to select each split of a subset so that the data in each of the descendent subsets are "purer" than the data in the parent subset. ### Deviance as a measure of impurity A simple approach is to assume a multinomial model and then use deviance as a definition of impurity. Assume $Y \in \mathcal{G}=\{1,2,\ldots,k\}$. * At each node $i$ of a classification tree we have a probability distribution $p_{ik}$ over the $k$ classes. * We observe a random sample $n_{ik}$ from the multinomial distribution specified by the probabilities $p_{ik}$. * Given $X$, the conditional likelihood is then proportional to $\prod_{(\text{leaves } i)} \prod_{(\text{classes } k)} p_{ik}^{n_{ik}}$. * Define a deviance $D=\sum D_i$, where $D_i=-2\sum_k n_{ik} \log(p_{ik})$. * Estimate $p_{ik}$ by $\hat{p}_{ik}=\frac{n_{ik}}{n_i}$. 
### Other measures of impurity Other commonly used measures of impurity at a node $i$ of a classification tree are **misclassification rate**: $\frac{1}{n_i} \sum_{j\in A_i} I(y_j \neq k_i)=1-\hat{p}_{ik_i}$ **entropy**: $-\sum_k p_{ik} \log(p_{ik})$ **GINI index**: $\sum_{j\neq k} p_{ij}p_{ik} = 1-\sum_k p_{ik}^2$ where $k_i$ is the most frequent class in node $i$. For regression trees we use the residual sum of squares: $$ D = \sum_{\text{cases } j} (y_j-\mu_{[j]})^2 $$ where $\mu_{[j]}$ is the mean value in the node that case $j$ belongs to. ### Tree Pruning * Grow a big tree $T$ * Consider snipping off terminal subtrees (resulting in so-called rooted subtrees) * Let $R_i$ be a measure of impurity at leaf $i$ in a tree. Define $R=\sum_i R_i$ * Define size as the number of leaves in a tree * Let $R_{\alpha} = R + \alpha \times \mathrm{size}$ The set of rooted subtrees of $T$ that minimize $R_{\alpha}$ is nested. ## Properties of Tree Method Good properties of Regression and Classification trees include: * Decision trees are very "natural" constructs, in particular when the explanatory variables are categorical (and even better when they are binary) * Trees are easy to explain to non-data analysts * The models are invariant under transformations in the predictor space * Multi-factor responses are easily dealt with * The treatment of missing values is more satisfactory than for most other models * The models go after interactions immediately, rather than as an afterthought * Tree growth is much more efficient than described here However, they do have important issues to address * Tree space is huge, so we may need lots of data * We might not be able to find the *best* model at all as it is a greedy algorithm * It can be hard to assess uncertainty in inference about trees * Results can be quite variable (tree selection is not very stable) * Simple trees usually don't have a lot of predictive power ## Random Forests Random Forests are a **very popular** approach that
addresses these shortcomings via resampling of the training data. Their goal is to improve prediction performance and reduce instability by _averaging_ multiple decision trees (a forest constructed with randomness). It uses two ideas to accomplish this. The first idea is *Bagging* (bootstrap aggregation) General scheme: 1. Build many decision trees $T_1, T_2, \ldots, T_B$ from training set 2. Given a new observation, let each $T_j$ predict $\hat{y}_j$ 3. For regression: predict average $\frac{1}{B} \sum_{j=1}^B \hat{y}_j$, for classification: predict with majority vote (most frequent class) How do we get many decision trees from a single training set? For this we use the _bootstrap_ resampling technique. To create $T_j, \, j=1,\ldots,B$ from training set of size $n$: a) create a bootstrap training set by sampling $n$ observations from training set **with replacement** b) build a decision tree from bootstrap training set ![](img/bootstrap.png) The second idea used in Random Forests is to use a random selection of features to split when deciding partitions. Specifically, when building each tree $T_j$, at each recursive partition only consider a randomly selected subset of predictors to check for best split. This reduces correlation between trees in forest, improving prediction accuracy. Let's look at the same car dataset again ```{r, echo=TRUE, messages=FALSE} set.seed(1234) train_indices <- sample(nrow(Auto), nrow(Auto)/2) train_set <- Auto[train_indices,] test_set <- Auto[-train_indices,] library(randomForest) auto_rf <- randomForest(mpg~cylinders+displacement+horsepower+weight+acceleration+year+origin, importance=TRUE, mtry=3, data=train_set) ``` Let's plot the predicted miles per gallon given by a random forest compared to the observed miiles per gallon in the training dataset. 
```{r, echo=FALSE} plot(train_set$mpg, predict(auto_rf, newdata=train_set), xlab="Observed MPG", ylab="Predicted MPG", main="RF Training Error") abline(0,1) rmse <- sqrt( mean( (train_set$mpg - predict(auto_rf, newdata=train_set) )^2 )) legend("bottomright", legend=paste("RMSE=", round(rmse, digits=2)), cex=2) ``` Now let's look at the same plot on a _testing_ dataset. ```{r, echo=FALSE} plot(test_set$mpg, predict(auto_rf, newdata=test_set), xlab="Observed MPG", ylab="Predicted MPG", main="RF Testing Error") abline(0,1) rmse <- sqrt( mean( (test_set$mpg - predict(auto_rf, newdata=test_set) )^2 )) legend("bottomright", legend=paste("RMSE=", round(rmse, digits=2)), cex=2) ``` A disadvantage of random forests is that we lose interpretability. However, we can use the fact that a bootstrap sample was used to construct trees to measure _variable importance_ from the random forest. Here is a table of _variable importance_ for the random forest we just constructed. ```{r, echo=TRUE, results="asis"} variable_importance <- importance(auto_rf) knitr::kable(head(round(variable_importance, digits=2))) ``` And a barplot of the same data. ```{r, echo=FALSE} imp <- importance(auto_rf)[,2] par(mar=par()$mar+c(0,5,0,0)) o <- order(imp) barplot(imp[o], horiz=TRUE, xlab="Variable Importance", las=2, cex.names=1.6) ``` ## Tree-based methods summary Tree-based methods are very interpretable _prediction_ models. For which some inferential tasks are possible (e.g., variable importance in random forests), but are much more limited than the linear models we saw previously. These methods are very commonly used across many application domains and Random Forests often perform at state-of-the-art for many tasks. 
<file_sep>/content/lecture-note/datatypes/index.md --- date: 2016-09-09T11:57:48-04:00 title: Measurement and data types --- Examples of data sets, data types and measurements [Lecture Notes](DataTypes/) <file_sep>/materials/slides/logistic-regression/logistic-regression.Rmd --- title: "Logistic Regression" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Logistic Regression] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] ```{r setup1, include=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) ``` --- layout: true ## Linear models for classification --- The general classification setting is: can we predict categorical response/output $Y$, from set of predictors $X_1,X_2,\ldots,X_p$? As in the regression case, we assume training data $(\mathbf{x}_1, y_1), \ldots, (\mathbf{x}_n, y_n)$. In this case, however, responses $y_i$ are categorical and take one of a fixed set of values. --- .image-40[![](img/4_1a.png)] .image-40[![](img/4_1b.png)] --- ### An example classification problem An individual's choice of transportation mode to commute to work. Predictors: income, cost and time required for each of the alternatives: driving/carpooling, biking, taking a bus, taking the train. Response: whether the individual makes their commute by car, bike, bus or train. --- ### Why not linear regression? Why can't we use linear regression in the classification setting. 
For categorical responses with more than two values, if order and scale (units) don't make sense, then it's not a regression problem --- For **binary** (0/1) responses, it's a little better. We could use linear regression in this setting and _interpret_ response $Y$ as a probability (e.g, if $\hat{y} > 0.5$ predict $\mathtt{drug overdose}$) --- .center.image-80[![](img/4_2.png)] --- ### Classification as probability estimation problem Instead of modeling classes 0 or 1 directly, we will model the conditional class probability $p(Y=1|X=x)$, and classify based on this probability. In general, classification approaches use _discriminant_ (think of _scoring_) functions to do classification. _Logistic regression_ is **one** way of estimating the class probability $p(Y=1|X=x)$ (also denoted $p(x)$) --- ```{r,echo=FALSE, message=FALSE} library(MASS) library(RColorBrewer) mycols <- brewer.pal(8, "Dark2")[c(3,2)] s <- sqrt(1/5) set.seed(30) makeX <- function(M, n=100, sigma=diag(2)*s) { z <- sample(1:nrow(M), n, replace=TRUE) m <- M[z,] return(t(apply(m,1,function(mu) mvrnorm(1,mu,sigma)))) } M0 <- mvrnorm(10, c(1,0), diag(2)) # generate 10 means x0 <- makeX(M0) ## the final values for y0=blue M1 <- mvrnorm(10, c(0,1), diag(2)) x1 <- makeX(M1) x <- rbind(x0, x1) y <- c(rep(0,100), rep(1,100)) cols <- mycols[y+1] GS <- 75 # put data in a Gs x Gs grid XLIM <- range(x[,1]) tmpx <- seq(XLIM[1], XLIM[2], len=GS) YLIM <- range(x[,2]) tmpy <- seq(YLIM[1], YLIM[2], len=GS) newx <- expand.grid(tmpx, tmpy) colnames(newx) <- c("X1","X2") ``` ```{r, echo=FALSE, fig.height=7, fig.width=10,fig.align="center"} layout(matrix(1:4, nr=2, byrow=FALSE)) plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n", main="Training Set") points(x, col=cols) # logistic regression dat <- data.frame(X1=x[,1], X2=x[,2]) fit <- glm(y~X1+X2, data=dat,family=binomial) yhat <- predict(fit, newdata=newx) yhat <- ifelse(yhat > 0, 2, 1) colshat <- mycols[yhat] coefs <- coef(fit) a <- -coefs[1] / 
coefs[3] b <- -coefs[2] / coefs[3] plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n",main="logistic regression") points(x, col=cols) points(newx, col=colshat, pch=".") abline(a=a,b=b) # KNN(15) library(class) yhat <- knn(x, newx, y, k=15) colshat <- mycols[as.numeric(yhat)] plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n",main="KNN(15)") points(x, col=cols) points(newx, col=colshat, pch=".") contour(tmpx, tmpy, matrix(as.numeric(yhat),GS,GS), levels=c(1,2), add=TRUE, drawlabels=FALSE) # KNN(1) yhat <- knn(x, newx, y, k=1) colshat <- mycols[as.numeric(yhat)] plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n",main="KNN(1)") points(x, col=cols) points(newx, col=colshat, pch=".") contour(tmpx, tmpy, matrix(as.numeric(yhat),GS,GS), levels=c(1,2), add=TRUE, drawlabels=FALSE) ``` --- ### Logistic regression The basic idea behind _logistic regression_ is to build a **linear** model _related_ to $p(x)$, since linear regression directly (i.e. $p(x) = \beta_0 + \beta_1 x$) doesn't work. 
---

Instead we build a linear model of _log-odds_:

$$
\log \frac{p(x)}{1-p(x)} = \beta_0 + \beta_1 x
$$

---

```{r, echo=FALSE, results="hide", fig.width=10, fig.height=7, fig.align="center"}
library(ISLR)
data(Default)
fit <- glm(default~balance, data=Default, family=binomial)
ilogis <- function(theta) exp(theta) / (1 + exp(theta))
makeplot <- function(beta1) {
  with(Default, plot(balance, as.numeric(default)-1, ylab="Probability of default", main=substitute(list(hat(beta)[0]==beta0, hat(beta)[1]==beta1), list(beta0=round(coef(fit)[1], digits=2), beta1=round(beta1, digits=3)))))
  curve(ilogis(coef(fit)[1] + beta1 * x), add=TRUE, col="blue", lwd=1.3)
  abline(h=c(0,1), lty=2)
}
layout(matrix(1:4, nr=2, byrow=TRUE))
sapply(c(0.001, coef(fit)[2], 0.01, 0.1), makeplot)
```

---

Here is how we compute a logistic regression model in R

```{r, message=FALSE, echo=FALSE}
library(ISLR)
library(dplyr)
library(broom)
library(ggplot2)
data(Default)
```

```{r}
default_fit <- glm(default ~ balance, data=Default, family=binomial)
default_fit %>% tidy()
```

---

Interpretation of logistic regression models is slightly different than the linear regression model we looked at. In this case, the **odds** that a person defaults increase by $e^{0.0055} \approx `r round(exp(0.0055),4)`$ for every dollar in their account balance.

---

As before, the **accuracy** of $\hat{\beta}_1$ as an estimate of the **population** parameter is given by its standard error. We can again construct a confidence interval for this estimate as we've done before.

---

As before, we can do hypothesis testing of a relationship between account balance and the probability of default. In this case, we use a $Z$-statistic $\frac{\hat{\beta}_1}{\mathrm{SE}(\hat{\beta}_1)}$ which plays the role of the t-statistic in linear regression: a scaled measure of our estimate (signal / noise).
---

As before, the P-value is the probability of seeing a Z-value as large (e.g., 24.95) under the null hypothesis that **there is no relationship between balance and the probability of defaulting**, i.e., $\beta_1=0$ in the population.

---

We require an algorithm to _estimate_ parameters $\beta_0$ and $\beta_1$ according to a data fit criterion.

In logistic regression we use the **Bernoulli** probability model we saw previously (think of flipping a coin weighted by $p(x)$), and _estimate_ parameters to **maximize** the _likelihood_ of the observed training data under this coin flipping (binomial) model.

---

Usually, we do this by _minimizing_ the negative of the log likelihood of the model. I.e.: solve the following optimization problem

$$\min_{\beta_0, \beta_1} \sum_{i=1}^n -y_i f(x_i) + \log (1+e^{f(x_i)})$$

where $f(x_i) = \beta_0 + \beta_1 x_i$. This is a non-linear (but convex) optimization problem.

---

### Making predictions

We can use a learned logistic regression model to make predictions. E.g., "on average, the probability that a person with a balance of $1,000 defaults is":

$$
\hat{p}(1000) = \frac{e^{\hat{\beta}_0 + \hat{\beta}_1 \times 1000}}{1+e^{\hat{\beta}_0 + \hat{\beta}_1 \times 1000}} \approx \frac{e^{-10.6514 + 0.0055 \times 1000}}{1+e^{-10.6514 + 0.0055 \times 1000}} \\
\approx 0.00576
$$

---

### Multiple logistic regression

This is a classification analog to linear regression:

$$
\log \frac{p(\mathbf{x})}{1-p(\mathbf{x})} = \beta_0 + \beta_1 x_1 + \cdots + \beta_p x_p
$$

---

```{r}
fit <- glm(default ~ balance + income + student, data=Default, family="binomial")
fit %>% tidy()
```

---

As in multiple linear regression it is essential to avoid **confounding**!

---

Consider an example of single logistic regression of default vs.
student status: ```{r} fit1 <- glm(default ~ student, data=Default, family="binomial") fit1 %>% tidy() ``` --- and a multiple logistic regression: ```{r} fit2 <- glm(default ~ balance + income + student, data=Default, family="binomial") fit2 %>% tidy() ``` --- ```{r, echo=FALSE} bal_range <- range(Default$balance) plot(0,0,xlim=bal_range,ylim=c(0,1),xlab="Credit Card Balance", ylab="Default Rate", type="n") curve(predict(fit1, newdata=data.frame(student="Yes", balance=x), type="response"), add=TRUE, lty=2, col="orange", lwd=1.6) curve(predict(fit1, newdata=data.frame(student="No", balance=x), type="response"), add=TRUE, lty=2, col="blue", lwd=1.6) curve(predict(fit2, newdata=data.frame(student="Yes", balance=x, income=mean(Default$income)), type="response"), add=TRUE, lty=1, col="orange", lwd=1.6) curve(predict(fit2, newdata=data.frame(student="No", balance=x, income=mean(Default$income)), type="response"), add=TRUE, lty=1, col="blue", lwd=1.6) ``` ```{r, echo=FALSE} boxplot(balance~student, data=Default, col=c("blue", "orange"), xlab="Student Status", ylab="Credit Card Balance") ``` --- layout: true ## Classifier evaluation --- How do we determine how well classifiers are performing? 
One way is to compute the _error rate_ of the classifier, the percent of mistakes it makes when predicting class labels

---

We need a more precise language to describe classification mistakes:

| | True Class + | True Class - | Total |
|------------------:|:--------------------|---------------------|-------|
| Predicted Class + | True Positive (TP) | False Positive (FP) | P* |
| Predicted Class - | False Negative (FN) | True Negative (TN) | N* |
| Total | P | N | |

---

Using these we can define statistics that describe classifier performance

| Name | Definition | Synonyms |
|--------------------------------:|:-----------|---------------------------------------------------|
| False Positive Rate (FPR) | FP / N | Type-I error, 1-Specificity |
| True Positive Rate (TPR) | TP / P | 1 - Type-II error, power, sensitivity, **recall** |
| Positive Predictive Value (PPV) | TP / P* | **precision**, 1-false discovery proportion |
| Negative Predictive Value (NPV) | TN / N* | |

---

In the credit default case we may want to increase **TPR** (recall, make sure we catch all defaults) at the expense of **FPR** (1-Specificity, clients we lose because we think they will default)

---

This leads to a natural question: Can we adjust our classifier's TPR and FPR?

Remember we are classifying `Yes` if

$$
\log \frac{P(Y=\mathtt{Yes}|X)}{P(Y=\mathtt{No}|X)} > 0 \Rightarrow \\
P(Y=\mathtt{Yes}|X) > 0.5
$$

What would happen if we use $P(Y=\mathtt{Yes}|X) > 0.2$?

---

A way of describing the TPR and FPR tradeoff is by using the **ROC curve** (Receiver Operating Characteristic) and the **AUROC** (area under the ROC)

Another metric that is frequently used to understand classification errors and tradeoffs is the precision-recall curve:

---
layout: true

## Summary

---

We approach classification as a class probability estimation problem.

Logistic regression partitions predictor space with linear functions.
Logistic regression learns parameter using Maximum Likelihood (numerical optimization) --- Error and accuracy statistics are not enough to understand classifier performance. Classifications can be done using probability cutoffs to trade, e.g., TPR-FPR (ROC curve), or precision-recall (PR curve). Area under ROC or PR curve summarize classifier performance across different cutoffs. <file_sep>/materials/slides/interactive_vis/index.rmd --- title: "Interactive Visualization" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: ratio: "16:9" --- class: title-slide, center, middle count: false ```{r cowplot_setup, echo=FALSE, message=FALSE} library(cowplot) ``` .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Interactive Visualization] .author[<NAME>] .other-info[ University of Maryland, College Park, USA CMSC320: `r Sys.Date()` ] .logo[![](img/logo.png)] --- ## Why Interactivity? 
_Reduce data dimension_: allow user to explore large datasets by quickly switching between dimensions _Overview first, zoom and filter, details on demand_: Provide big picture, let the user explore details as they desire _Linked views for high dimensions_: There is a limit to the number of aesthetic mappings in a single graphic, make multiple graphics but link data objects between them --- ## Examples _Politics_: [http://www.nytimes.com/interactive/2012/11/02/us/politics/paths-to-the-white-house.html?_r=0](http://www.nytimes.com/interactive/2012/11/02/us/politics/paths-to-the-white-house.html?_r=0) _Movies_: [http://www.nytimes.com/interactive/2013/02/20/movies/among-the-oscar-contenders-a-host-of-connections.html](http://www.nytimes.com/interactive/2013/02/20/movies/among-the-oscar-contenders-a-host-of-connections.html) _Sports_: https://projects.fivethirtyeight.com/2018-march-madness-predictions/ --- ## Web-based interactive visualization Take advantage of HTML document description and the [Document Object Model](http://www.w3.org/DOM/) interface to _bind_ data to page elements. - [Shiny](http://shiny.rstudio.com): bind data to controls - [Data-driven Documents (d3.js)](http://d3js.org): bind data to svg elements directly --- class: split-50 ## HTML and DOM Web pages are structured using Hypertext Markup Language .column[ Basic idea is to only specify _content_ and _structure_ but not specify directly _how_ to render pages. ] .column[ ```{html} <!DOCTYPE html> <html> <head> <title>Page Title</title> </head> <body> <h1>Page Title</h1> <p>This is a really interesting paragraph.</p> </body> </html> ``` ] --- class: split-50 ## HTML and DOM Web pages are structured using Hypertext Markup Language .column[ Structure is provided by page _elements._ An important element we'll see later is the arbitrary grouping/containment element `div`. 
] .column[ ```{html} <!DOCTYPE html> <html> <head> <title>Page Title</title> </head> <body> <h1>Page Title</h1> <p>This is a really interesting paragraph.</p> </body> </html> ``` ] --- class: split-50 ## HTML and DOM Web pages are structured using Hypertext Markup Language .column[ The hierarchical structure of elements in a document are defined by the _Document Object Model_ (DOM). ] .column[ ```{html} <!DOCTYPE html> <html> <head> <title>Page Title</title> </head> <body> <h1>Page Title</h1> <p>This is a really interesting paragraph.</p> </body> </html> ``` ] --- ## CSS Cascading Style Sheets are used to style elements in the DOM. ``` body { background-color: white; color: black; } ``` --- ## CSS In general: ``` selectorA, selectorB, selectorC { property1: value; property2: value; property3: value; } ``` --- ## SVG Scalable Vector Graphics (SVG) is special element used to create graphics with text. ``` <svg width="50" height="50"> <circle cx="25" cy="25" r="22" fill="blue" stroke="gray" stroke-width="2"/> </svg> ``` --- ## SVG Elements have _geometric_ attributes and _style_ attributes. ``` <circle cx="250" cy="25" r="25"/> ``` `cx`: x-coordinate of circle center `cy`: y-coordinate of circle center `r`: radius of circle --- ## SVG Elements have _geometric_ attributes and _style_ attributes. ``` <rect x="0" y="0" width="500" height="50"/> ``` `x`: x-coordinate of left-top corner `y`: y-coordinate of left-top corner `width`, `height`: width and height of rectangle --- ## SVG _style_ attributes ``` <circle cx="25" cy="25" r="22" fill="yellow" stroke="orange" stroke-width="5"/> ``` can be styled by class as well ``` svg .pumpkin { fill: yellow; stroke: orange; stroke-width: 5; } ``` ``` <circle cx="25" cy="25" r="22" class="pumpkin"> ``` --- ## Shiny and D3 Shiny: construct DOM and bind data (variables for example) to elements (a slide control for example) http://shiny.rstudio.com D3: bind data to SVG element attributes (position, size, color, transparency, etc.) 
http://d3js.org --- ## Reactivity Interactivity and binding in Shiny achieved using _reactive programming_. Where objects _react_ to changes in other objects. ![](img/reactive1.png) --- ## Reactivity Example: ![](img/reactive2.png) --- ## Reactivity With intermediate objects: ![](img/reactive3.png) --- ## Reactivity A standard paradigm for interactive (event-driven) application development A nice review paper: [http://dl.acm.org/citation.cfm?id=2501666](http://dl.acm.org/citation.cfm?id=2501666) --- ## Binding data to graphical elements With Shiny we can bind data objects to document elements. More examples: [http://shiny.rstudio.com/gallery/](http://shiny.rstudio.com/gallery/) We can also bind data directly to _graphical_ elements since using SVG these are also document elements (D3). --- ## D3 Tutorial [Slides](https://docs.google.com/presentation/d/1YgWaiW7dfQ8C3a_LFiIA9heEaKthY27ajNG0uDuvpws/edit#slide=id.g12bafcc19b_0_105) --- ## D3 Alternatives - If you want to use a toolkit of standard charts based on d3: [NVD3](http://nvd3.org/) - An alternative declarative library: [Vega](https://trifacta.github.io/vega/) - A no-hassle interactive vis library for multiple languages: - [plotly R](https://plotly.com/r/) - [plotly python](https://plotly.com/python/) - [plotly JS](https://plotly.com/javascript/) --- ## D3 and R - We saw previously that D3 can access external data through `json` - That's how we can pass data from R to the Javascript browser --- ## D3 and R - [rCharts](http://ramnathv.github.io/rCharts/): Most mature. Provides binding between R and a small set of javascript viz libraries. - [ggvis](http://ggvis.rstudio.com/): Uses grammar of graphics like ggplot2, bindings to [Vega](https://trifacta.github.io/vega/) to define JS charts. - [htmlwidgets](http://www.htmlwidgets.org/index.html) a formalization of how to bind R to JS libraries. 
- [Roll your own](https://github.com/jcheng5/shiny-js-examples) --- ## D3 and jupyter In jupyter you can use HTML and javascript directly, and use D3 and other JS libraries through that. For more info: https://blog.thedataincubator.com/2015/08/embedding-d3-in-an-ipython-notebook/ --- ## Interactive visualization Essential tool for exploration Helps manage high-dimensionality of data (don't go 3D, link charts!!) <file_sep>/content/syllabus/index.md --- date: 2016-08-29T05:41:03-04:00 title: Syllabus --- This page constiutes the official syllabus for this class. ## Course Information **Introduction to Data Science** **CMSC320 Spring 2020** * Lecture Meeting Times Monday and Wednesday, 5:00pm-6:15pm, IRB 0324 * Instructor: [<NAME>](https://www.hcbravo.org/) Center for Bioinformatics and Computational Biology Department of Computer Science <<EMAIL>> Office: 3226 Iribe Center for Computer Science and Engineering Phone Number: 301-405-2481 **Office Hours:** Friday 1:00pm-2:00pm and by appointment **Prerequisite**: Minimum grade of C- in CMSC216 and CMSC250 **Credits** 3 Data science encapsulates the interdisciplinary activities required to create data-centric products and applications that address specific scientific, socio-political or business questions. It has drawn tremendous attention from both academia and industry and is making deep inroads in industry, government, health and journalism. CMSC 320 focuses on (i) data management systems, (i) exploratory and statistical data analysis, (ii) data and information visualization, and (iv) the presentation and communication of analysis results. It will be centered around case studies and projects drawing extensively from applications. ### Topics Consult the [class home page]({{< baseurl >}}) for an up-to-date course topic schedule. 
### Expected outcomes 1) Students will be able to create specific requirements for a data-centric application used to address a specific problem or question 2) Students will be able to identify and select appropriate tools: language, libraries and data resources, to meet specific requirements for a data-centric application 3) Students will be able to build and disseminate a data-centric application from a set of specific requirements using existing tools, libraries, data resources and publishing mechanisms. ## TAs and Office Hours Schedule **Location is AVW 4122 unless noted otherwise** | Instructor | Day | Start Time | End Time | Location | R/Python | |----------------------|-----------|------------|----------|----------|----------| | <NAME> | Monday | 12:00 | 14:00 | | Python | | <NAME> | Monday | 13:00 | 15:00 | | Both | | <NAME> | Monday | 15:00 | 17:00 | | Both | | <NAME> | Tuesday | 07:30 | 09:30 | | Both | | <NAME> | Tuesday | 09:30 | 11:30 | | Both | | <NAME> | Tuesday | 13:00 | 15:00 | | Both | | <NAME> | Tuesday | 15:00 | 17:00 | | Both | | <NAME> | Tuesday | 17:00 | 18:00 | | Both | | <NAME> | Wednesday | 10:00 | 12:00 | | Both | | <NAME> | Wednesday | 10:00 | 13:00 | | Both | | <NAME> | Wednesday | 15:00 | 17:00 | | Both | | <NAME> | Thursday | 07:30 | 09:30 | | Both | | <NAME> | Thursday | 09:00 | 11:00 | | Both | | <NAME> | Thursday | 14:00 | 18:00 | | Python | | <NAME> | Friday | 08:30 | 10:30 | | Both | | <NAME> | Friday | 09:00 | 11:00 | | Both | | <NAME> | Friday | 12:00 | 14:00 | | Python | | <NAME> | Friday | 13:00 | 14:00 | AVW 3223 | Both | | <NAME> | Friday | 14:00 | 16:00 | | Both | | <NAME> | Friday | 15:00 | 17:00 | | Both | ## Communication with course staff - We will use the class Piazza site [{{< piazzaurl >}}]({{< piazzaurl >}}) for questions, dicussion and announcements. - For any other communication (e.g., absences accomodations etc.) 
send message through ELMS: [{{< elmurl >}}]({{< elmurl >}}) - In case of an extended emergency closure, announcements on policy and procedures will be posted to Piazza. ## Textbook and Resources: There is no required textbook, the [lecture notes]({{< baseurl >}}/lecture-notes/) will serve as the primary material. However, we will be drawing heavily from these sources: * [<NAME> and <NAME>. R for Data Science](http://r4ds.had.co.nz/). * [<NAME>, <NAME>, <NAME> and <NAME>. Introduction to Statistical Learning with Applications in R. Springer 2013.](http://www-bcf.usc.edu/~gareth/ISL/). A free PDF of this book can be downloaded at their site. * [<NAME> and <NAME>. Practical Statistics for Data Scientists](http://shop.oreilly.com/product/0636920048992.do) * [<NAME> and <NAME>. Practical Data Science with R. Manning Publications Co. 2014](http://www.manning.com/zumel/). Additional readings will be posted in ELMS [{{< elmurl >}}]({{< elmurl >}}). Additional class resources are listed [here]({{< baseurl >}}/resources/) ## Student Expectations * There will be reading assignments. Students are expected to have read the material **before** class. * Students are expected to attend lectures. Active participation is expected. There will be graded work done in class. * Assignments are to be handed-in electronically or in class as instructed on their due date. Late assignments will not be accepted. * There will be graded work to be done in class. Students not in class that day, except for an excused absence, will not be able to complete that work outside class. * Students may discuss homeworks and projects in groups. However, each student must write and/or program solutions independently. * Posting project solutions in a public online location without express consent and permission from the instructor is a violation of academic integrity policy. 
* Cell phone usage is **prohibited** during lecture, laptop use will be allowed to the extent that students **demonstrably** use it to follow along an in-class analysis or demonstration. * You can earn full credit for class participation in three ways: (1) lecture participation, asking questions and answering your peers questions, (this will be impossible to keep track of, so please the two below :-) ) (2) piazza participation, asking and answering questions on piazza, (3) regular attendance to office hours (there will be sign-in sheets during office hours). To earn full credit you should aim to ask or answer a question at least once every two weeks on lecture or on piazza; or attend office hours at least once a month (this can include just going to my office hours to chat about computer science, data, science, software engineering, etc.). ## Assignments and Grades Assignments, projects and grades for the class will be posted to the class ELMS site. [{{< elmurl >}}]({{< elmurl >}}). Final course grades will be determined based on the following scheme: * Projects (40%) * Written homework (25%) * Midterm exams (20%) * Final Project (15%) You are responsible for all material discussed in lecture and posted on the class web page, including announcements, deadlines, policies, etc. ### Regrade Requests Regrade requests for homeworks, exams and projects will be accepted for 48 hours once grades are posted. We will use the following procedures. 
#### Homeworks and projects - Assignment/Exam/Project grades are _unmuted_ in ELMS, an announcement in Piazza will be made stating that regrade window is open - Submit request by sending message **through ELMS** to the instructor and TAs with subject `[CMSC320 <Assignment> Regrade Request]` - Requests submitted within the 48 hour window will be honored, we will reach out through ELMS #### Exams Regrade requests for exams will be handled through Gradescope ### Midterms There will be two in-class midterms (dates subject to minor shift based on course pace): - Midterm I: March 2 - Midterm II: April 20 ### Final Project There will be a final project due during the final examination period. The final project will be due on **Monday May 18 2020, 6:00pm**. ## Policies and Resources ### University Policies Policies relevant to Undergraduate Courses are found here: http://ugst.umd.edu/courserelatedpolicies.html. Topics that are addressed in these various policies include academic integrity, student and instructor conduct, accessibility and accommodations, attendance and excused absences, grades and appeals, copyright and intellectual property. ### Excused Absences Any student who needs to be excused for an absence from a single lecture or lab due to illness shall: - Make a reasonable attempt to inform the instructor of his/her illness prior to the class. - Upon returning to the class, present their instructor with a self-signed note attesting to the date of their illness. Each note must contain an acknowledgment by the student that the information provided is true and correct. Providing false information to University officials is prohibited under Part 9(h) of the Code of Student Conduct (V-1.00(B) University of Maryland Code of Student Conduct) and may result in disciplinary action. 
Missing an **exam** for reasons such as illness, religious observance, participation in required university activities, or family or personal emergency (such as a serious automobile accident or close relative’s funeral) will be excused so long as the absence is requested in writing at least **2 days** in advance and the student includes documentation that shows the absence qualifies as excused; a **self-signed note** is not sufficient as exams are Major Scheduled Grading Events. For this class, such events are the final project assessment and midterms, which will be due on the following dates: - Midterm I: March 2 - Midterm II: April 20 - Final Project: May 18, 6:00pm The final exam is scheduled according to the University Registrar. For medical absences, you must furnish documentation from the health care professional who treated you. This documentation must verify dates of treatment and indicate the timeframe that the student was unable to meet academic responsibilities. In addition, it must contain the name and phone number of the medical service provider to be used if verification is needed. No diagnostic information will ever be requested. Note that simply being seen by a health care professional does not constitute an excused absence; it must be clear that you were unable to perform your academic duties. It is the University’s policy to provide accommodations for students with religious observances conflicting with exams, but it is the your responsibility to inform the instructor in advance of intended religious observances. If you have a conflict with one of the planned exams, you must inform the instructor prior to the end of the first two weeks of the class. For missed exams due to excused absences, the instructor will arrange a makeup exam. If you might miss an exam for any other reason other than those above, you must contact the instructor in advance to discuss the circumstances. 
We are not obligated to offer a substitute assignment or to provide a makeup exam unless the failure to perform was due to an excused absence. The policies for excused absences **do not** apply to project assignments. Projects will be assigned with sufficient time to be completed by students who have a reasonable understanding of the necessary material and begin promptly. In cases of **extremely serious** documented illness of **lengthy duration** or other protracted, severe emergency situations, the instructor may consider extensions on project assignments, depending upon the specific circumstances. Besides the policies in this syllabus, the University’s policies apply during the semester. Various policies that may be relevant appear in the [Undergraduate Catalog](http://www.umd.edu/catalog). If you experience difficulty during the semester keeping up with the academic demands of your courses, you may consider contacting the Learning Assistance Service in 2201 Shoemaker Building at (301) 314-7693. Their educational counselors can help with time management issues, reading, note-taking, and exam preparation skills. ### Students with Disabilities Students with disabilities who have been certified by Disability Support Services as needing any type of special accommodations should see the instructor as soon as possible during the schedule adjustment period (the first two weeks of class). Please provide DSS’s letter of accommodation to the instructor at that time. All arrangements for exam accommodations as a result of disability **must** be made and arranged with the instructor at least **three** business days prior to the exam date; later requests (including retroactive ones) will be refused. ### Academic Integrity Academic integrity is an essential part of your educational program. 
Please find more information about academic integrity policies in the Computer Science Department here: [http://www.cs.umd.edu/class/resources/academicIntegrity.html](http://www.cs.umd.edu/class/resources/academicIntegrity.html) ### Course evaluations Course evaluations are important and the department and faculty take student feedback seriously. Students can go to [http://www.courseevalum.umd.edu](http://www.courseevalum.umd.edu) to complete their evaluations. ### Right to Change Information Although every effort has been made to be complete and accurate, unforeseen circumstances arising during the semester could require the adjustment of any material given here. Consequently, given due notice to students, the instructors reserve the right to change any information on this syllabus or in other course materials. Such changes will be announced and prominently displayed at the top of the syllabus. <file_sep>/content/lecture-note/tree_methods/index.md --- date: 2016-11-07T03:34:33-05:00 title: Tree-based methods --- Regression and Decision Trees. Random Forests. [Lecture Notes](TreeMethods/) # Resources ISLR Ch. 8 <file_sep>/materials/lecture-notes/27-regression.Rmd # Linear Regression ```{r regression_setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` Linear regression is a very elegant, simple, powerful and commonly used technique for data analysis. We use it extensively in exploratory data analysis (we used in project 2, for example) and in statistical analyses since it fits into the statistical framework we saw in the last unit, and thus lets us do things like construct confidence intervals and hypothesis testing for relationships between variables. It also provides predictions for continuous outcomes of interest. Note: Much of this development is based on "Introduction to Statistical Learning" by <NAME>, Hastie and Tibshirani. http://www-bcf.usc.edu/~gareth/ISL/ ## Simple Regression Let's start with the simplest linear model. 
The goal here is to analyze the relationship between a _continuous numerical_ variable $Y$ and another (_numerical_ or _categorical_) variable $X$. We assume that in our population of interest the relationship between the two is given by a linear function: $$ Y = \beta_0 + \beta_1 X $$ Here is (simulated) data from an advertising campaign measuring sales and the amount spent in advertising. We think that sales are related to the amount of money spent on TV advertising: $$ \mathtt{sales} \approx \beta_0 + \beta_1 \times \mathtt{TV} $$ ![](img/regression_example.png) Given this data, we would say that we _regress_ `sales` on `TV` when we perform this regression analysis. As before, given data we would like to estimate what this relationship is in the _population_ (what is the population in this case?). What do we need to estimate in this case? Values for $\beta_0$ and $\beta_1$. What is the criteria that we use to estimate them? Just like the previous unit we need to setup an _inverse problem_. What we are stating mathematically in the linear regression problem is that the _conditional expectation_ (or conditional mean, conditional average) of $Y$ given $X=x$ is defined by this linear relationship: $$ \mathbb{E}[Y|X=x] = \beta_0 + \beta_1 x $$ Given a dataset, the inverse problem is then to find the values of $\beta_0$ and $\beta_1$ that minimize deviation between data and expectation, and again use squared devation to do this. **The linear regression problem** Given data $(x_1, y_1), (x_2, y_2), \ldots, (x_n, y_n)$, find values $\beta_0$ and $\beta_1$ that minimize _objective_ or _loss_ function RSS (residual sum of squares): $$ \arg \min_{\beta_0,\beta_1} RSS = \frac{1}{2} \sum_i (y_i - (\beta_0 + \beta_1 x_i))^2 $$ ![](img/minimizing.png) Similar to what we did with the derivation of the mean as a measure of central tendency we can derive the values of minimizers$\hat{\beta}_0$ and $\hat{\beta}_1$. 
We use the same principle, compute derivatives (partial this time) of the objective function RSS, set to zero and solve to obtain: $$ \begin{aligned} \hat{\beta}_1 & = \frac{\sum_{i=1}^n (y_i - \overline{y})(x_i - \overline{x})}{\sum_{i=1}^n (x_i - \overline{x})^2} \\ {} & = \frac{\mathrm{cov}(y,x)}{\mathrm{var}(x)} \\ \hat{\beta}_0 & = \overline{y} - \hat{\beta}_1 \overline{x} \end{aligned} $$ Let's take a look at some data. Here is data measuring characteristics of cars, including horsepower, weight, displacement, miles per gallon. Let's see how well a linear model captures the relationship between miles per gallon and weight ```{r, warning=FALSE, message=FALSE} library(ISLR) library(tidyverse) data(Auto) Auto %>% ggplot(aes(x=weight, y=mpg)) + geom_point() + geom_smooth(method=lm) + theme_minimal() ``` In R, linear models are built using the `lm` function ```{r} auto_fit <- lm(mpg~weight, data=Auto) auto_fit ``` This states that for this dataset $\hat{\beta}_0 = `r auto_fit$coef[1]`$ and $\hat{\beta}_1 = `r auto_fit$coef[2]`$. What's the interpretation? According to this model, a weightless car `weight=0` would run $\approx `r round(auto_fit$coef[1], 2)`$ _miles per gallon_ on average, and, on average, a car would run $\approx `r -round(auto_fit$coef[2],2)`$ _miles per gallon_ fewer for every extra _pound_ of weight. Note, that the units of the outcome $Y$ and the predictor $X$ matter for the interpretation of these values. ## Inference As we saw in the last unit, now that we have an estimate, we want to know its precision. We will see that similar arguments based on the CLT hold again. The main point is to understand that like the sample mean, the regression line we learn from a specific dataset is an estimate. A different sample from the same population would give us a different estimate (regression line). 
But, the CLT tells us that, on average, we are close to the population regression line (i.e., close to $\beta_0$ and $\beta_1$), that the spread around $\beta_0$ and $\beta_1$ is well approximated by a normal distribution and that the spread goes to zero as the sample size increases.

![](img/population_line.png)

### Confidence Interval

Using the same framework as before, we can construct a confidence interval to say how precise we think our estimate of the population regression line is. In particular, we want to see how precise our estimate of $\beta_1$ is, since that captures the relationship between the two variables. We again use a similar framework. First, we calculate a standard error estimate for $\beta_1$:

$$
\mathrm{se}(\hat{\beta}_1)^2 = \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (x_i - \overline{x})^2}
$$

and construct a 95% confidence interval

$$
\beta_1 = \hat{\beta}_1 \pm 1.95 \times \mathrm{se}(\hat{\beta}_1)
$$

Note, $\hat{y}_i = \hat{\beta}_0 + \hat{\beta}_1 x_i$.

Going back to our example:

```{r}
auto_fit_stats <- auto_fit %>%
  tidy() %>%
  select(term, estimate, std.error)
auto_fit_stats
```

This `tidy` function is defined by the `broom` package, which is very handy to manipulate the result of learning models in a consistent manner. The `select` call removes some extra information that we will discuss shortly.

```{r}
confidence_interval_offset <- 1.95 * auto_fit_stats$std.error[2]
confidence_interval <- round(c(auto_fit_stats$estimate[2] - confidence_interval_offset,
                               auto_fit_stats$estimate[2],
                               auto_fit_stats$estimate[2] + confidence_interval_offset), 4)
```

Given the confidence interval, we would say, "on average, a car runs $_{`r confidence_interval[1]`} `r confidence_interval[2]`_{`r confidence_interval[3]`}$ _miles per gallon_ fewer per pound of weight.
### The $t$-statistic and the $t$-distribution As in the previous unit, we can also test a null hypothesis about this relationship: "there is no relationship between weight and miles per gallon", which translates to $\beta_1=0$. Again, using the same argument based on the CLT, if this hypothesis is true then the distribution of $\hat{\beta}_1$ is well approximated by $N(0,\mathrm{se}(\hat{\beta}_1))$, and if we observe the learned $\hat{\beta}_1$ is _too far_ from 0 according to this distribution then we _reject_ the hypothesis. Now, there is a technicality here that we did not discuss in the previous unit that is worth paying attention to. We saw before that the CLT states that the normal approximation is good as sample size increases, but what about moderate sample sizes (say, less than 100)? The $t$ distribution provides a better approximation of the sampling distribution of these estimates for moderate sample sizes, and it tends to the normal distribution as sample size increases. The $t$ distribution is commonly used in this testing situation to obtain the probability of rejecting the null hypothesis. It is based on the $t$-statistic $$ \frac{\hat{\beta}_1}{\mathrm{se}(\hat{\beta}_1)} $$ You can think of this as a _signal-to-noise_ ratio, or a standardizing transformation on the estimated parameter. Under the null hypothesis, the $t$-statistic is well approximated by a $t$-distribution with $n-2$ _degrees of freedom_ (we will get back to _degrees of freedom_ shortly). Like other distributions, you can compute with the $t$-distribution using the `p,d,q,r`-family of functions, e.g., `pt` is the cumulative probability distribution function. In our example, we get a $t$ statistic and P-value as follows: ```{r} auto_fit_stats <- auto_fit %>% tidy() auto_fit_stats ``` We would say: "We found a statistically significant relationship between weight and miles per gallon. 
On average, a car runs $_{`r confidence_interval[1]`} `r confidence_interval[2]`_{`r confidence_interval[3]`}$ _miles per gallon_ fewer per pound of weight ($t$=`r round(auto_fit_stats$statistic[2],2)`, $p$-value<`r auto_fit_stats$p.value[2]`)." ### Global Fit Now, notice that we can make _predictions_ based on our conditional expectation, and that prediction should be better than a prediction with a simple average. We can use this comparison as a measure of how good of a job we are doing using our model to fit this data: how much of the variance of $Y$ can we _explain_ with our model. To do this we can calculate _total sum of squares_: $$ TSS = \sum_i (y_i - \overline{y})^2 $$ (this is the squared error of a prediction using the sample mean of $Y$) and the _residual sum of squares_: $$ RSS = \sum_i (y_i - \hat{y}_i)^2 $$ (which is the squared error of a prediction using the linear model we learned) The commonly used $R^2$ measure compares these two quantities: $$ R^2 = \frac{\mathrm{TSS}-\mathrm{RSS}}{\mathrm{TSS}} = 1 - \frac{\mathrm{RSS}}{\mathrm{TSS}} $$ These types of global statistics for the linear model can be obtained using the `glance` function in the `broom` package. In our example ```{r} auto_fit %>% glance() %>% select(r.squared, sigma, statistic, df, p.value) ``` We will explain the columns `statistic`, `df` and `p.value` when we discuss regression using more than a single predictor $X$. ## Some important technicalities We mentioned above that predictor $X$ could be _numeric_ or _categorical_. However, this is not precisely true. We can use a transformation to represent _categorical_ variables. Here is a simple example: Suppose we have a categorical variable `sex` with values `female` and `male`, and we want to show the relationship between, say `credit card balance` and `sex`. We can create a dummy variable $x$ as follows: $$ x_i = \left\{ \begin{aligned} 1 & \textrm{ if female} \\ 0 & \textrm{o.w.} \end{aligned} \right. 
$$ and fit a model $y = \beta_0 + \beta_1 x$. What is the conditional expectation given by this model? If the person is male, then $y=\beta_0$, if the person is female, then $y=\beta_0 + \beta_1$. So, what is the interpretation of $\beta_1$? The average difference in credit card balance between females and males. We could do a different encoding: $$ x_i = \left\{ \begin{aligned} +1 & \textrm{ if female} \\ -1 & \textrm{o.w.} \end{aligned} \right. $$ Then what is the interpretation of $\beta_1$ in this case? Note, that when we call the `lm(y~x)` function and `x` is a factor with two levels, the first transformation is used by default. What if there are more than 2 levels? We need multiple regression, which we will see shortly. ## Issues with linear regression There are some assumptions underlying the inferences and predictions we make using linear regression that we should verify are met when we use this framework. Let's start with four important ones that apply to simple regression ### Non-linearity of outcome-predictor relationship What if the underlying relationship is not linear? We will see later that we can capture non-linear relationships between variables, but for now, let's concentrate on detecting if a linear relationship is a good approximation. We can use exploratory visual analysis to do this for now by plotting residuals $(y_i - \hat{y}_i)^2$ as a function of the fitted values $\hat{y}_i$. The `broom` package uses the `augment` function to help with this task. 
It augments the input data used to learn the linear model with information of the fitted model for each observation ```{r} augmented_auto <- auto_fit %>% augment() augmented_auto %>% head() ``` With that we can make the plot we need to check for possible non-linearity ```{r} augmented_auto %>% ggplot(aes(x=.fitted,y=.resid)) + geom_point() + geom_smooth() + labs(x="fitted", y="residual") ``` ### Correlated Error For our inferences to be valid, we need residuals to be independent and identically distributed. We can spot non-independence if we observe a trend in residuals as a function of the predictor $X$. Here is a simulation to demonstrate this: ![](img/correlated_error.png) In this case, our standard error estimates would be underestimated and our confidence intervals and hypothesis testing results would be biased. ### Non-constant variance Another violation of the iid assumption would be observed if the spread of residuals is not independent of the fitted values. Here is an illustration, and a possible fix using a log transformation on the outcome $Y$. ![](img/residual_variance.png) ## Multiple linear regression Now that we've seen regression using a single predictor we'll move on to regression using multiple predictors. In this case, we use models of conditional expectation represented as linear functions of multiple variables: $$ \mathbb{E}[Y|X_1=x_1,X_2=x_2,\ldots,X_p=x_p] = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \cdots + \beta_p x_p $$ In the case of our advertising example, this would be a model: $$ \mathtt{sales} = \beta_0 + \beta_1 \times \mathtt{TV} + \beta_2 \times \mathtt{newspaper} + \beta_3 \times \mathtt{facebook} $$ These models let us make statements of the type: "holding everything else constant, sales increased on average by 1000 per dollar spent on Facebook advertising" (this would be given by parameter $\beta_3$ in the example model). 
### Estimation in multivariate regression Generalizing simple regression, we estimate $\beta$'s by minimizing an objective function that represents the difference between observed data and our expectation based on the linear model: $$ \begin{aligned} RSS & = \frac{1}{2} \sum_{i=1}^n (y_i - \hat{y}_i)^2 \\ {} & = \frac{1}{2} \sum_{i=1}^n (y_i - (\beta_0 + \beta_1 x_1 + \cdots + \beta_p x_p))^2 \end{aligned} $$ ![](img/multiple_rss.png) The minimizer is found using numerical algorithms to solve this type of _least squares_ problems. These are covered in Linear Algebra courses, and include the QR decomposition, Gauss-Seidel method, and many others. Later in the course we will look at _stochastic gradient descent_, a simple algorithm that scales to very large datasets. ### Example (cont'd) Continuing with our Auto example, we can build a model for miles per gallon using multiple predictors: ```{r} auto_fit <- lm(mpg~1+weight+cylinders+horsepower+displacement+year, data=Auto) auto_fit ``` From this model we can make the statement: "Holding everything else constant, cars run 0.76 miles per gallon more each year on average". ### Statistical statements (cont'd) Like simple linear regression, we can construct confidence intervals, and test a null hypothesis of no relationship ($\beta_j=0$) for the parameter corresponding to each predictor. 
This is again nicely managed by the `broom` package: ```{r} auto_fit_stats <- auto_fit %>% tidy() auto_fit_stats %>% knitr::kable() ``` ```{r, echo=FALSE} print_confint <- function(fit_df, term, digits=2) { i <- match(term, fit_df$term) confint_offset <- 1.95 * fit_df$std.error[i] confint <- round(c(fit_df$estimate[i] - confint_offset, fit_df$estimate[i], fit_df$estimate[i] + confint_offset), digits) paste0("{}_{", confint[1], "} ", confint[2], "_{", confint[3], "}") } print_pval <- function(fit_df, term) { i <- match(term, fit_df$term) pval <- fit_df$p.value[i] out <- ifelse(pval<1e-16, "<1e-16", paste0("=", pval)) paste0("P-value", out) } ``` In this case we would reject the null hypothesis of no relationship only for predictors `weight` and `year`. We would write the statement for year as follows: "Holding everything else constant, cars run $`r auto_fit_stats %>% print_confint("year")`$ miles per gallon more each year on average (`r auto_fit_stats %>% print_pval("year")`)". ### The F-test We can make additional statements for multivariate regression: "is there a relationship between _any_ of the predictors and the response?". Mathematically, we write this as $\beta_1 = \beta_2 = \cdots = \beta_p = 0$. Under the null, our model for $y$ would be estimated by the sample mean $\overline{y}$, and the error for that estimate is given by the total sum of squared error $TSS$. As before, we can compare this to the residual sum of squared error $RSS$ using the $F$ statistic: $$ \frac{(\mathrm{TSS}-\mathrm{RSS})/p}{\mathrm{RSS}/(n-p-1)} $$ If this statistic is greater (enough) than 1, then we reject the hypothesis that there is no relationship between response and predictors. 
Back to our example, we use the `glance` function to compute this type of summary: ```{r} auto_fit %>% glance() %>% select(r.squared, sigma, statistic, df, p.value) %>% knitr::kable("html") ``` In comparison with the linear model only using `weight`, this multivariate model explains _more of the variance_ of `mpg`, but using more predictors. This is where the notion of _degrees of freedom_ comes in: we now have a model with expanded _representational_ ability. However, the bigger the model, we are conditioning more and more, and intuitively, given a fixed dataset, have fewer data points to estimate conditional expectation for each value of the predictors. That means, that are estimated conditional expectation is less _precise_. To capture this phenomenon, we want statistics that tradeoff how well the model fits the data, and the "complexity" of the model. Now, we can look at the full output of the `glance` function: ```{r} auto_fit %>% glance() %>% knitr::kable("html") ``` Columns `AIC` and `BIC` display statistics that penalize model fit with model size. The smaller this value, the better. Let's now compare a model only using `weight`, a model only using `weight` and `year` and the full multiple regression model we saw before. ```{r} lm(mpg~weight, data=Auto) %>% glance() %>% knitr::kable("html") ``` ```{r} lm(mpg~weight+year, data=Auto) %>% glance() %>% knitr::kable("html") ``` In this case, using more predictors beyond `weight` and `year` doesn't help. ### Categorical predictors (cont'd) We saw transformations for categorical predictors with only two values, and deferred our discussion of categorical predictors with more than two values. In our example we have the `origin` predictor, corresponding to where the car was manufactured, which has multiple values ```{r} Auto <- Auto %>% mutate(origin=factor(origin)) levels(Auto$origin) ``` As before, we can only use numerical predictors in linear regression models. 
The most common way of doing this is to create new dummy predictors to _encode_ the value of the categorical predictor. Let's take a categorical variable `major` that can take values `CS`, `MATH`, `BUS`. We can encode these values using variables $x_1$ and $x_2$ $$ x_1 = \left\{ \begin{aligned} 1 & \textrm{ if MATH} \\ 0 & \textrm{ o.w.} \end{aligned} \right. $$ $$ x_2 = \left\{ \begin{aligned} 1 & \textrm{ if BUS} \\ 0 & \textrm{ o.w.} \end{aligned} \right. $$ Now let's build a model to capture the relationship between `salary` and `major`: $$ \mathtt{salary} = \beta_0 + \beta_1 x_1 + \beta_2 x_2 $$ What is the expected salary for a CS major? $\beta_0$. For a MATH major? $\beta_0 + \beta_1$. For a BUS major? $\beta_0 + \beta_2$. So, $\beta_1$ is the average difference in salary between MATH and CS majors. How can we calculate the average difference in salary between MATH and BUS majors? $\beta_1 - \beta_2$. The `lm` function in R does this transformation by default when a variable has class `factor`. We can see what the underlying numerical predictors look like by using the `model_matrix` function and passing it the model formula we build: ```{r} extended_df <- model.matrix(~origin, data=Auto) %>% as.data.frame() %>% mutate(origin = Auto$origin) extended_df %>% filter(origin == "1") %>% head() ``` ```{r} extended_df %>% filter(origin == "2") %>% head() ``` ```{r} extended_df %>% filter(origin == "3") %>% head() ``` ## Interactions in linear models The linear models so far include _additive_ terms for a single predictor. That let us made statemnts of the type "holding everything else constant...". But what if we think that a pair of predictors _together_ have a relationship with the outcome. 
We can add these _interaction_ terms to our linear models as products: $$ \mathbb{E}[Y|X_1=x_1,X_2=x_2] = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \beta_{12} x_1 x_2 $$ Consider the advertising example: $$ \mathtt{sales} = \beta_0 + \beta_1 \times \mathtt{TV} + \beta_2 \times \mathtt{facebook} + \beta_3 \times (\mathtt{TV} \times \mathtt{facebook}) $$ If $\beta_3$ is positive, then the effect of increasing TV advertising money is increased if facebook advertising is also increased. When using categorical variables, interactions have an elegant interpretation. Consider our car example, and suppose we build a model with an interaction between `weight` and `origin`. Let's look at what the numerical predictors look like: ```{r} extended_df <- model.matrix(~weight+origin+weight:origin, data=Auto) %>% as.data.frame() %>% mutate(origin = Auto$origin) extended_df %>% filter(origin == "1") %>% head() ``` ```{r} extended_df %>% filter(origin == "2") %>% head() ``` ```{r} extended_df %>% filter(origin == "3") %>% head() ``` So what is the expected miles per gallon for a car with `origin == 1` as a function of weight? $$ \mathtt{mpg} = \beta_0 + \beta_1 \times \mathtt{weight} $$ Now how about a car with `origin == 2`? $$ \mathtt{mpg} = \beta_0 + \beta_1 \times \mathtt{weight} + \beta_2 + \beta_4 \times \mathtt{weight} $$ Now think of the graphical representation of these lines. For `origin == 1` the intercept of the regression line is $\beta_0$ and its slope is $\beta_1$. For `origin == 2` the intercept of the regression line is $\beta_0 + \beta_2$ and its slope is $\beta_1+\beta_4$. 
`ggplot` does this when we map a factor variable to a aesthetic, say color, and use the `geom_smooth` method: ```{r} Auto %>% ggplot(aes(x=weight, y=mpg, color=origin)) + geom_point() + geom_smooth(method=lm) ``` The intercept of the three lines seem to be different, but the slope of `origin == 3` looks different (decreases faster) than the slopes of `origin == 1` and `origin == 2` that look very similar to each other. Let's fit the model and see how much statistical confidence we can give to those observations: ```{r} auto_fit <- lm(mpg~weight*origin, data=Auto) auto_fit_stats <- auto_fit %>% tidy() auto_fit_stats %>% knitr::kable() ``` So we can say that for `origin == 3` the relationship between `mpg` and `weight` is different but not for the other two values of `origin`. Now, there is still an issue here because this could be the result of a poor fit from a linear model, it seems none of these lines do a very good job of modeling the data we have. We can again check this for this model: ```{r} auto_fit %>% augment() %>% ggplot(aes(x=.fitted, y=.resid)) + geom_point() ``` The fact that residuals are not centered around zero suggests that a linear fit does not work well in this case. ### Additional issues with linear regression We saw previously some issues with linear regression that we should take into account when using this method for modeling. Multiple linear regression introduces an additional issue that is extremely important to consider when interpreting the results of these analyses: collinearity. ![](img/collinearity.png) In this example, you have two predictors that are very closely related. In that case, the set of $\beta$'s that minimize RSS may not be unique, and therefore our interpretation is invalid. You can identify this potential problem by regressing predictors onto each other. The usual solution is to fit models only including one of the colinear variables. 
<file_sep>/materials/lecture-notes/14-db_parting_shots.Rmd # DB Parting Shots ## Database Query Optimization Earlier we made the distinction that SQL is a _declarative_ language rather than a _procedural_ language. A reason why data base systems rely on a declarative language is that it allows the system to decide how to _evaluate_ a query _most efficiently_. Let's think about this briefly. Consider a database where we have two tables `Batting` and `Master` and we want to evaluate this query: that _what is the maximum batting "average" for a player from the state of California_? ```{r, echo=FALSE, eval=FALSE} db <- DBI::dbConnect(RSQLite::SQLite(), "data/lahman2016.sqlite") ``` ```sql select max(1.0 * b.H / b.AB) as best_ba from Batting as b join Master as m on b.playerId = m.playerId where b.AB >= 100 and m.birthState = "CA" ``` Now, let's do the same computation using `dplyr` operations: ```{r} library(Lahman) library(tidyverse) ``` Here is one version that joins the two tables before filtering the rows that are included in the result. ```{r} Batting %>% inner_join(Master, by="playerID") %>% filter(AB >= 100, birthState == "CA") %>% mutate(AB=1.0 * H / AB) %>% summarize(max(AB)) ``` Here is a second version that filters the rows of the tables _before_ joining the two tables. ```{r} Batting %>% filter(AB >= 100) %>% inner_join( Master %>% filter(birthState == "CA") ) %>% mutate(AB = 1.0 * H / AB) %>% summarize(max(AB)) ``` They both give the same result of course, but which one should be more efficient? Let's think about this with a very simple _cost_ model of how efficient each of these two procedural versions will be. The costliest operation here is the join between two tables. Let's take the simplest algorithm we can to compute the join `T1 %>% inner_join(T2, by="A")` where `T1` and `T2` are the tables to join and `A` the attribute we are using to join the two tables. 
```{r, echo=FALSE, out.width='50%'} knitr::include_graphics("img/inner_join_alg.png") ``` What is the cost of this algorithm? $|T1| \times |T2|$. For the rest of the operations, let's assume we perform this with a single pass through the table. For example, we assume that `filter(T)` has cost $|T|$. Under these assumptions, let's write out the cost of each of the two pipelines we wrote above. ```{r, eval=FALSE} Batting %>% inner_join(Master, by="playerID") %>% # cost: |Batting| x |Master| filter(AB >= 100, birthState == "CA") %>% # cost: |R1| mutate(AB=1.0 * H / AB) %>% # cost: |R| summarize(max(AB)) # cost: |R| ``` So, cost here is $|\mathrm{Batting}|\times|\mathrm{Master}| + |R1| + 2|R|$ where $R1$ is the table resulting from the inner join between `Batting` and `Master` and $R$ is $R$ filtered to rows with `AB >=100 & birthState == "CA"`. We can compute this: ```{r} batting_size <- nrow(Batting) master_size <- nrow(Master) r1 <- Batting %>% inner_join(Master, by="playerID") r1_size <- nrow(r1) r <- filter(r1, AB>=100, birthState == "CA") r_size <- nrow(r) total_cost_v1 <- batting_size * master_size + r1_size + 2*r_size total_cost_v1 ``` Now, let's look at the second version. ```{r, eval=FALSE} Batting %>% filter(AB >= 100) %>% # cost: |Batting| inner_join( Master %>% filter(birthState == "CA") # cost: |Master| ) %>% # cost: |B1| x |M1| mutate(AB = 1.0 * H / AB) %>% # cost |R| summarize(max(AB)) # cost |R| ``` So, cost here is $|\mathrm{Batting}| \times |\mathrm{Master}| + |B1|\times|M1|+2|R|$ where $B1$ is the `Batting` table filtered to include only rows with `AB >= 100`, and $M2$ is the `Master` table filtered to include `birthState == "CA"`. 
Let's compute this: ```{r} b1 <- filter(Batting, AB>=100) b1_size <- nrow(b1) m1 <- filter(Master, birthState == "CA") m1_size <- nrow(m1) total_cost_v2 <- batting_size + master_size + b1_size * m1_size + 2*r_size total_cost_v2 ``` In this case, the procedural version that joins tables before filtering is `r format(total_cost_v1 / total_cost_v2, digits=1)` times costlier. When using SQL in a database system we only write the one query describing our desired result, with the _procedural_ versions with `dplyr` we need to think which of the two versions is more efficient. Database systems use _query optimization_ to decide how to evaluate queries efficiently. The goal of query optimization is to decide the most efficient query _plan_ to use to evaluate a query out of the many possible candidate plans it could use. It needs to solve two problems: search the space of possible plans, approximate the _cost_ of evaluating a specific plan. Let's ignore the first, and discuss briefly the second. We should think of the two procedural versions above as two candidate plans that the DB system _could_ use to evaluate the query. Query optimzation _approximates_ what it would cost to evaluate each of the two plans and decides to use the most efficient plan. So, how does it approximate cost? A few ingredients are used: - Access cost: how much will it cost to access rows that satisfy a given predicate (`where` clause)? Consider the `Master` table. In our query we only need to find rows for players born in California. Suppose we have an _index_ based on attribute `birthState`, e.g. a hash table that allows us to find rows for players from a specific state very efficiently. In that case, accessing these rows using the index is much more efficient than scanning the entire table. This is why creating indexes for tables becomes important. - Operation cost: how much will it cost to perform a join? 
There is a difference between comparing every pair of rows in order to compute a join, versus using indexes to find a small number of rows that satisfy the join condition efficiently? For example, if the `Batting` table has an index on `playerId` it will be cheaper to join with a filtered `Master` table, i.e., only considering rows for players born in California. - Result size estimation: how many rows will we get after we perform a join? We can use information on key constraints to estimate this type of result. Additionally, these estimates also depend on the number of rows that satisfy certain predicates (e.g., number of players born in California) so systems often use histograms to make these estimates. As database system users we may create indices or key constraints that guide the query optimizer to choose more efficient queries. ```{r, echo=FALSE, eval=FALSE} DBI::dbDisconnect(db) ``` ## JSON Data Model The Entity-Relational data model we have described so far is mostly defined for _structured data_: where a specific and consistent schema is assumed. Data models like XML and JSON are instead intended for *semi-structured* data. #### XML: eXtensible Markup Language Data models like XML rely on flexible, self-describing schemas: ```xml <?xml version="1.0" encoding="UTF-8"?> <!-- Edited by XMLSpy --> <CATALOG> <CD> <TITLE><NAME></TITLE> <ARTIST><NAME></ARTIST> <COUNTRY>USA</COUNTRY> <COMPANY>Columbia</COMPANY> <PRICE>10.90</PRICE> <YEAR>1985</YEAR> </CD> <CD> <TITLE>Hide your heart</TITLE> <ARTIST><NAME></ARTIST> <COUNTRY>UK</COUNTRY> <COMPANY>CBS Records</COMPANY> <PRICE>9.90</PRICE> <YEAR>1988</YEAR> </CD> ... 
``` #### JSON: Javascript Object Notation Very similar to XML and seems to be replacing it for many purposes ```json { "firstName": "John", "lastName": "Smith", "isAlive": true, "age": 25, "height_cm": 167.6, "address": { "streetAddress": "21 2nd Street", "city": "New York", "state": "NY", "postalCode": "10021-3100" }, "phoneNumbers": [ { "type": "home", "number": "212 555-1234" }, { "type": "office", "number": "646 555-4567" } ], "children": [], "spouse": null } ``` This is the format most contemporary data REST APIs use to transfer data. For instance, here is part of a JSON record from a Twitter stream: ```json { "created_at":"Sun May 05 14:01:34+00002013", "id":331046012875583488, "id_str":"331046012875583488", "text":"\u0425\u043e\u0447\u0443, \u0447\u0442\u043e\u0431 \u0442\u044b \u0441\u0434\u0435\u043b\u0430\u043b \u0432\u0441\u0451 \u043d\u0435\u043e\u0431\u0445\u043e\u0434\u0438\u043c\u043e\u0435.\n \\,,\\ *_* \/,,\/", "source":"\u003ca href=\"http:\/\/twitterfeed.com\"rel=\"nofollow\"\u003etwitterfeed\u003c\/a\u003e", "in_reply_to_user_id_str":null, "user":{ "id":548422428, "id_str":"548422428", "name":"\u0410\u0439\u0433\u0435\u0440\u0438\u043c \u041f\u043e\u0433\u043e\u0434\u0438\u043d\u0430", "screen_name":"paddybyrny", "location":"\u0420\u043e\u0441\u0441\u0438\u044f;\u0412\u043b\u0430\u0434\u0438\u0432\u043e\u0441\u0442\u043e\u043a", "followers_count":4188, "friends_count":4281, "lang":"en", "profile_background_image_url":"http:\/\/a0.twimg.com\/images\/themes\/theme1\/bg.png", }, "geo":null, "coordinates":null, "entities":{ "hashtags":[],"symbols":[],"urls":[],"user_mentions":[] },"favorited":false,"retweeted":false,"filter_level":"medium","lang":"ru"} ``` <file_sep>/materials/lecture-notes/25-multivariate_probability.Rmd # Multivariate probability ## Joint and conditional probability Suppose that for each tweet I sample I can also say if it has _a lot_ of retweets or not. 
So, I have another binary random variable $Y \in \{0,1\}$ where $Y=1$ indicates the sampled tweet has a lot of retweets. (Note, we could say $Y\sim \mathrm{Bernoulli}(p_Y))$. So we could illustrate the population of "all" tweets as ![](img/joint.png) We can talk of the joint probability distribution of $X$ and $Y$: $p(X=x, Y=y)$, where random variables $X$ and $Y$ can take values from domains $\mathcal{D}_X$ and $\mathcal{D}_Y$ respectively. Here we have the same conditions as we had for univariate distributions: 1. $p(X=x,Y=y)\geq 0$ for all combination of values $x$ and $y$, and 2. $\sum_{(x,y) \in \mathcal{D}_X \times \mathcal{D}_Y} p(X=x,Y=y) = 1$ We can also talk about _conditional probability_ where we look at the probability of a tweet being bot-generated or not, _conditioned_ on wether it has lots of retweets or not: $$ p(X=x | Y=y) $$ which also needs to satisfy the properties of a probability distribution. So to make sure $$ \sum_{x \in \mathcal{D}_X} p(X=x|Y=y) = 1 $$ we define $$ p(X=x | Y=y) = \frac{p(X=x,Y=y)}{p(Y=y)} $$ Here we use the important concept of _marginalization_, which follows from the properties of joint probability distribution we saw above: $\sum_{x \in \mathcal{D}_X} p(X=x, Y=y) = p(Y=y)$. This also lets us talk about _independence_: if the probabilty of a tweet being bot-generated _does not_ depend on a tweet having lots of retweets, that is $p(X=x) = p(X=x|Y=y)$ for all $y$, then we say $X$ is _independent_ of $Y$. Consider the tweet diagram above, is $X$ independent of $Y$? What would the diagram look like if $X$ was independent of $Y$? One more note, you can also see that for independent variables, the joint probability has an easy form $p(X=x,Y=y)=p(X=x)p(Y=y)$, which generalizes to more than two independent random variables. ## Bayes' Rule One extremely useful and important rule of probability follows from our definitions of conditional and joint probability above. 
Bayes' rule is pervasive in Statistics, Machine Learning and Artificial Intelligence. It is a very powerful tool to talk about uncertainty, beliefs, evidence, and many other technical and philosophical matters. It is however, of extreme simplicity. All Bayes' Rule states is that $$ p(X=x|Y=y) = \frac{p(Y=y|X=x)p(X=x)}{p(Y=y)} $$ which follow directly from our definitions above. One very common usage of Bayes' Rule is that it let's us define one conditional probability distribution based on another probability distribution. This is useful is the latter is easier to reason about, or estimate. For example, it may be hard to reason about $p(X=x|Y=y)$ in our tweet example. If you know a tweet has a lot retweets $(Y=1)$, what can you say about the probability that it is bot-generated, i.e., $p(X=1|Y=1)$? Maybe not much, tweets have lots of retweets for many reasons. However, it may be easier to reason about the reverse: if I tell you a tweet is bot-generated $(X=1)$, what can you say about the probability that it has a lot of retweets, i.e., $p(Y=1|X=1)$. That may be easier to reason about, at least bot-generated tweets are designed to get lots of retweets. At minimum, it's easier to estimate because we can get a training set of bot-generated tweets and _estimate_ this conditional probability. Bayes' Rule tells us how to get the hard to reason about (or estimate) conditional probability $p(X=x|Y=y)$ in terms of the conditional probability that is easier to reason about (or estimate) $p(Y=y|X=x)$. This is the basis of the Naive Bayes prediction method, which we'll revisit briefly later on. ## Conditional expectation With conditional probabilty we can start talking about conditional expectation, which generalizes the concept of expectation we saw before. 
For example, the _conditional expected value_ (conditional mean) of $X$ given $Y=y$ is $$ \mathbb{E} [ X|Y=y ] = \sum_{x \in \mathcal{D}_X} x p(X=x|Y=y) $$ This notion of conditional expectation, which follows from conditional probability, will serve as the basis for our Machine Learning method studies in the next few lectures! ## Maximum likelihood One last note. We saw before how we estimated a parameter from matching expectation from a probability model with what we observed in data. The most popular method of estimation uses a similar idea: given data $x_1,x_2,\ldots,x_n$ and an assumed model of their distribution, e.g., $X_i\sim \mathrm{Bernoulli}(p)$ for all $i$, and they are iid, let's find the value of parameter $p$ that maximizes the likelihood (or probability) of the data we observe under this assumed probability model. We call the resulting estimate the _maximum likelihood estimate_. Here are some fun exercises to try: 1) Given a sample $x_1$ with $X_1 \sim N(\mu,1)$, show that the maximum likelihood estimate of $\mu$, $\hat{\mu}=x_1$. It is most often convinient to _minimize negative log-likelihood_ instead of maximizing likelihood. So in this case: $$ \begin{align} -\mathscr{L}(\mu) & = - \log p(X_1=x_1) \\ {} & = \log{\sqrt{2\pi}} + \frac{1}{2}(x_1 - \mu)^2 \end{align} $$ To minimize this function of $\mu$ we can ignore all terms that are independent of $\mu$, and concentrate only on minimizing the last term. Now, this term is always positive, so the smallest value it can have is 0. So, we minimize it by setting $\hat{\mu}=x_1$. 2) Given a sample $x_1,x_2,\ldots,x_n$ of $n$ iid random variables with $X_i \sim N(\mu,1)$ for all $i$, show that the maximum likelihood estimate of $\mu$, $\hat{\mu}=\overline{x}$ the sample mean! Here we would follow a similar approach, write out the negative log likelihood as a function $f(\mu;x_i)$ of $\mu$ that depends on data $x_i$. Two useful properties here are: 1. 
$p(X_1=x_1,X_2=x_2,\ldots,X_n=x_n)=p(X_1=x_1)p(X_2=x_2)\cdots p(X_n=x_n)$, and 2. $\log \prod_i f(\mu;x_i) = \sum_i \log f(\mu;x_i)$ Then find a value of $\mu$ that minimizes this function. Hint: we saw this when we showed that the sample mean is the minimizer of total squared distance in our exploratory analysis unit! <file_sep>/docs/misc/cmsc320_install_libs.R install.packages(c( "ISLR", "Lahman", "MASS", "RColorBrewer", "ROCR", "broom", "caTools", "class", "cowplot", "cvTools", "gapminder", "janeaustenr", "lubridate", "nycflights13", "randomForest", "readr", "rvest", "stringr", "tidyr", "tidytext", "tidyverse", "rpart", "caret"), repos="https://cran.rstudio.com" ) <file_sep>/materials/slides/wrapup/index.rmd --- title: "Wrapup" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: ratio: "16:9" --- class: title-slide, center, middle count: false ```{r cowplot_setup, echo=FALSE, message=FALSE} library(cowplot) ``` .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Wrapup] .author[<NAME>] .other-info[ University of Maryland, College Park, USA CMSC320: `r Sys.Date()` ] .logo[![](img/logo.png)] --- layout: true ## At UMD --- - Next courses (dive deeper): Machine Learning (422) Databases (424) - Other relevant courses: - Computational Methods (460, for optimization) - Bioinformatics (423, application of things we saw in class) - HCI (434, communication and interaction) - CMSC 498O: Selected Topics in Computer Science; Introduction to Data Visualization Advanced Undergraduate Course (Prof. 
<NAME>) --- There is a Data Science specialization that includes these and other courses: [http://undergrad.cs.umd.edu/degree-requirements-cs-major](http://undergrad.cs.umd.edu/degree-requirements-cs-major) --- Start thinking of research opportunities - If you plan on going to grad school, it makes a big difference in applications - If you don't plan on going to grad school, it gives you experience thinking about data-centric problems and applications For data science, in general, it is important to show qualification academically, and productively. Have a portfolio! Github can be very useful. --- layout: true ## Outside UMD --- **Get busy!** - Kaggle competitions: [https://www.kaggle.com/](https://www.kaggle.com/) - Get involved in open source projects. If there was something you wished existed while doing class work, build it! - Join the local DS community: [http://www.datacommunitydc.org/](http://www.datacommunitydc.org/) --- **Learn new things** - Python has a lot of useful stuff for data science [http://www.amazon.com/Python-Data-Analysis-Wrangling-IPython/dp/1449319793](http://www.amazon.com/Python-Data-Analysis-Wrangling-IPython/dp/1449319793) - Tutorials on Kaggle are pretty good, their _kernels_ area is fun to look at: https://www.kaggle.com/kernels - Hector's Data Science corner: http://www.hcbravo.org/IntroDataSci/datasci_corner/ - Many resources available online --- **Stay informed** - Lots of interesting articles and posts, from many different perspectives: [https://www.oreilly.com/topics/data](https://www.oreilly.com/topics/data) - There's even podcasts!: - [Data Skeptic](http://dataskeptic.com/) - [Data Stories](http://datastori.es/) - [Talking Machines](http://www.thetalkingmachines.com/) - [Not so standard deviations](https://soundcloud.com/nssd-podcast) - [More](http://dataconomy.com/top-10-data-science-and-machine-learning-podcasts/) --- Remember, Data Science affords opportunities beyond the mathematical and the technical. 
These are skills that can make significant impact outside the technical realm: journalism, health, civics, etc. E.g., https://medium.com/@dpatil Think about what motivates **you** first, and then figure out how to dive in. <file_sep>/docs/homeworks/ab_testing.md.bak --- title: "Homework 4: A/B Testing" date: "2018-03-12" --- An exercise on statistical inference for A/B testing. **DUE**: Tuesday April 16, 11:59pm <!--more--> Download questions here: [{{<baseurl>}}misc/ab_testing.pdf](/misc/ab_testing.pdf) Submit as PDF on ELMS. You'll need Rmarkdown for this one. <file_sep>/materials/quizzes/wrangling_exercise.Rmd --- title: "Wrangling Exercise" author: "CMSC320" date: "`r Sys.Date()`" output: html_document: default pdf_document: default --- This is an exercise on data wrangling using the flights dataset: ```{r, eval=TRUE, message=FALSE} library(tidyverse) library(nycflights13) data(flights) data(airlines) ``` ```{r, echo=FALSE, fig.width=3, fig.height=3} library(datamodelr) dm_f <- dm_from_data_frames(flights, airlines) dm_f <- dm_add_references(dm_f, flights$carrier == airlines$carrier) graph <- dm_create_graph(dm_f, rankdir="LR", col_attr = c("column", "type")) dm_render_graph(graph) ``` 1. Filter flights to include only flights with an arrival delay greater than 2 hours (delays are recorded in minutes) 2. Select columns in the flights table that contain delay times (note that the function `matches` can be used to select columns with names that matches a regular expression. See `?dplyr::select`) 3. Find the minimum arrival delay (this can be a negative number) in the flights table. Use `summarize`. 4. Find minimum arrival delay (again, can be a negative number) in the flights table for each destination airport. Use `group_by' and 'summarize`. 5. List the name of all airlines and the number of flights for each airline in flights table. The `airlines` table has airline names. 
<file_sep>/materials/lecture-notes/16-tidying.Rmd # Tidying data ```{r setup_ch16, echo=FALSE, message=FALSE} library(tidyverse) select <- dplyr::select ``` This section is concerned with common problems in data preparation, namely use cases commonly found in raw datasets that need to be addressed to turn messy data into tidy data. These would be operations that you would perform on data obtained as a csv file from a collaborator or data repository, or as the result of scraping data from webpages or other sources. We derive many of our ideas from the paper [Tidy Data](http://www.jstatsoft.org/v59/i10/paper) by <NAME>. Associated with that paper we will use two very powerful R libraries `tidyr` and `dplyr` which are extremely useful in writing scripts for data cleaning, preparation and summarization. A basic design principle behind these libraries is trying to effectively and efficiently capture very common use cases and operations performed in data cleaning. The paper frames these use cases and operations which are them implemented in software. ## Tidy Data Here we assume we are working with a data model based on rectangular data structures where 1. Each attribute (or variable) forms a column 2. Each entity (or observation) forms a row 3. Each type of entity (observational unit) forms a table Here is an example of a tidy dataset: ```{r} library(nycflights13) head(flights) ``` it has one observation per row, a single variable per column. Notice only information about flights are included here (e.g., no airport information other than the name) in these observations. ## Common problems in messy data The set of common operations we will study are based on these common problems found in datasets. 
We will see each one in detail: - Column headers are values, not variable names (gather) - Multiple variables stored in one column (split) - Variables stored in both rows and column (rotate) - Multiple types of observational units are stored in the same table (normalize) - Single observational unit stored in multiple tables (join) We are using data from Hadley's paper found in [github](https://github.com/hadley/tidyr). It's included directory `data`: ```{r, eval=TRUE, echo=TRUE} data_dir <- "data" ``` ### Headers as values The first problem we'll see is the case where a table header contains values. ```{r} library(tidyverse) pew <- read_csv(file.path(data_dir, "pew.csv")) pew ``` This table has the number of survey respondents of a specific religion that report their income within some range. A tidy version of this table would consider the *variables* of each observation to be `religion, income, frequency` where `frequency` has the number of respondents for each religion and income range. The function to use in the `tidyr` package is `gather`: ```{r} tidy_pew <- gather(pew, income, frequency, -religion) tidy_pew ``` This says: gather all the columns from the `pew` (except `religion`) into key-value columns `income` and `frequency`. This table is much easier to use in other analyses. Another example: this table has a row for each song appearing in the billboard top 100. It contains track information, and the date it entered the top 100. It then shows the rank in each of the next 76 weeks. ```{r} billboard <- read_csv(file.path(data_dir, "billboard.csv")) billboard ``` Challenge: This dataset has values as column names. Which column names are values? How do we tidy this dataset? ### Multiple variables in one column The next problem we'll see is the case when we see multiple variables in a single column. 
Consider the following dataset of tuberculosis cases: ```{r} tb <- read_csv(file.path(data_dir, "tb.csv")) tb ``` This table has a row for each year and strain of tuberculosis (given by the first two columns). The remaining columns state the number of cases for a given demographic. For example, `m1524` corresponds to males between 15 and 24 years old, and `f1524` are females age 15-24. As you can see each of these columns has two variables: `sex` and `age`. Challenge: what else is untidy about this dataset? So, we have to do two operations to tidy this table, first we need to use `gather` the tabulation columns into a `demo` and `n` columns (for demographic and number of cases): ```{r} tidy_tb <- gather(tb, demo, n, -iso2, -year) tidy_tb ``` Next, we need to `separate` the values in the `demo` column into two variables `sex` and `age` ```{r} tidy_tb <- separate(tidy_tb, demo, c("sex", "age"), sep=1) tidy_tb ``` This calls the `separate` function on table `tidy_db`, separating the `demo` variable into variables `sex` and `age` by separating each value after the first character (that's the `sep` argument). We can put these two commands together in a pipeline: ```{r} tidy_tb <- tb %>% gather(demo, n, -iso2, -year) %>% separate(demo, c("sex", "age"), sep=1) tidy_tb ``` ### Variables stored in both rows and columns This is the messiest, commonly found type of data. Let's take a look at an example, this is daily weather data from for one weather station in Mexico in 2010. ```{r} weather <- read_csv(file.path(data_dir, "weather.csv")) weather ``` So, we have two rows for each month, one with maximum daily temperature, one with minimum daily temperature, the columns starting with `d` correspond to the day in the where the measurements were made. Challenge: How would a tidy version of this data look like? ```{r} weather %>% gather(day, value, d1:d31, na.rm=TRUE) %>% spread(element, value) ``` The new function we've used here is `spread`. 
It does the inverse of `gather` it spreads columns `element` and `value` into separate columns. ### Multiple types in one table Remember that an important aspect of tidy data is that it contains exactly one kind of observation in a single table. Let's see the billboard example again after the `gather` operation we did before: ```{r} tidy_billboard <- billboard %>% gather(week, rank, wk1:wk76, na.rm=TRUE) tidy_billboard ``` Let's sort this table by track to see a problem with this table: ```{r} tidy_billboard <- tidy_billboard %>% arrange(track) tidy_billboard ``` We have a lot of repeated information in many of these rows (the artist, track name, year, title and date entered). The problem is that this table contains information about both tracks and rank in billboard. That's two different kinds of observations that should belong in two different tables in a tidy dataset. Let's make a song table that only includes information about songs: ```{r} song <- tidy_billboard %>% dplyr::select(artist, track, year, time, date.entered) %>% unique() song ``` The `unique` function removes any duplicate rows in a table. That's how we have a single row for each song. Next, we would like to remove all the song information from the rank table. But we need to do it in a way that still remembers which song each ranking observation corresponds to. To do that, let's first give each song an identifier that we can use to link songs and rankings. So, we can produce the final version of our song table like this: ```{r} song <- tidy_billboard %>% dplyr::select(artist, track, year, time, date.entered) %>% unique() %>% mutate(song_id = row_number()) song ``` The `mutate` function adds a new column to the table, in this case with column name `song_id` and value the row number the song appears in the table (from the `row_number` column). Now we can make a rank table, we combine the tidy billboard table with our new song table using a `join` (we'll learn all about joins later). 
It checks the values on each row of the billboard table and looks for rows in the song table that have the exact same values, and makes a new row that combines the information from both tables. ```{r} tidy_billboard %>% left_join(song, c("artist", "year", "track", "time", "date.entered")) ``` That adds the `song_id` variable to the `tidy_billboard` table. So now we can remove the song information and only keep ranking information and the `song_id`. ```{r} rank <- tidy_billboard %>% left_join(song, c("artist", "year", "track", "time", "date.entered")) %>% dplyr::select(song_id, week, rank) rank ``` Challenge: Let's do a little better job at tidying the billboard dataset: 1. When using `gather` to make the `week` and `rank` columns, remove any weeks where the song does not appear in the top 100. This is coded as missing (`NA`). See the `na.rm` argument to `gather`. 2. Make `week` a numeric variable (i.e., remove `wk`). See what the `extract_numeric` function does. 3. Instead of `date.entered` add a `date` column that states the actual date of each ranking. See how R deals with dates `?Date` and how you can turn a string into a `Date` using `as.Date`. 4. Sort the resulting table by date and rank. 5. Make new `song` and `rank` tables. `song` will now not have the `date.entered` column, and `rank` will have the new `date` column you have just created. <file_sep>/materials/lectures/Regression/linear_regression.Rmd --- title: "Linear Regression" author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` Linear regression is a very elegant, simple, powerful and commonly used technique for data analysis. 
We use it extensively in exploratory data analysis (we used in project 2, for example) and in statistical analyses since it fits into the statistical framework we saw in the last unit, and thus let's do things like construct confidence intervals and hypothesis testing for relationships between variables. ## Simple Regression Let's start with the simplest linear model. The goal here is to analyze the relationship between a _continuous numerical_ variable $Y$ and another (_numerical_ or _categorical_) variable $X$. We assume that in our population of interest the relationship between the two is given by a linear function: $$ Y = \beta_0 + \beta_1 X $$ Here is (simulated) data from an advertising campaign measuring sales and the amount spent in advertising. We think that sales are related to the amount of money spent on TV advertising: $$ \mathtt{sales} \approx \beta_0 + \beta_1 \times \mathtt{TV} $$ ![](regression_example.png) Given this data, we would say that we _regress_ `sales` on `TV` when we perform this regression analysis. As before, given data we would like to estimate what this relationship is in the _population_ (what is the population in this case?). What do we need to estimate in this case? Values for $\beta_0$ and $\beta_1$. What is the criteria that we use to estimate them? Just like the previous unit we need to setup an _inverse problem_. What we are stating mathematically in the linear regression problem is that the _conditional expectation_ (or conditional mean, conditional average) of $Y$ given $X=x$ is defined by this linear relationship: $$ \mathbb{E}[Y|X=x] = \beta_0 + \beta_1 x $$ Given a dataset, the inverse problem is then to find the values of $\beta_0$ and $\beta_1$ that minimize deviation between data and expectation, and again use squared devation to do this. 
**The linear regression problem** Given data $(x_1, y_1), (x_2, y_2), \ldots, (x_n, y_n)$, find values $\beta_0$ and $\beta_1$ that minimize _objective_ or _loss_ function RSS (residual sum of squares): $$ \arg \min_{\beta_0,\beta_1} RSS = \frac{1}{2} \sum_i (y_i - (\beta_0 + \beta_1 x_i))^2 $$ ![](minimizing.png) Similar to what we did with the derivation of the mean as a measure of central tendency we can derive the values of minimizers$\hat{\beta}_0$ and $\hat{\beta}_1$. We use the same principle, compute derivatives (partial this time) of the objective function RSS, set to zero and solve to obtain: $$ \begin{align} \hat{\beta}_1 & = \frac{\sum_{i=1}^n (y_i - \overline{y})(x_i - \overline{x})}{\sum_{i=1}^n (x_i - \overline{x})^2} \\ {} & = \frac{\mathrm{cov}(y,x)}{\mathrm{var}(x)} \\ \hat{\beta}_0 & = \overline{y} - \hat{\beta}_1 \overline{x} \end{align} $$ Let's take a look at some data. Here is data measuring characteristics of cars, including horsepower, weight, displacement, miles per gallon. Let's see how well a linear model captures the relationship between miles per gallon and weight ```{r, warning=FALSE, message=FALSE} library(ISLR) library(dplyr) library(ggplot2) library(broom) data(Auto) Auto %>% ggplot(aes(x=weight, y=mpg)) + geom_point() + geom_smooth(method=lm) + theme_minimal() ``` In R, linear models are built using the `lm` function ```{r} auto_fit <- lm(mpg~weight, data=Auto) auto_fit ``` This states that for this dataset $\hat{\beta}_0 = `r auto_fit$coef[1]`$ and $\hat{\beta}_1 = `r auto_fit$coef[2]`$. What's the interpretation? According to this model, a weightless car `weight=0` would run $\approx `r round(auto_fit$coef[1], 2)`$ _miles per gallon_ on average, and, on average, a car would run $\approx `r -round(auto_fit$coef[2],2)`$ _miles per gallon_ fewer for every extra _pound_ of weight. Note, that the units of the outcome $Y$ and the predictor $X$ matter for the interpretation of these values. 
## Inference As we saw in the last unit, now that we have an estimate, we want to know how good of an estimate this is. We will see that similar arguments based on the CLT hold again. The main point is to understand that like the sample mean, the regression line we learn from a specific dataset is an estimate. A different sample from the same population would give us a different estimate (regression line). But, the CLT tells us that, on average, we are close to population regression line (i.e., close to $\beta_0$ and $\beta_1$), that the spread around $\beta_0$ and $\beta_1$ is well approximated by a normal distribution and that the spread goes to zero as the sample size increases. ![](population_line.png) ### Confidence Interval Using the same framework as before, we can construct a confidence interval to say how precise we think our estimates of the population regression line is. In particular, we want to see how precise our estimate of $\beta_1$ is, since that captures the relationship between the two variables. We again, use a similar framework. First, we calculate a standard error estimate for $\beta_1$: $$ \mathrm{se}(\hat{\beta}_1)^2 = \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (x_i - \overline{x})^2} $$ and construct a 95% confidence interval $$ \beta_1 = \hat{\beta}_1 \pm 1.95 \times \mathrm{se}(\hat{\beta}_1) $$ Note, $\hat{y}_i = \hat{\beta}_0 + \hat{\beta}_1 x_i$. Going back to our example: ```{r} auto_fit_stats <- auto_fit %>% tidy() %>% select(term, estimate, std.error) auto_fit_stats ``` This `tidy` function is defined by the `broom` package, which is very handy to manipulate the result of learning models in a consistent manner. The `select` call removes some extra information that we will discuss shortly. 
```{r} confidence_interval_offset <- 1.95 * auto_fit_stats$std.error[2] confidence_interval <- round(c(auto_fit_stats$estimate[2] - confidence_interval_offset, auto_fit_stats$estimate[2], auto_fit_stats$estimate[2] + confidence_interval_offset), 4) ``` Given the confidence interval, we would say, "on average, a car runs $_{`r confidence_interval[1]`} `r confidence_interval[2]`_{`r confidence_interval[3]`}$ _miles per gallon_ fewer per pound of weight. ### The $t$-statistic and the $t$-distribution As in the previous unit, we can also test a null hypothesis about this relationship: "there is no relationship between weight and miles per gallon", which translates to $\beta_1=0$. Again, using the same argument based on the CLT, if this hypothesis is true then the distribution of $\hat{\beta}_1$ is well approximated by $N(0,\mathrm{se}(\hat{\beta}_1))$, and if we observe the learned $\hat{\beta}_1$ is _too far_ from 0 according to this distribution then we _reject_ the hypothesis. Now, there is a technicality here that we did not discuss in the previous unit that is worth paying attention to. We saw before that the CLT states that the normal approximation is good as sample size increases, but what about moderate sample sizes (say, less than 100)? The $t$ distribution provides a better approximation of the sampling distribution of these estimates for moderate sample sizes, and it tends to the normal distribution as sample size increases. The $t$ distribution is commonly used in this testing situation to obtain the probability of rejecting the null hypothesis. It is based on the $t$-statistic $$ \frac{\hat{\beta}_1}{\mathrm{se}(\hat{\beta}_1)} $$ You can think of this as a _signal-to-noise_ ratio, or a standardizing transformation on the estimated parameter. Under the null hypothesis, it was shown that the $t$-statistic is well approximated by a $t$-distribution with $n-2$ _degrees of freedom_ (we will get back to _degrees of freedom_ shortly). 
Like other distributions, you can compute with the $t$-distribution using the `p,d,q,r`-family of functions, e.g., `pt` is the cumulative probability distribution function. In our example, we get a $t$ statistic and P-value as follows: ```{r} auto_fit_stats <- auto_fit %>% tidy() auto_fit_stats ``` We would say: "We found a statistically significant relationship between weight and miles per gallon. On average, a car runs $_{`r confidence_interval[1]`} `r confidence_interval[2]`_{`r confidence_interval[3]`}$ _miles per gallon_ fewer per pound of weight ($t$=`r round(auto_fit_stats$statistic[2],2)`, $p$-value<`r auto_fit_stats$p.value[2]`$)." ### Global Fit Now, notice that we can make _predictions_ based on our conditional expectation, and that prediction should be better than a prediction with a simple average. We can use this comparison as a measure of how good of a job we are doing using our model to fit this data: how much of the variance of $Y$ can we _explain_ with our model. To do this we can calculate _total sum of squares_: $$ TSS = \sum_i (y_i - \overline{y})^2 $$ (this is the squared error of a prediction using the sample mean of $Y$) and the _residual sum of squares_: $$ RSS = \sum_i (y_i - \hat{y}_i)^2 $$ (which is the squared error of a prediction using the linear model we learned) The commonly used $R^2$ measure compares these two quantities: $$ R^2 = \frac{\mathrm{TSS}-\mathrm{RSS}}{\mathrm{TSS}} = 1 - \frac{\mathrm{RSS}}{\mathrm{TSS}} $$ These types of global statistics for the linear model can be obtained using the `glance` function in the `broom` package. In our example ```{r} auto_fit %>% glance() %>% select(r.squared, sigma, statistic, df, p.value) ``` We will explain the columns `statistic`, `df` and `p.value` when we discuss regression using more than a single predictor $X$. ## Some important technicalities We mentioned above that predictor $X$ could be _numeric_ or _categorical_. However, this is not precisely true. 
We can use a transformation to represent _categorical_ variables. Here is a simple example: Suppose we have a categorical variable `sex` with values `female` and `male`, and we want to show the relationship between, say `credit card balance` and `sex`. We can create a dummy variable $x$ as follows: $$ x_i = \left\{ \begin{align} 1 & \textrm{ if female} \\ 0 & \textrm{o.w.} \end{align} \right. $$ and fit a model $y = \beta_0 + \beta_1 x$. What is the conditional expectation given by this model? If the person is male, then $y=\beta_0$, if the person is female, then $y=\beta_0 + \beta_1$. So, what is the interpretation of $\beta_1$? The average difference in credit card balance between females and males. We could do a different encoding: $$ x_i = \left\{ \begin{align} +1 & \textrm{ if female} \\ -1 & \textrm{o.w.} \end{align} \right. $$ Then what is the interpretation of $\beta_1$ in this case? Note, that when we call the `lm(y~x)` function and `x` is a factor with two levels, the first transformation is used by default. What if there are more than 2 levels? We need multiple regression, which we will see shortly. ## Issues with linear regression There are some assumptions underlying the inferences and predictions we make using linear regression that we should verify are met when we use this framework. Let's start with four important ones that apply to simple regression ### Non-linearity of outcome-predictor relationship What if the underlying relationship is not linear? We will see later that we can capture non-linear relationships between variables, but for now, let's concentrate on detecting if a linear relationship is a good approximation. We can use exploratory visual analysis to do this for now by plotting residuals $(y_i - \hat{y}_i)^2$ as a function of the fitted values $\hat{y}_i$. The `broom` package uses the `augment` function to help with this task. 
It augments the input data used to learn the linear model with information of the fitted model for each observation ```{r} augmented_auto <- auto_fit %>% augment() augmented_auto %>% head() ``` With that we can make the plot we need to check for possible non-linearity ```{r} augmented_auto %>% ggplot(aes(x=.fitted,y=.resid)) + geom_point() + geom_smooth() + labs(x="fitted", y="residual") ``` ### Correlated Error For our inferences to be valid, we need residuals to be independent and identically distributed. We can spot non independence if we observe a trend in residuals as a function of the predictor $X$. Here is a simulation to demonstrate this: ![](correlated_error.png) In this case, our standard error estimates would be underestimated and our confidence intervals and hypothesis testing results would be biased. ### Non-constant variance Another violation of the iid assumption would be observed if the spread of residuals is not independent of the fitted values. Here is an illustration, and a possible fix using a log transformation on the outcome $Y$. ![](residual_variance.png) <file_sep>/materials/lecture-notes/32-clustering.Rmd # Unsupervised Learning: Clustering So far we have seen "Supervised Methods" where our goal is to analyze a _response_ (or outcome) based on various _predictors_. In many cases, especially for Exploratory Data Analysis, we want methods to extract patterns on variables without analyzing a specific _response_. Methods for the latter case are called "Unsupervised Methods". Examples are _Principal Component Analysis_ and _Clustering_. Interpretation of these methods is much more _subjective_ than in Supervised Learning. For example: if we want to know if a given _predictor_ is related to _response_, we can perform statistical inference using hypothesis testing. In another example, if we want to know which predictors are useful for prediction: use cross-validation to do model selection. 
Finally, if we want to see how well we can predict a specific response, we can use cross-validation to report on test error. In unsupervised methods, there is no similar clean evaluation methodology. Nonetheless, they can be very useful methods to understand data at hand. ## Motivating Example Throughout this unit we will use a time series dataset of mortgage affordability as calculated and distributed by Zillow: https://www.zillow.com/research/data/. ```{r clustering_setup, echo=TRUE, message=FALSE, warning=FALSE} library(tidyverse) library(readr) library(lubridate) datadir <- "data" url <- "http://files.zillowstatic.com/research/public/Affordability_Wide_2017Q4_Public.csv" filename <- basename(url) datafile <- file.path(datadir, filename) if (!file.exists(datafile)) { download.file(url, file.path(datadir, filename)) } afford_data <- read_csv(datafile) ``` ```{r tidy_zillow, echo=TRUE, cache=TRUE} tidy_afford <- afford_data %>% filter(Index == "Mortgage Affordability") %>% drop_na() %>% filter(RegionID != 0) %>% dplyr::select(RegionID, matches("^[1|2]")) %>% gather(time, affordability, matches("^[1|2]")) %>% type_convert(col_types=cols(time=col_date(format="%Y-%m"))) wide_afford_df <- tidy_afford %>% dplyr::select(RegionID, time, affordability) %>% spread(time, affordability) value_mat <- wide_afford_df %>% dplyr::select(-RegionID) %>% as.matrix() ``` ```{r zillow_stats, echo=FALSE} ncounties <- nrow(wide_afford_df) year_range <- range(year(tidy_afford$time)) ``` The dataset contains affordability measurements for `r ncounties` counties with data from `r min(year_range)` to `r max(year_range)`. Here we plot the time series of affordability for all counties. 
```{r zillow_plot1, echo=TRUE, cache=TRUE, warning=FALSE} tidy_afford %>% ggplot(aes(x=time,y=affordability,group=factor(RegionID))) + geom_line(color="GRAY", alpha=3/4, size=1/2) + labs(title="County-Level Mortgage Affordability over Time", x="Date", y="Mortgage Affordability") ``` A natural question to ask about this data is, "can we partition counties into groups of counties with similar value trends across time" ## Some Preliminaries Mathematically, the previous sections on "Supervised Learning" we were concerned with estimates that minimize some error function relative to the outcome of interest $Y$: $$ \mu(x) = \arg \min_{\theta} E_{Y|X} L(Y, \beta) $$ In order to do this, explicitly or not, the methods we were using would be concerned with properties of the conditional probability distribution $p(Y|X)$, and do so without concerning itself with probability distribution $p(X)$ of the covariates themselves. In unsupervised learning, we are interested in properties of $p(X)$. In our example, what can we say about the distribution of home value time series? Since the dimensionality of $p(X)$ can be large, unsupervised learning methods seek to find structured representations of $p(X)$ that would be possible to estimate. In _clustering_ we assume that predictor space is partitioned and that $p(X)$ is defined over those partitions. In _dimensionality reduction_ we assume that $p(X)$ is really defined over a space of smaller dimension. We will start studying clustering first. ## Cluster Analysis The high-level goal of cluster analysis is to organize objects (observations) that are _similar_ to each other into groups. We want objects within a group to be more _similar_ to each other than objects in different groups. Central to this high-level goal is how to measure the degree of _similarity_ between objects. A clustering method then uses the _similarity_ measure provided to it to group objects into clusters. 
```{r setup_kmeans, echo=TRUE, warning=FALSE, message=FALSE} library(broom) library(stringr) ``` ```{r kmeans1, cache=FALSE, echo=FALSE} set.seed(1234) kmeans_res <- kmeans(value_mat, centers=9) augmented_data <- kmeans_res %>% broom::augment(wide_afford_df) %>% gather(time, affordability, matches("^X")) %>% mutate(time=stringr::str_replace(time, "X", "")) %>% type_convert(col_types=cols(time=col_date(format="%Y.%m.%d"))) %>% rename(cluster=.cluster) kmeans_centers <- kmeans_res %>% broom::tidy(col.names=colnames(value_mat)) %>% dplyr::select(cluster, matches("^[1|2]")) %>% gather(time, affordability, -cluster) %>% type_convert(col_types=cols(time=col_date(format="%Y-%m-%d"))) ``` ```{r kmeans_plot, echo=TRUE, cache=TRUE, fig.height=6} augmented_data %>% ggplot(aes(x=time, y=affordability)) + geom_line(aes(group=RegionID), color="GRAY", alpha=1/2, size=1/2) + facet_wrap(~cluster) + geom_line(data=kmeans_centers, color="BLACK", alpha=1/2, size=1/2) + labs(main="Kmeans Clustering (k=9)", xlab="Date", ylab="affordability") + theme(axis.text.x=element_text(angle=45, hjust=1)) ``` This plot shows the result of the k-means algorithm partitioning the data into 9 clusters. We observe that series within each cluster have similar trends. The darker series within each cluster shows the average time series within the cluster. We also see that some clusters correspond to very similar series suggesting that partitioning the data into fewer clusters would be better. ## Dissimilarity-based Clustering For certain algorithms, instead of similarity we work with dissimilarity, often represented as distances. When we have observations defined over attributes, or predictors, we define dissimilarity based on these attributes. Given measurements $x_{ij}$ for $i=1,\ldots,N$ observations over $j=1,\ldots,p$ predictors. 
Suppose we define a dissimilarity $d_j(x_{ij}, x_{i'j})$, we can then define a dissimilarity between objects as $$ d(x_i, x_{i'}) = \sum_{j=1}^p d_j(x_{ij},x_{i'j}) $$ In the k-means algorithm, and many other algorithms, the most common usage is squared distance $$ d_j(x_{ij},x_{i'j}) = (x_{ij}-x_{i'j})^2 $$ We can use different dissimilarities, for example $$ d_j(x_{ij}, x_{i'j}) = |x_{ij}-x_{i'j}| $$ which may affect our choice of clustering algorithm later on. For categorical variables, we could set $$ d_j(x_{ij},x_{i'j}) = \begin{cases} 0 \; \textrm{if } x_{ij} = x_{i'j} \\ 1 \; \textrm{o.w.} \end{cases} $$ If the values the categorical variable have an intrinsic similarity we could generalize using symmetric matrix $L$ with elements $L_{rr'} = L_{r'r}$, $L_{rr}=0$ and $L_{rr'} \geq 0$ otherwise. This may of course lead to a dissimilarity that is not a proper distance. ## K-means Clustering A commonly used algorithm to perform clustering is the K-means algorithm. It is appropriate when using squared Euclidean distance as the measure of object dissimilarity. $$ \begin{aligned} d(x_{i},x_{i'}) & = \sum_{j=1}^p (x_{ij}-x_{i'j})^2 \\ {} & = \|x_i - x_{i'}\|^2 \end{aligned} $$ K-means partitions observations into $K$ clusters, with $K$ provided as a parameter. Given some clustering, or partition, $C$, the cluster assignment of observation $x_i$ to cluster $k \in \{1,\ldots,K\}$ is denoted as $C(i)=k$. K-means seeks to minimize a clustering criterion measuring the dissimilarity of observations assigned to the same cluster. 
$$ W(C) = \frac{1}{2} \sum_{k=1}^K \sum_{i: \, C(i)=k} \sum_{i':\, C(i')=k} \|x_i - x_{i'}\|^2 $$ Note however, that this is equivalent to minimizing $$ W(C) = \sum_{k=1}^K N_k \sum_{i:\,C(i)=k} \|x_i - \bar{x}_k\|^2 $$ where $\bar{x}_k=(\bar{x}_{k1},\ldots,\bar{x}_{kp})$ and $\bar{x}_{kj}$ is the average of predictor $j$ over the observations assigned to cluster $k$, i.e., $C(i)=k$, and $N_k$ is the number of observations assigned to cluster $k$. Thus the criterion to minimize is the total distance given by each observation to the mean (centroid) of the cluster to which the observation is assigned. An iterative algorithm is used to minimize this criterion 0. Initialize by choosing $K$ observations as centroids $m_1,m_2,\ldots,m_k$ 1. Assign each observation $i$ to the cluster with the nearest centroid, i.e., set $C(i)=\arg\min_{1 \leq k \leq K} \|x_i - m_k\|^2$ 2. Update centroids $m_k=\bar{x}_k$ 3. Iterate steps 1 and 2 until convergence Here we illustrate the k-means algorithm over four iterations on our example data with $K=4$. Each row in this plot is an iteration of the algorithm, and each column corresponds to a cluster. We observe, that in this case, there is little update of the cluster assignments and centroids beyond the first iteration.
```{r kmeans_illustration, echo=FALSE, message=FALSE, cache=TRUE} set.seed(1234) K <- 4 nobs <- nrow(value_mat) centroid_indices <- sample(nobs, K) centroids <- value_mat[centroid_indices,] assign_cluster <- function(x, m) { # browser() xx <- rowSums(x^2) mm <- rowSums(m^2) xm <- tcrossprod(x, m) d <- xx - 2*xm d <- sweep(d, 2, mm, FUN="+") apply(d, 1, which.min) } get_centroids <- function(x, a) { inds <- split(seq_len(nobs), a) sapply(inds, function(i) colMeans(x[i,,drop=FALSE])) %>% t() } message("Iteration 1") assignments <- assign_cluster(value_mat, centroids) centroid_df <- as.data.frame(cbind(centroids, cluster=seq_len(K), iteration=1)) assignments_df <- cbind(wide_afford_df, cluster=assignments, iteration=1) for (it in seq(2,3)) { message("Iteration ", it) centroids <- get_centroids(value_mat, assignments) assignments <- assign_cluster(value_mat, centroids) tmp <- as.data.frame(cbind(centroids, cluster=seq_len(K), iteration=it)) centroid_df <- rbind(centroid_df, tmp) tmp <- cbind(wide_afford_df, cluster=assignments, iteration=it) assignments_df <- rbind(assignments_df, tmp) } ``` ```{r kmeans_illustration2, echo=FALSE, cache=TRUE} tall_assignments_df <- assignments_df %>% gather(time, affordability, matches("^[1|2]")) %>% type_convert(col_types=cols(time=col_date(format="%Y-%m-%d"))) tall_centroids_df <- centroid_df %>% gather(time,affordability,matches("^[1|2]")) %>% type_convert(col_types=cols(time=col_date(format="%Y-%m-%d"))) pl <- tall_assignments_df %>% ggplot(aes(x=time, y=affordability)) + geom_line(aes(group=RegionID), color="GRAY", alpha=1/2, size=1/2) + facet_grid(iteration~cluster) + geom_line(data=tall_centroids_df, color="BLACK") + labs(xlab="Date", ylab="mortgage affordability") + theme(axis.text.x=element_text(angle=45, hjust=1)) show(pl) ``` Criterion $W(C)$ is reduced in each iteration so the algorithm is assured to converge. However, as this is not a convex criterion, the clustering we obtain may not be globally optimal. 
To address this in practice, the algorithm is run with multiple initializations (step 0) and the best clustering achieved is used. Also, selection of observations as centroids can be improved using the K-means++ algorithm: 0. Choose an observation as centroid $m_1$ uniformly at random 1. To choose centroid $m_k$, compute for each observation $i$ not chosen as a centroid the distance to the nearest centroid $d_i = \min_{1\leq l < k} \|x_i - m_l\|^2$ 2. Set centroid $m_k$ to an observation randomly chosen with probability $\frac{d_i}{\sum_{i'} d_{i'}}$ 3. Iterate steps 1 and 2 until $K$ centroids are chosen This ensures that initial centroids are well distributed in predictor space and yield a better clustering. ## Choosing the number of clusters The number of clusters $K$ must be determined before running the K-means algorithm. As opposed to the supervised learning setting where model selection is performed by minimizing expected prediction error through, for example, cross-validation, there is no clean direct method for choosing the number of clusters to use in the K-means algorithm. ```{r gapstat, echo=FALSE, message=FALSE, warning=FALSE, cache=TRUE, results="hide"} set.seed(1234) gap_stat <- cluster::clusGap(value_mat, FUN=kmeans, nstart=15, B=100, K.max=9) gap_stat_df <- gap_stat$Tab %>% as_tibble() %>% rowid_to_column("k") ``` Furthermore, looking at criterion $W(C)$ alone is not sufficient as the criterion will become smaller as the value of $K$ is increased. Here we show the behavior of $W_K(C)$ in our example dataset. However, there are properties of this statistic that may be used to heuristically choose a proper value of $K$. ```{r logw_plot, echo=FALSE} gap_stat_df %>% ggplot(aes(x=k, y=logW)) + geom_line() + geom_point() ``` The intuition behind the method is that, supposing there is a true underlying number $K^*$ of clusters in the data, improvement in the $W_K(C)$ statistic will be fast for values of $K \leq K^*$, and slower for values of $K > K^*$.
In the first case, there will be a cluster which will contain observations belonging to two of the true underlying clusters, and therefore will have poor within cluster similarity. As $K$ is increased, observations may then be separated into separate clusters, providing a sharp improvement in the $W_K(C)$ statistic. On the other hand, for values of $K > K^*$ observations belonging to a single true cluster are split into multiple clusters, all with generally high within-cluster similarity; therefore splitting these clusters further will not improve the $W_K(C)$ statistic very sharply. The curve will therefore have an inflection point around $K^*$. We see this behavior in our example plot, where improvement in $W_K(C)$ is slower after $K=3$. The _gap statistic_ is used to identify the inflection point in the curve. It compares the behavior of the $W_K(C)$ statistic based on the data with the behavior of the $W_K(C)$ statistic for data generated uniformly at random over the range of the data. Below we plot the gap statistic for our example data. For this dataset, the gap statistic suggests that there is no clear cluster structure and therefore $K=1$ is an optimal choice. ```{r gapstat_plot, echo=FALSE} factoextra::fviz_gap_stat(gap_stat) ``` ## Summary Clustering methods are intuitive methods useful to understand structure within unlabeled observations. The K-means algorithm is a frequently used, easy to implement and understand algorithm for clustering based on Euclidean distance between data entities.
<file_sep>/materials/slides/geometry/geometry.Rmd --- title: "Data Analysis with Geometry" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Data Analysis with Geometry] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] ```{r setup1, include=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) ``` --- layout: true ## Data Analysis with Geometry --- A common situation: - an outcome attribute (variable) $Y$, and - one or more independent covariate or predictor attributes $X_1,\ldots,X_p$. One usually observes these variables for multiple "instances" (or entities). --- One may be interested in various things: - What effects do the covariates $X_i$ have on the outcome $Y$? - How well can we quantify these effects? - Can we predict outcome $Y$ using covariates $X_i$?, etc... --- ## Motivating Example: Credit Analysis ```{r, echo=FALSE, message=FALSE} library(ISLR) library(tidyverse) data(Default) Default %>% head() %>% knitr::kable(format="html") ``` --- **Task** predict account default What is the outcome $Y$? What are the predictors $X_j$? --- exclude: true We will sometimes call attributes $Y$ and $X$ the outcome/predictors, sometimes observed/covariates, and even input/output. We may call each entity an observation or example. We will denote predictors with $X$ and outcomes with $Y$ (quantitative) and $G$ (qualitative). Notice $G$ are not numbers, so we cannot add or multiply them. We will use $G$ to denote the set of possible values. For gender it would be $G=\{Male,Female\}$. 
--- layout: true ## From data to feature vectors --- The vast majority of ML algorithms we see in class treat instances as "feature vectors". We can represent each instance as a _vector_ in Euclidean space $\langle x_1,\ldots,x_p,y \rangle$. -- - every measurement is represented as a continuous value - in particular, categorical variables become numeric (e.g., one-hot encoding) --- Here is the same credit data represented as a matrix of feature vectors ```{r, cache=TRUE, echo=FALSE} default_mat <- Default %>% mutate(default=case_when( default == "Yes" ~ +1, TRUE ~ -1 )) %>% mutate(student=case_when( student == "Yes" ~ 1, TRUE ~ 0 )) %>% as.matrix() default_mat %>% as_tibble() %>% group_by(default) %>% sample_n(3) %>% ungroup() %>% sample_frac(1.0) %>% knitr::kable(format="html") ``` --- layout: true ## Technical notation --- - Observed values will be denoted in lower case. So $x_i$ means the $i$th observation of the random variable $X$. - Matrices are represented with bold face upper case. For example $\mathbf{X}$ will represent all observed predictors. - $N$ (or $n$) will usually mean the number of observations, or length of $Y$. $i$ will be used to denote which observation and $j$ to denote which covariate or predictor. --- - Vectors will not be bold, for example $x_i$ may mean all predictors for subject $i$, unless it is the vector of a particular predictor $\mathbf{x}_j$. - All vectors are assumed to be column vectors, so the $i$-th row of $\mathbf{X}$ will be $x_i'$, i.e., the transpose of $x_i$. --- layout: true ## Geometry and Distances --- Now that we think of instances as vectors we can do some interesting operations. Let's try a first one: define a distance between two instances using Euclidean distance $$d(x_1,x_2) = \sqrt{\sum_{j=1}^p(x_{1j}-x_{2j})^2}$$ --- ### K-nearest neighbor classification Now that we have a distance between instances we can create a classifier. Suppose we want to predict the class for an instance $x$. 
K-nearest neighbors uses the closest points in predictor space to predict $Y$. $$ \hat{Y} = \frac{1}{k} \sum_{x_k \in N_k(x)} y_k. $$ $N_k(x)$ represents the $k$-nearest points to $x$. How would you use $\hat{Y}$ to make a prediction? --- ![](img/knnalgo.png) --- ### Inductive bias The assumptions we make about our data that allow us to make predictions. In KNN, our _inductive bias_ is that points that are **nearby** will be of the same class. --- Parameter $K$ is a _hyper-parameter_; its value may affect prediction accuracy significantly. Question: which situation may lead to _overfitting_, high or low values of $K$? Why? --- layout: true ## The importance of transformations --- class: split-40 Feature scaling is an important issue in distance-based methods. .column[ Which of these two features will affect distance the most?] .column[ ```{r, echo=FALSE, message=FALSE, fig.width=7, fig.align="center", fig.height=5} default_mat %>% as_tibble() %>% ggplot(aes(x=student,y=balance,color=factor(default))) + geom_point() ``` ] --- layout: true ## Quick vector algebra review --- - A (real-valued) vector is just an array of real values, for instance $x = \langle 1, 2.5, −6 \rangle$ is a three-dimensional vector. - Vector sums are computed pointwise, and are only defined when dimensions match, so $$\langle 1, 2.5, −6 \rangle + \langle 2, −2.5, 3 \rangle = \langle 3, 0, −3 \rangle$$. In general, if $c = a + b$ then $c_d = a_d + b_d$ for all dimensions $d$. --- Vector addition can be viewed geometrically as taking a vector $a$, then tacking on $b$ to the end of it; the new end point is exactly $c$. ```{r, echo=FALSE, out.width="20%",fig.align="center"} knitr::include_graphics("img/vector_sum.png") ``` --- _Scalar Multiplication_: vectors can be scaled by real values; $$2\langle 1, 2.5, −6 \rangle = \langle 2, 5, −12\rangle$$ In general, $ax = \langle ax_1, ax_2, \ldots, ax_p\rangle$ --- The norm of a vector $x$, written $\|x\|$ is its length.
Unless otherwise specified, this is its Euclidean length, namely: $$\|x\| = \sqrt{\sum_{j=1}^p x_j^2}$$ --- ### Quiz Write Euclidean distance of vectors $u$ and $v$ as a vector norm --- The _dot product_, or _inner product_ of two vectors $u$ and $v$ is defined as $$u'v = \sum_{j=1}^p u_j v_j$$ A useful geometric interpretation of the inner product $v'u$ is that it gives the projection of $v$ onto $u$ (when $\|u\|=1$). ```{r, echo=FALSE, out.width="20%", fig.align="center"} knitr::include_graphics("img/innerprod.png") ``` --- layout: false ## The curse of dimensionality Distance-based methods like KNN can be problematic in high-dimensional problems Consider the case where we have many covariates. We want to use $k$-nearest neighbor methods. Basically, we need to define distance and look for small multi-dimensional "balls" around the target points. With many covariates this becomes difficult. --- layout: false ## Summary - We will represent many ML algorithms geometrically as vectors - Vector math review - K-nearest neighbors - The curse of dimensionality <file_sep>/materials/slides/missing-data/missing.Rmd --- title: "Missing Data" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Handling Missing Data] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] ```{r, echo=FALSE, message=FALSE} knitr::opts_chunk$set(cache=TRUE) library(tidyverse) theme_set(theme_bw()) ``` --- layout: true ## Handling Missing Data --- We can now move on to a very important aspect of data preparation and transformation: how to deal with missing data?
Values that are unrecorded, unknown or unspecified in a dataset. --- ```{r, echo=FALSE, message=FALSE} data_dir <- "data" weather <- read_csv(file.path(data_dir, "weather.csv")) weather ``` ```{r, echo=FALSE} tidy_weather <- weather %>% gather(day, temp, d1:d31) %>% spread(element, temp) tidy_weather ``` --- Temperature observations coded as `NA` are considered _missing_. (a) measurement failed in a specific day for a specific weather station, or (b) certain stations only measure temperatures on certain days of the month, or (c) measurement fails if the temperature is too high or too low -- Knowing which of these applies can change how we approach this missing data. --- Treatment of missing data depends highly on how the data was obtained, The more you know about a dataset, the better decision you can make. --- Central question with missing data is: Should we *remove* observations with missing values, or should we *impute* missing values? -- In fact, can we do anything with a dataset that has missing data? -- Answering this requires us to think **why** the data is missing. --- layout: true ## Mechanisms of missing data --- ### Some preliminaries Let's assume we have the following attributes: - $y$ that contains missing data, (e.g., temperature measurement) - a binary attribute $r$ that encodes if observation in $y$ is missing (this is not in our example dataset), - other attributes $x$ in our dataset (day, month, etc.) --- We will make statements like _depend_ or _not depend_, e.g., value of $r_i$ does not depend on value of $y_i$. -- i.e., the fact that a value is missing $(r_i=1)$ _does not depend_ on (missing) temperature value $(y_i)$. -- For now: _properties of the distribution of $r$ do not change based on values of $y$_. --- ### Missing completely at random (MCAR) *Def*: Missingness $r_i$ does not depend on the (unobserved) value $y_i$ or on observed values $x_i$. _Weather ex._ (a): stations failed for no discernible reason. 
-- **Removal**: Entities with missing data can be removed from the analysis safely. **Imputation**: Go for it (but see later) --- ### Missing at random (MAR) *Def*: missingness $r_i$ does not depend on the value of $y_i$, but may depend on the value of $x_i$. _Weather ex._ (b): measurements are not taken on specific days of the month (where "day of the month" serves the role of $x$). -- **Removal**: No!, it will bias analysis since you would drop values of $x$ based on missingness and potentially change the distribution of $x$. **Imputation**: Go for it (but see later) --- ### Not missing at random (NMAR) *Def*: missingness $r_i$ depends on $y_i$. *Weather ex.* (c): measurements fail when the temperature is too hot or cold. -- The worst case! Usually means that we want to go back to our collaborator and tell them that we are in a bind. **Removal**: No. **Imputation** No. --- ### Summary The **first step** when dealing with missing data is to understand *why* and *how* data may be missing. I.e., talk to collaborator, or person who created the dataset. --- layout: true ## Handling missing data --- class: split-50 ### Removing missing data (MCAR) Not a lot of entities with missing data: .column[ ```{r, echo=TRUE, eval=FALSE} tidy_weather_nomissing <- tidy_weather %>% tidyr::drop_na(tmax, tmin) ``` ] .column[ ```{r, echo=FALSE, eval=TRUE} tidy_weather_nomissing <- tidy_weather %>% tidyr::drop_na(tmax, tmin) tidy_weather_nomissing ``` ] --- ### Encoding as missing (MCAR or MAR) For categorical attributes: encode the fact that a value is missing as a new category and use in subsequent modeling. 
```{r, message=FALSE, echo=FALSE} tb <- read_csv(file.path("data", "tb.csv")) tidy_tb <- tb %>% gather(demo, n, -iso2, -year) %>% separate(demo, c("sex", "age"), sep=1) ``` ```{r, echo=FALSE} tidy_tb %>% tidyr::replace_na(list(iso2="missing")) %>% mutate(iso2_missing=iso2 == "missing") %>% group_by(iso2_missing) %>% slice(1:2) %>% ungroup() %>% sample_frac(1) ``` --- ### Imputation (MCAR) (Also for MAR but not ideal) Numeric values, replace missing values of $y$ with, e.g., the mean of non-missing $y$ ```{r, eval=FALSE} library(nycflights13) flights %>% tidyr::replace_na(list(dep_delay=mean(.$dep_delay, na.rm=TRUE))) ``` Categorical attributes, replace missing $y$ with most common category in non-missing $y$. --- ### Imputation (MAR) Replace missing $y$ predicting from other variables $x$ (we will see linear regression using the `lm` and `predict` functions later on) ```{r, eval=FALSE} dep_delay_fit <- flights %>% lm(dep_delay~origin, data=.) flights %>% modelr::add_predictions(dep_delay_fit, var="pred_delay") %>% mutate(dep_delay_fixed = ifelse(!is.na(dep_delay), dep_delay, pred_delay)) ``` (categorical, use logistic regression) --- ### Imputation After imputation it is useful to add an additional indicator attribute stating if a missing value was imputed ```{r, eval=FALSE} flights %>% mutate(dep_delay_missing = is.na(dep_delay)) ``` --- layout: true ## Implications of imputation --- Imputing missing values as discussed has two effects. *Central tendency of data is retained* If we impute missing data using the mean of a numeric variable, the mean after imputation will not change. This is a good reason to impute based on estimates of central tendency. --- *The _spread_ of the data will change* After imputation, the spread of the data will be smaller relative to spread if we ignore missing values. This could be problematic as underestimating the spread of data can yield over-confident inferences in downstream analysis. 
<file_sep>/content/homeworks/transformations.md --- title: "Homework 3: Transformations Exercise" date: "2018-03-12" --- An exercise on the effect of transformations on summary statistics. <!--more--> Download questions here: [{{<baseurl>}}misc/transformations.pdf](/misc/transformations.pdf) Submit as PDF on ELMS. You can take a photo of your pen-and-paper work and save as PDF to submit. Better yet, you can also use Rmarkdown (or a Jupyter notebook) and latex to generate a PDF. This is helpful: http://www.stat.cmu.edu/~cshalizi/rmarkdown/#math-in-r-markdown <file_sep>/materials/projects/classification.Rmd --- title: "Project 3 (part 2): Classification" author: "CMSC320" output: html_document --- **Posted: November 14, 2016** **Due: December 2, 2016** **Last Update: `r format(Sys.Date(), "%B %d, %Y")`** ## Gradient Descent **Problem 1** _Implement the gradient descent algorithm (either batch or stochastic versions) for multiple linear regression. I.e., extend the version of the algorithm in the lecture notes to multiple parameters._ The gradient descent update equation for logistic regression is given by: $$ \beta^{k+1} = \beta^k + \alpha \sum_{i=1}^{n} (y_i - p_i(\beta^k))\mathbf{x_i} $$ where (from the definition of log-odds): $$ p_i(\beta^k) = \frac{e^{f_i(\beta^k)}}{1+e^{f_i(\beta^k)}} $$ and $f_i(\beta^k) = \beta_0^k + \beta_1^k x_{i1} + \beta_2^k x_{i2} + \cdots + \beta_p^k x_{ip}$. **Problem 2** _Derive the above update equation_. Write the derivation in your Rmarkdown. Consult the class webpage for multiple examples showing how to include mathematical notation in an Rmarkdown file. **Problem 3** _Implement the gradient descent algorithm (either batch or stochastic versions) for multiple logistic regression._ I.e., modify your code in problem 1 for the logistic regression update equation. 
Make sure you include in your submission writeup, which version of the algorithm you are solving (stochastic or batch), and make sure to comment your code to help us understand your implementation. **Problem 4** To test your programs, simulate data from the linear regression and logistic regression models and check that your implementations recover the simulation parameters properly. Use the following functions to simulate data for your testing: ```{r} # simulate data for linear regression # # parameters: # - npredictors: number of numeric predictors (variables) # - nobservations: number of observations (examples) # - sd: standard deviation used in random generation of outcome variable # # result: list with following components # - y: outcome variable (vector of length nobservations) # - x: data matrix (matrix of nobservations rows and npredictors columns) # - beta: linear model parameters used to generate data (vector of length npredictors + 1) simulate_regression <- function(npredictors=20, nobservations = 100, sd=1.5) { # generate beta parameters beta <- rnorm(npredictors+1, mean=0, sd=10/npredictors) # generate data matrix x <- matrix(rnorm(nobservations * npredictors), nr=nobservations, nc=npredictors) # generate outcome x1 <- cbind(1, x) y <- x1 %*% beta + rnorm(nobservations, mean=0, sd=sd) # return simulated data list(y=y, x=x, beta=beta) } # simulate data for logistic regression # # parameters: # - npredictors: number of numeric predictors (variables) # - nobservations: number of observations (examples) # # result: list with following components # - g: outcome variable (vector of length nobservations, values are 0 or 1) # - x: data matrix (matrix of nobservations rows and npredictors columns) # - beta: linear model parameters used to generate data (vector of length npredictors + 1) simulate_logistic_regression <- function(npredictors = 20, nobservations = 100) { # generate parameters beta <- rnorm(npredictors+1, mean=0, sd=10/npredictors) x <- 
matrix(rnorm(nobservations * npredictors), nr=nobservations, nc=npredictors) x1 <- cbind(1, x) # generate outcome, i.e., do coin flips p <- plogis(x1 %*% beta) g <- rbinom(nobservations, size=1, prob=p) # return simulated data list(g=g, x=x, beta=beta) } ``` You can use this function as follows in your submission: ```{r} # a really bad estimator # returns random vector as estimated parameters dummy_gd <- function(x, y) { npredictors <- ncol(x) rnorm(npredictors) } # simulate data set.seed(1234) # seed random generator to get same simulation (useful when debugging) reg_data <- simulate_regression() x <- cbind(1, reg_data$x) # add column of ones as described in class dummy_beta <- dummy_gd(x, reg_data$y) # make a simple plot to compare estimates plot(reg_data$beta, dummy_beta, xlab="simulation parameters", ylab="estimated parameters", pch=19, cex=1.3) ``` Include a similar plot in your writeup and comment on how your gradient descent implementation is working. ## Try it out (a) Find a dataset on which to try out different classification (or regression) algorithms. You can use the dataset used in the "datatypes" assignment earlier this semester if appropriate. Note: we have used the `Weekly` dataset in the `ISLR` package in previous projects, but it's not as interesting as some of the datasets you came up with earlier in the semester. (b) Choose **two** of the following algorithms: (1) Linear Discriminant Analysis (LDA) (only classification) (2) classification (or regression) trees, (3) random forests (4) linear SVM, (5) non-linear SVM (6) k-NN classification (or regression) and compare their prediction performance on your chosen dataset to your logistic regression gradient descent implementation using 10-fold cross-validation and a paired $t$-test (one for each of the two algorithms vs. your logistic regression code). 
Note: for those algorithms that have hyper-parameters, i.e., all of the above except for LDA, you need to specify in your writeup which model selection procedure you used. ## Handing in: 1) For Problems 1 and 3 include your code in the Rmarkdown writeup. Make sure they are commented and that the code is readable in your final writeup (e.g., check line widths). 2) For Problem 2, include the derivation of the gradient descent update in the writeup 3) For Problem 4, make sure you run the provided code and include the output in the writeup. 5) For the next section organize your writeup as follows: a) Describe the dataset you are using, including: what is the outcome you are predicting (remember this should be a classification task) and what are the predictors you will be using. b) Include code to obtain and prepare your data as a dataframe to use with your three classification algorithms. In case your dataset includes non-numeric predictors, include the code you are using to transform these predictors into numeric predictors you can use with your logistic regression implementation. c) Specify the two additional algorithms you have chosen in part (b), and for algorithms that have hyper-parameters specify the method you are using for model selection. d) Include all code required to perform the 10-fold cross-validation procedure on your three algorithms. e) Writeup the result of your 10-fold cross-validation procedure. Make sure to report the 10-fold CV error estimate (with standard error) of each of the three algorithms. Also report on the result of the _two_ paired $t$-tests comparing your logistic regression algorithm with your chosen two algorithms. Knit the Rmarkdown file and submit to ELMS (link and submission instructions at http://www.hcbravo.org/IntroDataSci/projects/project3/). 
<file_sep>/docs/misc/hw1_datatypes_wrangling_sample.Rmd --- title: 'HW: Datatypes and Wrangling' author: "<NAME>" date: "`r Sys.Date()`" output: pdf_document: default html_notebook: default editor_options: chunk_output_type: inline --- ```{r knitr_setup, echo=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` ## Data types _1) Provide a URL to the dataset._ I downloaded my dataset from http://www.hcbravo.org/IntroDataSci/misc/BPD_Arrests.csv _2) Explain why you chose this dataset._ I am interested in studying how rates of arrests in different parts of Baltimore are related to demographic statistics. _3) What are the entities in this dataset? How many are there?_ Entities are specific arrests. There are 104528. _4) How many attributes are there in this dataset?_ There are 15 attributes. _5) What is the datatype of each attribute (categorical -ordered or unordered-, numeric -discrete or continuous-, datetime, geolocation, other)? Write a short sentence stating how you determined the type of each attribute. 
Do this for at least 5 attributes, if your dataset contains more than 10 attributes, choose 10 of them to describe._ | Num | Name | Type | Description | |-----|------|------|-------------| | 1 | `arrest` | categorical | Identifier of each arrest, takes values from finite set | | 2 | `age` | numeric continuous | Ages are numeric values measured in time units | | 3 | `race` | categorical unordered | Can take value from finite set of possible races | | 4 | `sex` | categorical unordered | Can take value from finite set of possible sexes | | 5 | `arrestDate` | datetime | Specifies date of arrest | | 6 | `arrestTime` | datetime | Specifies time of arrest | | 7 | `arrestLocation` | other - address | Street address of arrest | | 8 | `incidentOffense` | categorical unordered | Can take value from finite set of possible offenses | | 9 | `incidentLocation` | other - address | Street address of incident | | 10 | `charge` | categorical unordered | Can take value from finite set of possible charges | _6) Write R code that loads the dataset using function `read_csv`. Were you able to load the data successfully?
If no, why not?_ ```{r load_data, message=FALSE} library(tidyverse) url <- "http://www.hcbravo.org/IntroDataSci/misc/BPD_Arrests.csv" arrest_tab <- read_csv(url) arrest_tab %>% slice(1:10) ``` ## Wrangling 1) My pipeline computes average arrest age (ignoring ages <= 0), for each district and writes them in increasing order ```{r pipeline} mean_ages <- arrest_tab %>% filter(age > 0) %>% select(district, age) %>% group_by(district) %>% summarize(mean_age=mean(age)) %>% arrange(mean_age) mean_ages ``` ## Plotting 1) This barplot shows the average arrest age per district (ignoring ages <= 0) ```{r plot} mean_ages %>% ggplot(aes(x=district, y=mean_age)) + geom_bar(stat="identity") + coord_flip() ```<file_sep>/materials/lecture-notes/18-entity_resolution.Rmd # Entity Resolution and Record Linkage ```{r er_setup, include=FALSE} knitr::opts_chunk$set(cache = TRUE) ``` We have discussed how we model datasets using _entities_ and the _attributes_ that characterize them. Very often, we will be faced with the problem of _data integration_ where we want to combine two (or more) datasets from different sources, especially when they may contain information about the same _entities_. The challenge here is that the _attributes_ in the two datasets may not be named the same, and more problematic, values for the _same_ entity may be different in the two datasets. Here are some examples: - Suppose we are combining data from one dataset with a `Person` table containing attributes `FirstName` and `LastName` with another dataset with `People` table containing attributes `FirstName` and `Surname`. - Suppose there is a row `<<NAME>>` in the first dataset and row `<<NAME>>` in the second. They may refer to the same person, should we combine or _link_ these rows when we combine these datasets? - Even trickier, suppose there is a row `<John, Katz>` in the first, and row `<<NAME>>` in the second? 
These are examples of a general problem referred to as **Entity Resolution** and **Record Linkage**. We can define the general problem as follows: ## Problem Definition **Given**: Entity sets $E_1$ and $E_2$, **Find**: Linked entities $(e_1,e_2)$ with $e_1 \in E_1$ and $e_2 \in E_2$. One general strategy to solve this problem is to define a _similarity_ function between entities $e_1$ and $e_2$ and link entities with high similarity. ## One approach: similarity function A common way of defining this _similarity_ function $s(e_1,e_2)$ is to define it as an _additive_ function over some set of shared attributes $A$: $$ s(e_1,e_2) = \sum_{j \in A} s_j(e_1[j], e_2[j]) $$ with $s_j$ a similarity function defined for _each_ attribute $j$, itself depending on the _type_ of attribute $j$. Here are some examples: ### Example attribute functions **Categorical attribute**: Here we can specify $s_j$ to state that pairs of entities with the same value are more similar to each other than pairs of entities with different values. E.g., $$ s_j(e_1[j],e_2[j]) = \begin{cases} 1 & \mathrm{ if } \; e_1[j] == e_2[j] \\ 0 & \mathrm { o.w. } \end{cases} $$ **Continuous attribute**: Here we can specify $s_j$ to state that pairs of entities with values that are _close_ to each other are more similar than pairs of entities with values that are _farther_ to each other. Note that to specify _close_ or _far_ we need to introduce some notion of _distance_. We can use Euclidean distance for example, $$ d_j(e_1[j],e_2[j]) = (e_1[j] - e_2[j])^2 \\ s_j(e_1[j],e_2[j]) = e^{-d_j(e_1[j],e_2[j])} $$ **Text attributes**: Here we can use a similar idea but based on edit distance between strings rather than Euclidean distance. Note, however, that often we can use domain knowledge to specify similarity. For example, the fact that `John` and `Johnathan` are similar requires domain knowledge of common usage of English names. 
## Solving the resolution problem Equipped with a similarity function $s(e_1,e_2)$, we now need a rule to match entities we think are linked. This depends on assumptions we make about the dataset, similar to assumptions we made when performing joins. In general, we model the entity resolution problem as an _optimization_ problem, where we have an _objective function_ (based on similarity) that we want to maximize over possible sets $V$ of _valid_ pairs $(e_1,e_2)$, where set $V$ constrains pairs based on problem-specific assumptions. Thus, in general, we want to solve the problem $$ R = \arg \max_{V} \sum_{(e_1,e_2) \in V} s(e_1,e_2) $$ ### Many-to-one resolutions Suppose we first constrain sets $V$ to represent many-to-one resolutions. That is, suppose we assume that we want to link every entity $e_1 \in E_1$ to some entity $e_2 \in E_2$, allowing many-to-one linking. Thus, entities in $E_1$ can only appear once in pairs in $V$, but entities in $E_2$ may appear more than once. In this case, we can match $(e_1,e_2)$ where $$ e_2 = \arg \max_{e \in E_2} s(e_1,e) $$ That is, the entity in $E_2$ with highest similarity to $e_1$. ### One-to-one resolutions Suppose we constrain sets $V$ to those that represent one-to-one resolutions. That is, suppose we want to link every entity $e_1 \in E_1$, but in a one-to-one matching with entities in $E_2$. Thus if $(e_1,e_2) \in V$ then $e_1$ and $e_2$ appear in only one pair in $V$. In this case, we have a harder computational problem. In fact, this is an instance of the _maximum bipartite matching problem_, which can be solved using network flow algorithms. ### Other constraints We can add additional constraints to $V$ to represent other information we have about the task. A common one would be to only allow pairs $(e_1,e_2) \in V$ to have similarity above some threshold $t$. I.e., $(e_1, e_2) \in V$ only if $s(e_1,e_2) \geq t$.
## Discussion The procedure outlined above is an excellent first attempt to solve the Entity Resolution problem. This is a classical problem in Data Science for which a variety of approaches and methods are in use. <file_sep>/materials/lecture-notes/24-design_testing.Rmd # Experiment design and hypothesis testing In this section we see one instance in which we can apply the CLT in data analysis. ## Inference One way to think about how we use probability in data analysis (statistical and machine learning) is like this: ![](img/inference.png) The LLN tells us that our estimate $\hat{p}$ will be close to $p$ on average, the CLT lets us answer how confident we are that we have found $p$. We do this by constructing a _confidence interval_ as follows. Since $\hat{p} \sim N(p,\frac{\sqrt{p(1-p)}}{\sqrt{n}})$, we want to find an interval $[\hat{p}_{-}, \hat{p}_{+}]$, with $\hat{p}$ at its center, with 95% of the probability specified by the CLT. Why? In that case, there is a 95% probability that the value of parameter $p$ will be within that interval. Now, how do we calculate this interval, since we want the interval to contain 95% of the probability? The probability for the tails (values outside this interval) will be $(1-.95)/2$ (since there are two tails). So, the lower value of the interval will be one where the normal probability distribution (with mean $\hat{p}$ and standard deviation $\frac{\sqrt{\hat{p}(1-\hat{p})}}{\sqrt{n}}$) is such that $P(Y \leq \hat{p}_{-}) = .05/2$, which we can calculate using the `qnorm` function in R: $$ \begin{align} \hat{p}_{-} & = \mathtt{qnorm}(.05/2, \hat{p}, \frac{\sqrt{\hat{p}(1-\hat{p})}}{\sqrt{n}}) \\ {} & = \hat{p} + \mathtt{qnorm}(.05/2,0, \frac{\sqrt{\hat{p}(1-\hat{p})}}{\sqrt{n}}) \end{align} $$ The upper value of the interval is computed with probability $1-(.05/2)$, which by the symmetry of the normal distribution is given by $\hat{p}_{+} = \hat{p} + -\mathtt{qnorm}(.05/2,0, \frac{\sqrt{\hat{p}(1-\hat{p})}}{\sqrt{n}})$.
Let's see how these intervals look for our twitter bot example: ```{r, message=FALSE} library(dplyr) get_estimate <- function(n, p=0.7) mean(sample(c(0,1), size=n, replace=TRUE, prob=c(1-p,p))) set.seed(1) # let's construct confidence intervals for samples of size n=10,100,500,1000,10000 tab <- data.frame(sample_size=c(10,100,500,1000,10000)) %>% mutate(phat = sapply(sample_size,get_estimate)) %>% mutate(se = sqrt(phat*(1-phat)) / sqrt(sample_size)) %>% mutate(lower = phat + qnorm(.05/2, sd=se)) %>% mutate(upper = phat + -qnorm(.05/2, sd=se)) knitr::kable(tab) ``` For our sample of $n=500$, we would say that our estimate of $p$ is $`r round(tab$phat[3],2)` \pm `r round(qnorm(.05/2,sd=tab$se[3]),2)`$. A compact way of writing that is that our estimate of $p$ is ${}_{`r round(tab$lower[3],2)`}`r round(tab$phat[3],2)`_{`r round(tab$upper[3],2)`}$. ### Hypothesis testing How else is this framework useful? Suppose that before I sampled tweets I thought (hypothesized) that more than 50% of tweets are bot-generated. One very popular way of thinking about this problem is to reject the hypothesis that this is not the case. In this case, we have a _null_ hypothesis (that 50% or less of tweets are bot-generated) against an _alternative_ hypothesis that more than 50% of tweets are bot-generated. You will see this written in statistics textbooks as: $$ \begin{align} H_0: \, & p <= .5 & \textrm{(null)} \\ H_1: \, & p > .5 & \textrm{(alternative)} \end{align} $$ Note: this is a _one-sided_ test vs. a _two-sided_ test where the null hypothesis is that $p=.5$ and the alternative is $p \neq .5$. According to the CLT, estimates $\hat{p}$ of $p$ from $n$ samples would be distributed as $N(.5, \frac{\sqrt{.5(1-.5)}}{\sqrt{n}})$ (we use $p=.5$ as this is the worst case for the hypothesis we want to test). Once we do have our sample of $n$ tweets we can get an estimate $\hat{p}$ as we did before.
If we see that $\hat{p}$ (sample mean from our sample of tweets) is _too far_ from $p=.5$ then we could reject the _null_ hypothesis since the estimate we derived from the data we have is not statistically consistent with the _null_ hypothesis. Now, how do we say our estimate $\hat{p}$ is too far? Here, we use the probability model given by the CLT. If $P(Y \geq \hat{p}) \geq .95$ under the null model (of $p=.5$), we say it is too far and we reject. ![](img/testing.png) This 95% rejection threshold is conservative, but somewhat arbitrary. So we use one more metric, $P(|Y| \geq \hat{p})$ (the infamous p-value) to say: we could reject this hypothesis for all thresholds greater than this p-value. Let's see how testing the hypothesis $p > .5$ would look for our tweet example ```{r} tab <- tab %>% mutate(p_value = 1-pnorm(phat, mean=.5, sd=se)) knitr::kable(tab) ``` Notice that rejection occurs when the parameter value for the null hypothesis $p=.5$ is outside the 95% confidence interval. Another note, these results hold for $n$ sufficiently large that the normal distribution in the CLT provides a good approximation of the distribution of estimates $\hat{p}$. In cases where $n$ is smaller, the $t$-distribution, as opposed to the normal distribution, provides a better approximation of the distribution of estimates $\hat{p}$. In that case, instead of using `pnorm` in the calculations above, we would use `pt` (for $t$-distribution) and the testing procedure above is referred to as a $t$-test (one-sided or two-sided as above). Now, as $n$ grows, the $t$-distribution approaches a normal distribution which is why analysts use the $t$-test regularly. ## A/B Testing A classic experimental design where hypothesis testing is commonly used is A/B testing. Here we are interested in seeing if proposed changes to a webpage have a desired effect. For example, we would like to know if page visitors follow a link more often after a page redesign.
![](img/A-B_testing.png) Here we have two estimates $\hat{p}_A$ and $\hat{p}_B$, the proportion of clicks for design A and B respectively. The null hypothesis we would test is that _there is no difference in proportions_ between the two designs. Mathematically, we would like to know "What is the probability that we observe a difference in proportions this large under the null hypothesis". We will work this out as a homework exercise. ## Summary **Inference**: estimate parameter from data based on assumed probability model (for example, matching expectation. We'll see later another method called maximum likelihood). For _averages_ the LLN and CLT tells us how to compute probabilities from a single parameter estimate, that is, derived from one dataset of samples. With these probabilities we can construct confidence intervals for our estimate. **Testing**: Having a hypothesis about our parameter of interest, we can use probability _under this hypothesis_ to see how statistically consistent our data is with that hypothesis, and reject the hypothesis if data is not statistically consistent enough (again using probability from CLT when dealing with averages). ## Probability Distributions In this example we saw three distributions: ### Bernoulli Notation: $X \sim \mathrm{Bernoulli}(p)$. Values: $X \in \{0,1\}$ Parameter: $p$, $p(X=1)=p$ (probability of success). Expected Value: $\mathbb{E} X = p$ Variance: $\mathrm{var}(X) = p(1-p)$. We can write the probability mass function as $$ p(X=x)=p^x(1-p)^{(1-x)} $$ ### Binomial This corresponds to the number of $1$'s in a draw of $n$ independent $\mathrm{Bernoulli}(p)$ random variables. Notation: $X \sim \mathrm{Bin(n,p)}$. 
Values: $X \in 0,1,2,\ldots,n$ Parameters: $p$ (probability of success), $n$ number of Bernoulli draws Expected Value: $\mathbb{E} X=np$ Variance: $\mathrm{var}(X) = np(1-p)$ Here the probability mass function is a little more complicated since we have many different ways in which $n$ draws of independent Bernoulli random variables result in the same number of successess $$ p(X=k) = \binom{n}{k} p^k(1-p)^{n-k} $$ ### Normal (Gaussian) distribution Notation: $X \sim N(\mu,\sigma)$ Values: $X \in \mathbb{R}$ Parameters: mean $\mu$, standard deviation $\sigma$ Expected Value: $\mathbb{E} X = \mu$ Variance: $\mathrm{var}(X) = \sigma^2$ The probability density function was given above. A useful reference for probability distributions can be found here: [https://blog.cloudera.com/blog/2015/12/common-probability-distributions-the-data-scientists-crib-sheet/](https://blog.cloudera.com/blog/2015/12/common-probability-distributions-the-data-scientists-crib-sheet/) ### Distributions in R For a majority of common distributions, R has the so-called `d,p,q,r` family of functions: | function | use | |----------|-----| | `d` | probability density (or mass) function | | `p` | cumulative probability function | | `q` | quantile function | | `r` | random value generator | For example, to use these for the Binomial distribution: ```{r, eval=FALSE} # using n=10, p=.3 # compute probability mass function value for k=4 successess dbinom(4, n=10, p=.3) # compute cumulative probability function for k=4 successess pbinom(4, n=10, p=.3) # compute the number of success corresponding to the .80th quantile qbinom(.8, n=10, p=.3) # generate a random value k rbinom(1, n=10, p=.3) ``` <file_sep>/materials/lectures/DataModels/DataModels.Rmd --- title: "Data Models" author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` Some of this material is based on <NAME>'s material: 
[https://github.com/umddb/datascience-fall14/blob/master/lecture-notes/models.md](https://github.com/umddb/datascience-fall14/blob/master/lecture-notes/models.md) ## Overview In this section we will discuss principles of preparing and organizing data in a way that is amenable to analysis, both in modeling and visualization. We think of a _data model_ as a collection of concepts that describes how data is represented and accessed. As we will see in the examples of packages `tidyr` and `dplyr`, thinking explicitly about the structure of datasets allows us to design and write general purpose and efficient code. Also, thinking abstractly of data structure, beyond a specific implementation, makes it easier to share data across programs and systems, and integrate data from different sources. Once we have thought about structure, we can then think about _semantics_: what does data represent? In this section, and in the course overall, we have thought about _structure_ and _semantics_ as follows: - **Structure**: We have assumed that data is organized in rectangular data structures (tables with rows and columns) - **Semantics**: We have discussed the notion of _values_, _attributes_, and _entities_. Recall that we can refer to _attributes_ as _variables_ and _entities_ as _observations_. In our previous section on datatypes, we used the following _data semantics_: a dataset is a collection of _values_, numeric or categorical, organized into _entities_ (_observations_) and _attributes_ (_variables_). Each _attribute_ contains values of a specific measurement across _entities_, and _entities_ collect all measurements across _attributes_. In the database literature, we call this exercise of defining structure and semantics _data modeling_.
Data Modeling is the process of representing/capturing structure in data based on defining: - **Data model**: A collection of concepts that describes how data is represented and accessed - **Schema**: A description of a specific collection of data, using a given data model The purpose of defining abstract data models is that it allows us to know the structure of the data/information (to some extent) and thus be able to write general purpose code. Lack of a data model makes it difficult to share data across programs, organizations, systems that need to be able to integrate information from multiple sources. We can also design algorithms and code that can significantly increase efficiency if we can assume general data structure. For instance, we can preprocess data to make access efficient (e.g., building a B-Tree on a field). A data model typically consists of: - Modeling Constructs: A collection of concepts used to represent the structure in the data. Typically we need to represent types of *entities*, their *attributes*, types of *relationships* between *entities*, and *relationship attributes* - Integrity Constraints: Constraints to ensure data integrity (i.e., avoid errors) - Manipulation Languages: Constructs for manipulating the data We desire that models are sufficiently _expressive_ so they can capture real-world data well, _easy to use_, and lend themselves to defining computational methods that have good performance. Some examples of data models are - Relational, Entity-relationship model, XML... - Object-oriented, Object-relational, RDF... - Current favorites in the industry: JSON, Protocol Buffers, [Avro](http://avro.apache.org/docs/current/), Thrift, Property Graph Why have so many models been defined? There is an inherent tension between descriptive power and ease of use/efficiency. More powerful, expressive, models can be applied to represent more datasets but also tend to be harder to use and query efficiently. 
Typically there are multiple levels of modeling. _Physical modeling_ concerns itself with how the data is physically stored. _Logical or Conceptual modeling_ concerns itself with type of information stored, the different entities, their attributes, and the relationships among those. There may be several layers of logical/conceptual models to restrict the information flow (for security and/or ease-of-use): - **Data independence:** The idea that you can change the representation of data w/o changing programs that operate on it. - **Physical data independence:** I can change the layout of data on disk and my programs won't change - index the data - partition/distribute/replicate the data - compress the data - sort the data ## Data models: A brief history - **1960's**: Computers finally become attractive, and enterprises start using it. Most applications initially used their own data stores. - **Data base**: coined in military information systems to denote "shared data banks" by multiple applications - Each application had its own format - Although the data was there, basically unavailable to other programs - Often original object code was lost - Instead, define a data format, store it as a "data dictionary", and allow general-purpose "data-base management" software to access it - Issues: - How to write data dictionaries? How to access data? - Disadvantages of integration: integrity, security, privacy concerns - Who controls the data? - Birth of "hierarchical model" and "network model" - Both allowed "connecting" records of different types (e.g., connect "accounts" with "customers") - Network model attempted to be very general and flexible - <NAME> received Turing Award - IBM designed its IMS hierarchical database in 1966 for the Apollo space program; still around today - _.. 
more than 95 percent of the top Fortune 1000 companies use IMS to process more than 50 billion transactions a day and manage 15 million gigabytes of critical business data_ (from IBM Website on IMS) - Predates *hard disks* - However, both models exposed too much of the internal data structures/pointers etc to the users - **1970's**: Relational Model - Origins in Set Theory - Some early work by D.L.Childs (somewhat forgotten) - <NAME>. "Ted" Codd: Developed the relational model - Elegant, formal model that provided almost complete *data independence* - Users didn't need to worry about how the data was stored, processed etc. - High level query language (relational algebra) - Notion of *normal forms* - Allowed one to reason about and remove redundancies - Led to two influential projects: INGRES (UC Berkeley), System R (IBM) - Also paved the way for a 1977 startup called "Software Development Laboratories" - Didn't care about IMS/IDMS compatibility (as IBM had to) - Many debates in the early 70's between Relational Model proponents and Network Model proponents - <NAME> of IBM was an early CODASYL advocate (later co-invented SQL): - _He (Codd) gave a seminar and a lot of us went to listen to him. This was as I say a revelation for me because Codd had a bunch of queries that were fairly complicated queries and since I'd been studying CODASYL, I could imagine how those queries would have been represented in CODASYL by programs that were five pages long that would navigate through this labyrinth of pointers and stuff. Codd would sort of write them down as one-liners. These would be queries like, "Find the employees who earn more than their managers." [laughter] He just whacked them out and you could sort of read them, and they weren't complicated at all, and I said, "Wow." 
This was kind of a conversion experience for me, that I understood what the relational thing was about after that._ - **1976**: <NAME> proposed "Entity-Relationship Model" - Allowed higher-level, conceptual modeling; easier for humans to think about - Example ![](er.png) - **1980**: Commercialization/wide-spread acceptance of relational model - SQL emerged as a standard, in large part because of IBM's backing - People still sometimes complain about its limitations - **Late 80's**: Object-oriented, object-relational databases - Enriching the expressive power of relational model - Set-valued attributes, aggregation, generalization, etc. - Object-oriented: to get around *impedance mismatch* between programming languages and databases - Object-relational: allow user-defined types -- gets many benefits of object-oriented while keeping the essence of relational model - No real differentiation today from pure relational model - Other proposals for semantic data models ## The Entity-Relationship and Relational Models The fundamental objects in this formalism are _entities_ and their _attributes_, as we have seen before, and _relationships_ and _relationship attributes_ which we saw briefly in a previous example, where 'rankings' and 'songs' are distinct types of entities and we define _relationships_ between them. ![](er.png) Here, rectangles are _entities_, diamonds and edges indicate _relationships_. Circles describe either entity or relationship _attributes_. Arrows are used to indicate multiplicity of relationships (one-to-one, many-to-one, one-to-many, many-to-many): ![](relationships.png) Think about what relationships are shown in this diagram. ![](er2.png) In databases and general datasets we work on, both Entities and Relationships are represented as _Relations_ (tables) such that a unique entity/relationship is represented by a single row. This leads to the natural question of how unique entities are determined or defined.
Here is where the concept of a _key_ comes in. This is an essential aspect of the Entity-Relationship and Relational models. - A _key_ is a minimal set of _attributes_ that uniquely identifies an entity. - A _primary key_ is used in the ER model to specify a single key, although there may be multiple candidate _keys_ - Relationships also have _keys_, defined by the set of keys of the entities participating in it. #### Exercise Consider the Lahman baseball dataset, included in your class materials ```{r, eval=FALSE} library(Lahman) ?Lahman ``` It contains information about, among other things: - _Franchises_, these are the corporate team entities. Attributes for these can include, _year_established_, _city_, etc. - _Teams_, which are the specific teams fielded by a franchise in a given season. Attributes for these can include, _year_, _wins_, _losses_, etc. - _Players_, who are the people who play the game, attributes can include _name_, _school_attended_, etc. Also, there are season-specific attributes, like _batting average_, _home runs_, etc. - _Salaries_, which indicates how much a franchise is paying a player in a given season. **Draw an ER diagram describing this Schema**. Indicate keys as appropriate. ### Late 90's-today One of the most restrictive aspects of the ER model is the need to specify a data structure that applies to all objects in the dataset, and the need for _values_ stored in a given table (or relation) to be _atomic_. Recent data models attempt to address these shortcomings using semi-structured, complex, nested models. #### XML: eXtensible Markup Language The data models described above are mostly defined for _structured data_: where a specific and consistent schema is assumed.
XML is instead intended for *semi-structured* data, relying on flexible, self-describing schemas: ```xml <?xml version="1.0" encoding="UTF-8"?> <!-- Edited by XMLSpy --> <CATALOG> <CD> <TITLE><NAME></TITLE> <ARTIST><NAME></ARTIST> <COUNTRY>USA</COUNTRY> <COMPANY>Columbia</COMPANY> <PRICE>10.90</PRICE> <YEAR>1985</YEAR> </CD> <CD> <TITLE>Hide your heart</TITLE> <ARTIST><NAME></ARTIST> <COUNTRY>UK</COUNTRY> <COMPANY>CBS Records</COMPANY> <PRICE>9.90</PRICE> <YEAR>1988</YEAR> </CD> ... ``` #### RDF: Resource Description Framework Originally intended as a "metadata data model", its key construct is a "subject-predicate-object" triple: - E.g., subject=sky - predicate=has-the-color - object=blue Direct mapping to a labeled, directed multi-graph, typically stored in relational databases, or what are called "triple-stores". But some graph database products support it as well (e.g., DEX) ```xml <?xml version="1.0" encoding="utf-8"?> <rdf:RDF xmlns:contact="http://www.w3.org/2000/10/swap/pim/contact#" xmlns:eric="http://www.w3.org/People/EM/contact#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"> <rdf:Description rdf:about="http://www.w3.org/People/EM/contact#me"> <contact:fullName><NAME></contact:fullName> </rdf:Description> <rdf:Description rdf:about="http://www.w3.org/People/EM/contact#me"> <contact:mailbox rdf:resource="mailto:e.miller123(at)example"/> </rdf:Description> <rdf:Description rdf:about="http://www.w3.org/People/EM/contact#me"> <contact:personalTitle>Dr.</contact:personalTitle> </rdf:Description> <rdf:Description rdf:about="http://www.w3.org/People/EM/contact#me"> <rdf:type rdf:resource="http://www.w3.org/2000/10/swap/pim/contact#Person"/> </rdf:Description> </rdf:RDF> ``` ![](Rdf_graph_for_Eric_Miller.png) #### JSON: Javascript Object Notation Very similar to XML and seems to be replacing it for many purposes ```json { "firstName": "John", "lastName": "Smith", "isAlive": true, "age": 25, "height_cm": 167.6, "address": { "streetAddress": "21 2nd 
Street", "city": "New York", "state": "NY", "postalCode": "10021-3100" }, "phoneNumbers": [ { "type": "home", "number": "212 555-1234" }, { "type": "office", "number": "646 555-4567" } ], "children": [], "spouse": null } ``` This is the format most contemporary data REST APIs use to transfer data. For instance, here is part of a JSON record from a Twitter stream: ```json { "created_at":"Sun May 05 14:01:34+00002013", "id":331046012875583488, "id_str":"331046012875583488", "text":"\u0425\u043e\u0447\u0443, \u0447\u0442\u043e\u0431 \u0442\u044b \u0441\u0434\u0435\u043b\u0430\u043b \u0432\u0441\u0451 \u043d\u0435\u043e\u0431\u0445\u043e\u0434\u0438\u043c\u043e\u0435.\n \\,,\\ *_* \/,,\/", "source":"\u003ca href=\"http:\/\/twitterfeed.com\"rel=\"nofollow\"\u003etwitterfeed\u003c\/a\u003e", "in_reply_to_user_id_str":null, "user":{ "id":548422428, "id_str":"548422428", "name":"\u0410\u0439\u0433\u0435\u0440\u0438\u043c \u041f\u043e\u0433\u043e\u0434\u0438\u043d\u0430", "screen_name":"paddybyrny", "location":"\u0420\u043e\u0441\u0441\u0438\u044f;\u0412\u043b\u0430\u0434\u0438\u0432\u043e\u0441\u0442\u043e\u043a", "followers_count":4188, "friends_count":4281, "lang":"en", "profile_background_image_url":"http:\/\/a0.twimg.com\/images\/themes\/theme1\/bg.png", }, "geo":null, "coordinates":null, "entities":{ "hashtags":[],"symbols":[],"urls":[],"user_mentions":[] },"favorited":false,"retweeted":false,"filter_level":"medium","lang":"ru"} ``` #### Property Graph Model Developed for graph databases, it is basically a edge- and vertex-labeled graph, with properties associated with each edge and vertex ![](property_graph.jpg) #### Related: Serialization formats - Need a way for programs/systems to send data to each other - Several recent technologies all based around schemas - [Protocol Buffers](https://code.google.com/p/protobuf/): Developed by Google - Schema is mostly relational, with support for optional fields and some other constructs - Schema specified using a `.proto` file 
```proto message Person { required int32 id = 1; required string name = 2; optional string email = 3; } ``` - Compiled by `protoc` to produce C++, Java, or Python code - Programs can be written in any of those languages, e.g., C++: ```c++ Person person; person.set_id(123); person.set_name("Bob"); person.set_email("<EMAIL>"); fstream out("person.pb", ios::out | ios::binary | ios::trunc); person.SerializeToOstream(&out); out.close(); ``` - [Avro](http://avro.apache.org/): Richer data structures, JSON-specified schema ```json { "namespace": "example.avro", "type": "record", "name": "User", "fields": [ {"name": "name", "type": "string"}, {"name": "favorite_number", "type": ["int", "null"]}, {"name": "favorite_color", "type": ["string", "null"]} ] } ``` - [Thrift](https://thrift.apache.org/): Developed by Facebook, now Apache project - Main goal to support Remote Procedure Calls across languages ### Using modern datamodels in R There are a number of packages to represent and manipulate data in these models in R: - `XML` and `xml2` available in CRAN - `jsonlite`, `rjson`, available in CRAN - `RProtoBuf` available in CRAN - [`ravro`](https://github.com/RevolutionAnalytics/ravro) <file_sep>/materials/lecture-notes/12-two_table_operations.Rmd # Two-table operations ```{r setup12, include=FALSE} knitr::opts_chunk$set(cache = TRUE) ``` So far we have looked at data operations defined over single tables and data frames. In this section we look at efficient methods to combine data from multiple tables. The fundamental operation here is the `join`, which is a workhorse of database system design and implementation. The `join` operation combines rows from two tables to create a new single table, based on matching criteria specified over attributes of each of the two tables.
Consider the example of joining the `flights` and `airlines` table: ```{r} library(nycflights13) data(flights) data(airlines) ``` Let's take a look at the `flights` table again: ```{r} flights ``` And add the `airlines` table: ```{r} airlines ``` Here, we want to add airline information to each flight. We can do so by joining the attributes of the respective airline from the `airlines` table with the `flights` table based on the values of attributes `flights$carrier` and `airlines$carrier`. Specifically, every row of `flights` with a specific value for `flights$carrier`, is joined with the corresponding row in `airlines` with the same value for `airlines$carrier`. We will see four different ways of performing this operation that differ in how non-matching observations are handled. ## Left Join In a `left join`, all observations on left operand (LHS) are retained: ![](img/join_lhs.png) ![](img/left_join.png) ```{r} flights %>% left_join(airlines, by="carrier") ``` RHS variables for LHS observations with no matching RHS observations are coded as `NA`. ## Right Join All observations on right operand (RHS) are retained: ![](img/join_lhs.png) ![](img/right_join.png) ```{r} flights %>% right_join(airlines, by="carrier") ``` LHS variables for RHS observations with no matching LHS observations are coded as `NA`. ## Inner Join Only observations matching on both tables are retained ![](img/join_lhs.png) ![](img/inner_join.png) ```{r} flights %>% inner_join(airlines, by="carrier") ``` ## Full Join All observations are retained, regardless of matching condition ![](img/join_lhs.png) ![](img/full_join.png) ```{r} flights %>% full_join(airlines, by="carrier") ``` All values coded as `NA` for non-matching observations as appropriate. ## Join conditions All join operations are based on a matching condition: ```{r, eval=FALSE} flights %>% left_join(airlines, by="carrier") ``` specifies to join observations where `flights$carrier` equals `airlines$carrier`.
In this case, where no conditions are specified using the `by` argument: ```{r, eval=FALSE} flights %>% left_join(airlines) ``` a *natural join* is perfomed. In this case all variables with the same name in both tables are used in join condition. You can also specify join conditions on arbitrary attributes using the `by` argument. ```{r, eval=FALSE} flights %>% left_join(airlines, by=c("carrier" = "name")) ``` ## Filtering Joins We've just seen *mutating joins* that create new tables. *Filtering joins* use join conditions to filter a specific table. ```{r} flights %>% anti_join(airlines, by="carrier") ``` Filters the `flights` table to only include flights from airlines that *are not* included in the `airlines` table. ## SQL Constructs: Multi-table Queries Key idea: - Do a join to get an appropriate table - Use the constructs for single-table queries You will get used to doing all at once. For the first part, where we use a join to get an appropriate table, the general SQL construct includes: - The name of the first table to join - The _type_ of join to do - The name of the second table to join - The join condition(s) - Examples: ```sql select title, year, me.name as producerName from movies m join movieexec me where m.producer = me.id; ``` - Consider the query: ```sql select title, year, producer, count(starName) from movies join starsIn where title = starsIn.movieTitle and year = starsIn.movieYear group by title, year, producer ``` - What about movies with no stars ? - Need to use **outer joins** ```sql select title, year, producer, count(starName) from movies left outer join starsIn on title = starsIn.movieTitle and year = starsIn.movieYear group by title, year, producer ``` As we saw before, all tuples from 'movies' that have no matches in starsIn are included with NULLs (in `dplyr` this was `NA`). So, if a tuple `(m1, 1990)` has no match in `starsIn`, we get `(m1, 1990, NULL)` in the result and the `count(starName)` works correctly then. 
Note however that `count(*)` would not work correctly (NULLs can have unintuitive behavior) In most systems `JOIN` corresponds to an _inner join_, and include `LEFT JOIN` and `RIGHT JOIN` as well. <file_sep>/materials/slides/intro/intro.Rmd --- title: "Course Introduction and Overview" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: chakra: libs/remark-0.14.0.min.js lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[CMSC320 Introduction to Data Science: Course Introduction and Overview] .author[<NAME>] .other-info[ University of Maryland, College Park, USA CMSC320: `r Sys.Date()` ] .logo[![](img/logo.png)] --- ```{r setup, include=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) ``` ## Business First Course Webpage: http://bit.ly/hcb-ids --- ## What is Data Science? Data science encapsulates the interdisciplinary activities required to create data-centric artifacts and applications that address specific scientific, socio-political, business, or other questions. --- ### Data Observable units of information measured or captured from activity of people, places and things. -- ### Specific Questions Seeking to understand a phenomenon, natural, social or other -- Can we formulate specific questions for which an answer posed in terms of patterns observed, tested and or modeled in data is appropriate. --- ### Interdisciplinary Activities - Formulating a question, assessing the appropriateness of the data and findings used to find an answer require understanding of the specific subject area. -- - Deciding on the appropriateness of models and inferences made from models based on the data at hand requires understanding of statistical and computational methods. 
--- ### Data-centric artifacts and applications - Answers to questions derived from data are usually shared and published in meaningful, succint but sufficient, reproducible artifacts (papers, books, movies, comics). -- - Going a step further, interactive applications that let others explore data, models and inferences are great. --- class: middle, center ## Data Science .image-50[![](img/conway.png)] --- ## Why Data Science? The granularity, size and accessibility data, comprising both physical, social, commercial and political spheres has exploded in the last decade or more. > I keep saying that the sexy job in the next 10 years will be statisticians” > <NAME>, Chief Economist at Google (http://www.nytimes.com/2009/08/06/technology/06stats.html?_r=0) --- ## Why Data Science? > “The ability to take data—to be able to understand it, to process it, to extract value from it, to visualize it, to communicate it—that’s going to be a hugely important skill in the next decades, not only at the professional level but even at the educational level for elementary school kids, for high school kids, for college kids.” > <NAME> (http://www.mckinsey.com/insights/innovation/hal_varian_on_how_the_web_challenges_managers) --- ## Why Data Science? > “Because now we really do have essentially free and ubiquitous data. So the complimentary scarce factor is the ability to understand that data and extract value from it.” > <NAME> (http://www.mckinsey.com/insights/innovation/hal_varian_on_how_the_web_challenges_managers) --- ## Data Science in Society Large amounts of data produced across many spheres of human activity, -- Many societal questions may be addressed by characterizing patterns in data. --- ## Data Science in Society This can range from unproblematic questions: - how to dissect a large creative corpora, say music, literature, based on raw characteristics of those works, text, sound and image. 
-- To more problematic questions - analysis of intent, understanding, appreciation and valuation of these creative corpora. --- ## Data Science in Society Issues of fairness and transparency in the current era of big data are especially problematic. - Is data collected representative of population for which inferences are drawn? -- - Are methods employed learning latent unfair factors from ostensibly fair data? -- - These are issues that the research community is now starting to address. --- ## Data Science in Society In all settings, issues of ethical collection of data, application of models, and deployment of data-centric artifacts are essential to grapple with. Issues of privacy are equally important. --- class: split-50 ## Data Science in Society: Machine Learning .column[ Self driving cars make use of ML models for sensor processing. ] .column[ .image-50[.center[![](img/tesla.jpeg)]] ] --- class: split-50 ## Data Science in Society: Machine Learning .column[ Image recognition software uses ML to identify individuals in photos. ] .column[ .image-50[![](img/face_recognition.jpeg)] ] --- class: split-50 ## Data Science in Society: Machine Learning .column[ ML models have been applied to medical imaging to yield expert-level prognosis. 
] .column[ .image-50[![](img/pathology.png)] ] --- ## Data Science in Society: Data Journalism [http:://fivethirtyeight.com](http:://fivethirtyeight.com) ```{r five38, echo=FALSE} knitr::include_url("http://fivethirtyeight.com/") ``` --- ## Data Science in Society: Data Journalism [http://www.nytimes.com/section/upshot](http://www.nytimes.com/section/upshot) ```{r upshot, echo=FALSE} knitr::include_url("http://www.nytimes.com/section/upshot" ) ``` --- class: split-50 ## Data Science in Society: Business .column[ .center[.image-70[![](img/moneyball1.png)]] ] -- .column[ .center[.image-50[![](img/moneyball2.png)]] ] --- class: split-50 ## Data Science in Society: Business .column[ In the early 2000's the Oakland A's were winning as much as teams with much bigger payrolls by evaluating players using data differently than other teams. ] .column[ .image-50[![](img/moneyball3.png)] ] --- layout: true ## Data Science in Society: Entertainment --- ### The story of the Netflix Prize In October 2006 Netflix announced a prize around their movie recommendation engine. -- Supervised Machine Learning (ML) task: - Dataset of users and their ratings, (1,2,3,4 or 5 stars), of movies they have rated. - Build an ML model that given predicts a specific user's rating to a movie they have not rated. -- They can recommend movies to users if they predict high rating. --- Netflix would award $1M for the first ML system that provided a 10% improvement to their existing system --- class: split-30 .column[ Existing system had a 0.9514 mean squared error ] .column[ .image-60[![Netflix Challenge 3 week leaderboard](img/netflix1.png) ]] --- class: split-30 .column[ Within three weeks, at least 40 teams had improved upon the existing Netflix system. The top teams were showing improvement over 5%. 
] .column[ .image-60[![Netflix Challenge 3 week leaderboard](img/netflix1.png) ]] --- layout: false ## Course organization This course will cover basics of how to represent, model and communicate about data and data analyses using the R and/or Python environments for Data Science -- - Area 0: Tools and skills -- - Area 1: Data types and operations -- - Area 2: Data wrangling -- - Area 3: Modeling -- - Area 4: Applications -- - Area 5: Communication --- class: split-70 ## General Workflow .column[ .left[.image-70[![](img/zumel_mount_cycle.png)]] ] .column[ .source[Zumel and Mount] ] --- class: split-50 ### Defining the goal .column[ - What is the question/problem? - Who wants to answer/solve it? - What do they know/do now? - How well can we expect to answer/solve it? - How well do they want us to answer/solve it? ] .column[ .image-50[![](img/zumel_mount_cycle.png)] ] --- class: split-50 ### Data collection and Management .column[ - What data is available? - Is it good enough? - Is it enough? - What are sensible measurements to derive from this data? Units, transformations, rates, ratios, etc. ] .column[ .image-50[![](img/zumel_mount_cycle.png)] ] --- class: split-50 ### Modeling .column[ - What kind of problem is it? E.g., classification, clustering, regression, etc. - What kind of model should I use? - Do I have enough data for it? - Does it really answer the question? ] .column[ .image-50[![](img/zumel_mount_cycle.png)] ] --- class: split-50 ### Model evaluation .column[ - Did it work? How well? - Can I interpret the model? - What have I learned? ] .column[ .image-50[![](img/zumel_mount_cycle.png)] ] --- class: split-50 ### Presentation .column[ - Again, what are the measurements that tell the real story? - How can I describe and visualize them effectively? ] .column[ .image-50[![](img/zumel_mount_cycle.png)] ] --- class: split-50 ### Deployment .column[ - Where will it be hosted? - Who will use it? - Who will maintain it? 
] .column[ .image-50[![](img/zumel_mount_cycle.png)] ] --- # An Illustrative Analysis http://fivethirtyeight.com has a clever series of articles on the types of movies different actors make in their careers: https://fivethirtyeight.com/tag/hollywood-taxonomy/ I'd like to do a similar analysis. Let's do this in order: 1) Let's do this analysis for Diego Luna 2) Let's use a clustering algorithm to determine the different types of movies they make 3) Then, let's write an application that performs this analysis for any actor and test it with <NAME> 4) Let's make the application interactive so that a user can change the actor and the number of movie clusters the method learns. --- ## Gathering data ### Movie ratings For this analysis we need to get the movies Diego Luna was in, along with their Rotten Tomatoes ratings. For that we scrape this webpage: https://www.rottentomatoes.com/celebrity/diego_luna. ```{r, echo=FALSE, message=FALSE,cache=FALSE} library(tidyverse) library(readr) library(rvest) library(stringr) library(broom) ``` ```{r read_dl, echo=FALSE, cache=FALSE, message=FALSE} # URL base for search base_url <- "https://www.rottentomatoes.com/celebrity/" # let's see how this works for Diego Luna # scrape the table from the website dl_url <- paste0(base_url, "diego_luna") dl_html <- read_html(dl_url) dl_tab <- dl_html %>% html_node(".celebrity-filmography") %>% html_node("table") %>% html_table() %>% as_tibble() ``` ```{r clean_dl, echo=FALSE, cache=FALSE, message=FALSE} # clean it up clean_dl_tab <- dl_tab %>% dplyr::filter(Rating != "No Score Yet") %>% mutate(Rating = str_replace(Rating, "%", "")) %>% dplyr::filter(!str_detect(Credit, "Prod") & !str_detect(Credit, "Dir")) %>% readr::type_convert() ``` ```{r, echo=FALSE} clean_dl_tab %>% head(7) %>% knitr::kable("html") ``` This data includes, for each of the movies Diego Luna has acted in, the rotten tomatoes rating, the movie title, Diego Luna's role in the movie, the U.S. 
domestic gross and the year of release. --- ### Movie budgets and revenue For the movie budgets and revenue data we scrape this webpage: http://www.the-numbers.com/movie/budgets/all ```{r read_budget, eval=FALSE, echo=FALSE, cache=TRUE} # scrape the webpage budget_url <- "http://www.the-numbers.com/movie/budgets/all" budget_html <- read_html(budget_url) budget_tab <- budget_html %>% html_node("table") %>% html_table(fill=TRUE) %>% select(-1) %>% as_tibble() ``` ```{r, eval=FALSE, echo=FALSE} # clean up the result clean_budget_tab <- budget_tab %>% # remove all those NA rows filter(!is.na(`Release Date`)) %>% # make the budget columns look numeric mutate_at(vars(-1,-2), funs(str_replace(., "\\$", ""))) %>% mutate_at(vars(-1,-2), funs(str_replace_all(., ",", ""))) %>% # rename columns rename(release_date=`Release Date`, movie=Movie, production_budget=`Production Budget`, domestic_gross=`Domestic Gross`, worldwide_gross=`Worldwide Gross`) %>% # convert columns to proper types type_convert(cols(release_date=col_date(format="%m/%d/%Y"))) %>% # represent budget and gross in millions mutate_at(vars(-1,-2), funs(. / 1e6)) ``` <!-- (Note 01.2018: after the initial version of this analysis, this website added pagination to this URL. We will be using the CSV file scraped originally in Summer 2017 for this analysis and leave the issue of dealing with pagination as an exercise.) --> ```{r read_budget_csv, message=FALSE, echo=FALSE, cache=TRUE} budget_filename <- "data/movie_budgets.csv" clean_budget_tab <- read_csv(budget_filename) ``` This is part of what we have for that table after scraping and cleaning up: ```{r, echo=FALSE} clean_budget_tab %>% head(10) %>% knitr::kable("html") ``` --- ### Movie budgets and revenue Now we have data for `r nrow(clean_budget_tab)` movies, including its release date, title, production budget, US domestic and worlwide gross earnings. The latter three are in millions of U.S. dollars. 
--- class: split-40 ### Movie budgets and revenue .column[ One thing we might want to check is if the budget and gross entries in this table are inflation adjusted or not. ] .column[ ```{r, echo=FALSE, message=FALSE, out.width="50%",fig.align="center"} library(lubridate) clean_budget_tab %>% mutate(year=factor(year(release_date))) %>% ggplot() + aes(x=year, y=domestic_gross) + geom_boxplot() + theme_bw() ``` ] --- ## Manipulating the data Next, we combine the datasets we obtained to get closer to the data we need to make the plot we want. ```{r, echo=FALSE} joined_tab <- clean_dl_tab %>% # join the two tables together inner_join(clean_budget_tab, by=c(Title="movie")) ``` We combine the two datasets using the movie title, so that the end result has the information in both tables for each movie. ```{r, echo=FALSE} joined_tab %>% knitr::kable("html") ``` --- ## Visualizing the data ```{r, echo=FALSE, fig.cap="Ratings and U.S. Domestic Gross of Diego Luna's movies.", fig.align="center"} joined_tab %>% ggplot() + theme_bw() + aes(x=Rating, y=domestic_gross) + geom_point(size=3) + labs(title="Diego Luna's movies", x="Rotten Tomato Rating", y="Domestic gross (Millions)") ``` --- ## Modeling data Use a clustering algorithm to partition Diego Luna's movies based on rating and domestic gross. 
```{r, echo=FALSE} library(class) library(broom) kmeans_result <- joined_tab %>% select(Rating, domestic_gross) %>% kmeans(centers=3) clustered_tab <- kmeans_result %>% augment(data=joined_tab) %>% rename(cluster=.cluster) %>% as_tibble() kmeans_centers <- kmeans_result %>% tidy() %>% as_tibble() ``` ```{r, echo=FALSE} clustered_tab %>% select(Title, Rating, domestic_gross, cluster) %>% arrange(cluster) %>% knitr::kable("html") ``` --- ## Visualizing model result ```{r, echo=FALSE, fig.align="center"} final_plot <- clustered_tab %>% ggplot() + aes(x=Rating, y=domestic_gross, color=cluster) + geom_point(size=3.4) + theme_bw() + labs(title="<NAME>'s movies", x="Rotten Tomatoes rating", y="Domestic Gross (Millions)") final_plot ``` --- ## Visualizing model result To make the plot and clustering more interpretable, let's annotate the graph with some movie titles. - In the k-means algorithm, each group of movies is represented by an average rating and an average domestic gross. -- - Find the movie in each group that is closest to the average and use that movie title to annotate each group in the plot. ```{r, echo=FALSE, message=FALSE} # join the extended movie table with the centers table annot_tab <- clustered_tab %>% select(title=Title, rating=Rating, domestic_gross, cluster) %>% left_join(select(kmeans_centers, x1, x2, cluster)) %>% # calculate the distance of each movie to its center mutate(center_dist=sqrt((rating-x1)^2+(domestic_gross-x2)^2)) %>% # find the movie closest to each center group_by(cluster) %>% arrange(center_dist) %>% slice(1) ``` --- ### Visualizing model result ```{r, echo=FALSE, fig.align="center"} final_plot + annotate("text", x=annot_tab$x1, y=annot_tab$x2, label=annot_tab$title) ``` --- ## Abstracting the analysis While not a tremendous success, we decide we want to carry on with this analysis. We would like to do this for other actors' movies. 
One of the big advantages of using R and Python is that we can write a piece of code as functions that takes an actor's name as input, and reproduces the steps of this analysis for that actor. --- ## Abstracting the analysis For our analysis, this function must do the following: 1. Scrape movie ratings from Rotten Tomatoes 2. Clean up the scraped data 3. Join with the budget data we downloaded previously 4. Perform the clustering algorithm 5. Make the final plot With this in mind, we can write functions for each of these steps, and then make one final function that puts all of these together. --- ## Abstracting the analysis For instance, let's write the scraping function. It will take an actor's name and output the scraped data. ```{r, echo=FALSE, cache=FALSE} scrape_rt <- function(actor, base_url="https://www.rottentomatoes.com/celebrity/") { url <- paste0(base_url, actor) html <- read_html(url) html %>% html_node(".celebrity-filmography") %>% html_node("table") %>% html_table() %>% as_tibble() } ``` Let's test it with <NAME>: ```{r scrape_ggb, cache=FALSE, echo=FALSE} ggb_tab <- scrape_rt("gael_garcia_bernal") ``` ```{r, echo=FALSE} ggb_tab %>% head(3) %>% knitr::kable("html") ``` --- class: split-40 ## Abstracting the analysis .column[ We can then write functions for each of the steps we did with Diego Luna before. 
```{r, echo=FALSE, cache=FALSE} cleanup_rt_tab <- function(data) { data %>% # make sure the movie is rated dplyr::filter(Rating != "No Score Yet") %>% # make the rating look numeric mutate(Rating = str_replace(Rating, "%", "")) %>% # remove producer and director credits dplyr::filter(!str_detect(Rating, "Prod") & !str_detect(Rating, "Dir")) %>% # convert to proper types readr::type_convert() } ``` ```{r, echo=FALSE} join_budget <- function(data) { data %>% # join the two tables together inner_join(clean_budget_tab, by=c(Title="movie")) } ``` ```{r, echo=FALSE, eval=FALSE} ggb_tab %>% cleanup_rt_tab() %>% join_budget() %>% head() %>% knitr::kable() ``` ```{r, echo=FALSE} cluster_movies <- function(data, k=3) { data <- data %>% select(rating=Rating, title=Title, domestic_gross) kmeans_result <- data %>% select(rating, domestic_gross) %>% kmeans(centers=k) clustered_tab <- kmeans_result %>% augment(data=data) %>% rename(cluster=.cluster) %>% as_tibble() kmeans_centers <- kmeans_result %>% tidy() %>% as_tibble() clustered_tab %>% left_join(select(kmeans_centers, x1, x2, cluster)) %>% # calculate the distance of each movie to its center mutate(center_dist=sqrt((rating-x1)^2+(domestic_gross-x2)^2)) } ``` ```{r, echo=FALSE, eval=FALSE} ggb_tab %>% cleanup_rt_tab() %>% join_budget() %>% cluster_movies() %>% knitr::kable() ``` ```{r, echo=FALSE} plot_movies <- function(data, actor) { plt <- data %>% ggplot() + aes(x=rating, y=domestic_gross, color=cluster) + geom_point(size=2.3) + theme_bw() + labs(title=paste0(actor, "'s movies"), x="Rotten Tomatoes rating", y="Domestic Gross (Millions)") annot_dat <- data %>% group_by(cluster) %>% arrange(center_dist) %>% slice(1) plt <- plt + annotate("text", x=annot_dat$x1, y=annot_dat$x2, label=annot_dat$title) plt } ``` ```{r, echo=FALSE, eval=FALSE} ggb_tab %>% cleanup_rt_tab() %>% join_budget() %>% cluster_movies() %>% plot_movies("<NAME>") ``` ```{r, echo=FALSE, cache=FALSE} analyze_actor <- function(actor, k=3, 
base_url="https://www.rottentomatoes.com/celebrity/") { # first let's make the name work with RT rt_name <- actor %>% str_to_lower() %>% str_replace_all(" ", "_") message("Scraping Rotten Tomatoes with name ", rt_name) dirty_dat <- scrape_rt(rt_name, base_url=base_url) message("Preparing data for analysis") clean_dat <- dirty_dat %>% cleanup_rt_tab() %>% join_budget() message("Performing clustering and plotting") clean_dat %>% cluster_movies(k=k) %>% plot_movies(actor) } ``` ```{r eval=FALSE} analyze_actor("<NAME>") ``` ] .column[ ```{r test_bbg, cache=FALSE, message=FALSE, fig.height=5, echo=FALSE} analyze_actor("<NAME>") ``` ] --- ## Making analyses accessible Now that we have written a function to analyze an actor's movies, we can make these analyses easier to produce by creating an interactive application that wraps our new function. The `shiny` R package makes creating this type of application easy. https://hcorrada.shinyapps.io/movie_app/ --- ## Summary In this analysis we saw examples of the common steps and operations in a data analysis: 1) Data ingestion: we scraped and cleaned data from publicly accessible sites 2) Data manipulation: we integrated data from multiple sources to prepare our analysis --- ## Summary 3) Data visualization: we made plots to explore patterns in our data 4) Data modeling: we made a model to capture the grouping patterns in data automatically, using visualization to explore the results of this modeling 5) Publishing: we abstracted our analysis into an application that allows us and others to perform this analysis over more datasets and explore the result of modeling using a variety of parameters <file_sep>/content/projects/index.md --- date: 2016-08-29T08:54:44-04:00 title: CMSC320 Projects --- 1. [Data scraping and cleaning](project1/) Due March 9, 2020 2. [Data wrangling and EDA](project2/) Due April 3, 2020 3. [Regression](project3/) Due April 20, 2020 4. [Classification](project4/) Due May 6, 2020 5. 
[Interactive data visualization and mapping](project5/) Due May 12, 2020 [Final Project:](final_project/) Due May 18, 2020 <file_sep>/materials/lecture-notes/06-more_operations.Rmd # Principles: More Operations ```{r sec06_setup, echo=FALSE, message=FALSE} knitr::opts_chunk$set(cache=TRUE) library(tidyverse) arrest_tab <- read_csv("data/BPD_Arrests.csv") ``` In the previous section we introduced our first few operations to manipulate data frames. Next, we learn a few more: sorting, creating new attributes, summarizing and grouping. Finally we will take a short detour through a discussion on vectors. ## Operations that sort entities The first operation we will look at today is used to sort entities based on their attribute values. As an example, suppose we wanted to find the arrests with the 10 youngest subjects. If we had an operation that re-orders entities based on the value of their `age` attribute, we can then use the `slice` operation we saw before to create a data frame with just the entities of interest ```{r arrange_slice} arrest_tab %>% arrange(age) %>% slice(1:10) ``` The `arrange` operation sorts entities by increasing value of the attributes passed as arguments. The `desc` helper function is used to indicate sorting by decreasing value. For example, to find the arrests with the 10 _oldest_ subjects we would use: ```{r arrange_desc_slice} arrest_tab %>% arrange(desc(age)) %>% slice(1:10) ``` ## Operations that create new attributes We will often see that for many analyses, be it for interpretation or for statistical modeling, we will create new attributes based on existing attributes in a dataset. ![](img/mutate.png) Suppose I want to represent age in months rather than years in our dataset. To do so I would multiply 12 to the existing age attribute. 
The function `mutate` creates new attributes based on the result of a given expression: ```{r mutate_age} arrest_tab %>% mutate(age_months = 12 * age) %>% select(arrest, age, age_months) ``` ## Operations that summarize attribute values over entities Once we have a set of entities and attributes in a given data frame, we may need to summarize attribute values over the set of entities in the data frame. It collapses the data frame to a single row containing the desired attribute summaries. ![](img/summarize.png) Continuing with the example we have seen below, we may want to know what the minmum, maximum and average age in the dataset is: ```{r summarize_mean} summarize(arrest_tab, min_age=min(age), mean_age=mean(age), max_age=max(age)) ``` The `summarize` functions takes a data frame and calls a summary function over attributes of the data frame. Common summary functions to use include: | Operation(s) | Result | |-----------|-------------| | `mean`, `median` | average and median attribute value, respectively | | `sd` | standard deviation of attribute values | | `min`, `max` | minimum and maximum attribute values, respectively | | `n`, `n_distinct` | number of attribute values and number of _distinct_ attribute values | | `any`, `all` | for logical attributes (TRUE/FALSE): is `any` attribute value TRUE, or are `all` attribute values TRUE | Let's see the number of distinct districts in our dataset: ```{r count_district} summarize(arrest_tab, n_distinct(district)) ``` We may also refer to these summarization operation as **aggregation** since we are computing _aggregates_ of attribute values. ## Operations that group entities Summarization (therefore aggregation) goes hand in hand with data grouping, where summaries are computed _conditioned_ on other attributes. The notion of _conditioning_ is fundamental to data analysis and we will see it very frequently through the course. 
It is the basis of statistical analysis and Machine Learning models and it is essential in understanding the design of effective visualizations. ![](img/groupby.png) The goal is to group entities with the same value of one or more attributes. The `group_by` function in essence annotates the rows of a data frame as belonging to a specific group based on the value of some chosen attributes. This call returns a data frame that is grouped by the value of the `district` attribute. ```{r groupby} group_by(arrest_tab, district) ``` Subsequent operations are then performed **for each group independently**. For example, when `summarize` is applied to a grouped data frame, summaries are computed for each group of entities, rather than the whole set of entities. For instance, let's calculate minimum, maximum and average age for each district in our dataset: ```{r groupby_summarize} arrest_tab %>% group_by(district) %>% summarize(min_age=min(age), max_age=max(age), mean_age=mean(age)) ``` Note that after this operation we have effectively changed the entities represented in the result. The entities in our original dataset are arrests while the entities for the result of the last example are the districts. This is a general property of group_by and summarize: it defines a data set where entities are defined by distinct values of the attributes we use for grouping. Let's look at another example combining some of the operations we have seen so far. Let's compute the average age for subjects 21 years or older grouped by district and sex: ```{r groupby_example} arrest_tab %>% filter(age >= 21) %>% group_by(district, sex) %>% summarize(mean_age=mean(age)) ``` **Exercise**: Write a data operation pipeline that 1) filters records to the southern district and ages between 18 and 25 2) computes mean arrest age for each sex ## Vectors We briefly saw previously operators to create vectors in R. 
For instance, we can use `seq` to create a vector that consists of a sequence of integers: ```{r seq_example} multiples_of_three <- seq(3, 30, by=3) multiples_of_three ``` Let's how this is represented in R (the `str` is very handy to do this type of digging around): ```{r class_seq, cache=FALSE} str(multiples_of_three) ``` So, this is a `numeric` vector of length 10. Like many other languages we use square brackets `[]` to index vectors: ```{r indexing} multiples_of_three[1] ``` We can use ranges as before ```{r index_range} multiples_of_three[1:4] ``` We can use vectors of non-negative integers for indexing: ```{r index_vec} multiples_of_three[c(1,3,5)] ``` Or even logical vectors: ```{r index_logical} multiples_of_three[c(TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE)] ``` In R, most operations are designed to work with vectors directly (we call that _vectorized_). For example, if I want to add two vectors together I would write: (look no `for` loop!): ```{r sum_vec} multiples_of_three + multiples_of_three ``` This also works for other arithmetic and logical operations (e.g., `-`, `*`, `/`, `&`, `|`). Give them a try! In data analysis the _vector_ is probably the most fundamental data type (other than basic numbers, strings, etc.). Why? Consider getting data about one attribute, say height, for a group of people. What do you get? An array of numbers, all in the same unit (say feet, inches or centimeters). How about their name? Then you get an array of strings. Abstractly, we think of vectors as arrays of values, all of the same _class_ or datatype. ## Attributes as vectors In fact, in the data frames we have been working on, each column, corresponding to an attribute, is a vector. We use the `pull` function to extract a vector from a data frame. 
We can then index them, or operate on them as vectors
<file_sep>/content/calendar/index.md --- date: 2016-08-30T10:13:51-04:00 title: CMSC320 Calendar --- <file_sep>/materials/lecture-notes/22-eda_missing_data.Rmd # EDA: Handling Missing Data ```{r, echo=FALSE, message=FALSE} knitr::opts_chunk$set(cache=TRUE) library(tidyverse) theme_set(theme_bw()) ``` We can now move on to a very important aspect of data preparation and transformation: how to deal with missing data? By missing data we mean values that are unrecorded, unknown or unspecified in a dataset. We saw an example of this when we looked at the tidy unit. Here is the tidy weather dataset again: ```{r, echo=FALSE, message=FALSE} data_dir <- "data" weather <- read_csv(file.path(data_dir, "weather.csv")) weather ``` And the result of tidying this dataset: ```{r} tidy_weather <- weather %>% gather(day, temp, d1:d31) %>% spread(element, temp) tidy_weather ``` In this dataset, temperature observations coded as `NA` are considered _missing_. Now, we can imagine a few reasons why measurements would be missing in this dataset: (a) the measurement failed in a specific day for a specific weather station, or (b) certain stations only measure temperatures on specific days of the month, or (c) the measurement fails if the temperature is too high or too low. Knowing which of these applies can change how we approach this missing data. As you can see, how to treat missing data depends highly on how the data was obtained, and the more you know about a dataset, the better decision you can make. In general, the central question with missing data is: should we *remove* observations with missing values, or should we *impute* missing values? In fact, can we do anything with a dataset that is missing data at all? The answers to these require us to think about **why** the data is missing. 
## Mechanisms of missing data For this discussion let's assume we have an attribute $y$ that contains missing data, a binary attribute $r$ that encodes if observation in $y$ is missing, and other attributes $x$ in our dataset. Also, we will make statements like _depend_ or _not depend_, e.g., value of $r_i$ does not depend on value of $y_i$. For now, until we formalize this concept further, you can take this to mean the: _properties of the distribution of $r$ do not change based on values of $y$_. **Data missing completely at random (MCAR)**: missingness does not depend on any values of the missing or measured data. That is missingness $r_i$ does not depend on the (unobserved) value $y_i$ or on observed values $x_i$. In this case, entities with missing data can be removed from the analysis safely. Imputation can be performed, see more below. In our weather example, this would be case (a): stations failed for no discernible reason. **Data missing at random (MAR)**: missingness $r_i$ does not depend on value of $y_i$, but may depend on the value of $x_i$. Here, removing data can bias analysis since you would drop values of $x$ based on missingness and potentially change the distribution of $x$. Imputation can be done as well, see more below. In our weather example, this would be case (b): measurements are not taken on specific days of the month (where "day of the month" serves the role of $x$). **Data not missing at random (NMAR)**: missingness $r_i$ depends on $y_i$. This is the most pernicious of all, and usually means that we want to go back to our collaborator and tell them that we are in a bind. Removing or imputing data as discussed below is not appropriate in this case, and appropriate methods to deal with it are beyond the scope of this discussion. This is a good resource: https://www.wiley.com/en-us/Statistical+Data+Cleaning+with+Applications+in+R-p-9781118897157 (Ch. 10). 
In our weather example this would be case (c): measurements fail when the temperature is too hot or cold. So in general, the **first step** when dealing with missing data is to understand *why* and *how* data may be missing. I.e., talk to collaborator, or person who created the dataset. ## Handling missing data ### Removing missing data Once you know that data is MCAR and a relatively small fraction of observations have missing values, then it may be safe to remove observations. ```{r} tidy_weather_nomissing <- tidy_weather %>% tidyr::drop_na(tmax, tmin) tidy_weather_nomissing ``` ### Encoding as missing In the MCAR or MAR case for categorical attributes $y$, a useful approach is to encode the fact that a value is missing as a new category and include that in subsequent analysis of attribute $y$. ```{r, message=FALSE} tb <- read_csv(file.path("data", "tb.csv")) tidy_tb <- tb %>% gather(demo, n, -iso2, -year) %>% separate(demo, c("sex", "age"), sep=1) tidy_tb %>% tidyr::replace_na(list(iso2="missing")) ``` ### Imputation #### MCAR (also for MAR, but this is not ideal) In this case we can use a simple method for imputation of $y$. For numeric attributes we replace missing values in $y$ with the mean of non-missing values of $y$. ```{r} flights %>% tidyr::replace_na(list(dep_delay=mean(.$dep_delay, na.rm=TRUE))) ``` For categorical attributes $y$, we replace missing values with the most common category in the non-missing values of $y$. #### MAR In this case we use a more complex method by replacing missing values for attribute $y$ predicting from other variables $x$ when variables are related (we will see linear regression using the `lm` and `predict` functions later on) ```{r} dep_delay_fit <- flights %>% lm(dep_delay~origin, data=.) 
# use average delay conditioned on origin airport flights %>% modelr::add_predictions(dep_delay_fit, var="pred_delay") %>% mutate(dep_delay_fixed = ifelse(!is.na(dep_delay), dep_delay, pred_delay)) %>% select(origin, dest, dep_delay, dep_delay_fixed) %>% filter(is.na(dep_delay)) ``` For categorical attributes we use a different kind of regression more appropriate to categorical attributes (logistic regression, again we will see that later on). For both imputation methods, a common approach is to add an additional indicator variable stating if numeric missing value was imputed ```{r} flights %>% mutate(dep_delay_missing = is.na(dep_delay)) ``` ## Implications of imputation Imputation has some effects that can impact analysis. (a) The central tendency of data is retained. For example, if we impute missing data using the mean of a numeric variable, the mean after imputation will not change. This is a good reason to impute based on estimates of central tendency. (b) The _spread_ of the data will change. After imputation, the spread of the data will be smaller relative to spread if we ignore missing values. This could be problematic as underestimating the spread of data can yield over-confident inferences in downstream analysis. We will not address these issues directly in later chapters, but you should be aware of this. <file_sep>/materials/lecture-notes/21-eda_transformations.Rmd # EDA: Data Transformations ```{r, echo=FALSE, warning=FALSE, message=FALSE} knitr::opts_chunk$set(cache=TRUE) library(tidyverse) theme_set(theme_bw()) ``` Having a sense of how data is distributed, both from using visual or quantitative summaries, we can consider transformations of variables to ease both interpretation of data analyses and the application statistical and machine learning models to a dataset. ## Centering and scaling A very common and important transformation is to scale data to a common unit-less scale. 
Informally, you can think of this as transforming variables from whatever units they are measured (e.g., diamond depth percentage) into "standard deviations away from the mean" units (actually called _standard units_, or $z$-score). Given data $x = x_1, x_2, \ldots, x_n$, the transformation applied to obtain centered and scaled variable $z$ is: $$ z_i = \frac{(x_i - \overline{x})}{\mathrm{sd}(x)} $$ where $\overline{x}$ is the mean of data $x$, and $\mathrm{sd}(x)$ is its standard deviation. ```{r} library(ggplot2) data(diamonds) diamonds %>% mutate(scaled_depth = (depth - mean(depth)) / sd(depth)) %>% ggplot(aes(x=scaled_depth)) + geom_histogram(binwidth=.5) ``` Question: what is the mean of $z$? What is its standard deviation? Another name for this transformation is to _standardize_ a variable. One useful result of applying this transformation to variables in a dataset is that all variables are in the same, and thus comparable units. On occasion, you will have use to apply transformations that only _center_ (but not scale) data: $$ z_i = (x_i - \overline{x}) $$ Question: what is the mean of $z$ in this case? What is its standard deviation? Or, apply transformations that only _scale_ (but not center) data: $$ z_i = \frac{x_i}{\mathrm{sd}(x)} $$ Question: what is the mean of $z$ in this case? What is its standard deviation? ## Treating categorical variables as numeric Many modeling algorithms work strictly on numeric measurements. For example, we will see methods to predict some variable given values for other variables such as linear regression or support vector machines, that are strictly defined for numeric measurements. In this case, we would need to transform categorical variables into something that we can treat as numeric. We will see more of this in later sections of the course but let's see a couple of important guidelines for _binary_ variables (categorical variables that only take two values, e.g., `health_insurance`). 
One option is to encode one value of the variable as 1 and the other as 0. For instance: ```{r} library(ISLR) library(tidyverse) data(Wage) Wage %>% mutate(numeric_insurace = ifelse(health_ins == "1. Yes", 1, 0)) %>% head() ``` Another option is to encode one value as 1 and the other as -1: ```{r} Wage %>% mutate(numeric_insurance = ifelse(health_ins == "1. Yes", 1, -1)) %>% head() ``` The decision of which of these two transformations to use is based on the method to use or the goal of your analysis. For instance, when predicting someone's wage based on their health insurance status, the 0/1 encoding lets us make statements like: "on average, wage increases by $XX if a person has health insurance". On the other hand, a prediction algorithm called a Support Vector Machine is strictly defined on data coded as 1/-1. For categorical attributes with more than two values, we extend this idea and encode _each_ value of the categorical variable as a 0/1 column. You will see this referred to as _one-hot-encoding_. ```{r} Wage %>% mutate(race_white = ifelse(race == "1. White", 1, 0), race_black = ifelse(race == "2. Black", 1, 0), race_asian = ifelse(race == "3. Asian", 1, 0), race_other = ifelse(race == "4. Other", 1, 0)) %>% select(starts_with("race")) %>% head() ``` The builtin function `model.matrix` does this general transformation. We will see it when we look at statistical and Machine Learning models. ### Discretizing continuous values. How about transforming data in the other direction, from continuous to discrete values. This can make it easier to compare differences related to continuous measurements: Do doctors prescribe a certain medication to older kids more often? Is there a difference in wage based on age? It is also a useful way of capturing non-linear relationships in data: we will see this in our regression and prediction unit. 
Two standard methods used for discretization are to use **equal-length** bins, where variable range is divided into bins _regardless_ of the data distribution: ```{r, eval=TRUE} flights %>% mutate(dep_delay_discrete = cut(dep_delay, breaks=100)) %>% ggplot(aes(x=dep_delay_discrete)) + geom_bar() ``` The second approach uses **equal-sized** bins, where the range is divided into bins _based_ on data distribution ```{r, eval=TRUE} flights %>% mutate(dep_delay_discrete = cut(dep_delay, breaks=quantile(dep_delay, probs=seq(0,1,len=11), na.rm=TRUE))) %>% ggplot(aes(x=dep_delay_discrete)) + geom_bar() ``` In both cases, the `cut` function is used to apply discretization, with the `breaks` argument determining which method is applied. In the first example, `breaks=100` specifies that 100 bins of equal-length are to be used. In the second example, the `quantile` function is used to define 10 equal-sized bins. ## Skewed Data In many data analyses, variables will have a _skewed_ distribution over their range. In the last section we saw one way of defining skew using quartiles and median. Variables with skewed distributions can be hard to incorporate into some modeling procedures, especially in the presence of other variables that are not skewed. In this case, applying a transformation to reduce skew will improve performance of models. Also, skewed data may arise when measuring *multiplicative* processes. This is very common in physical or biochemical processes. In this case, interpretation of data may be more intuitive after a transformation. We have seen an example of skewed data previously when we looked at departure delays in our flights dataset. ```{r echo=FALSE} library(tidyverse) library(nycflights13) ``` ```{r, fig.width=20} flights %>% ggplot(aes(x=dep_delay)) + geom_histogram(binwidth=30) ``` Previously, we looked at a way of determining skew for a dataset. 
Let's see what that looks like for the `dep_delay` variable: (see [dplyr vignette](https://cran.r-project.org/web/packages/dplyr/vignettes/programming.html) for info on 'enquo' and '!!') ```{r} compute_skew_stat <- function(df, attribute) { attribute <- enquo(attribute) df %>% summarize(med_attr=median(!!attribute, na.rm=TRUE), q1_attr=quantile(!!attribute, 1/4, na.rm=TRUE), q3_attr=quantile(!!attribute, 3/4, na.rm=TRUE)) %>% mutate(d1 = med_attr - q1_attr, d2 = q3_attr - med_attr, skew_stat = d1 - d2) %>% select(d1, d2, skew_stat) } flights %>% compute_skew_stat(dep_delay) ``` In many cases a logarithmic transform is an appropriate transformation to reduce data skew: - If values are all positive: apply `log2` transform - If some values are negative, two options - Started Log: shift all values so they are positive, apply `log2` - Signed Log: $sign(x) \times log2(abs(x) + 1)$. Here is a signed log transformation of departure delay data: ```{r} transformed_flights <- flights %>% mutate(transformed_dep_delay = sign(dep_delay) * log2(abs(dep_delay) + 1)) transformed_flights %>% ggplot(aes(x=transformed_dep_delay)) + geom_histogram(binwidth=1) ``` Let's see if that reduced the skew of the dataset: ```{r} transformed_flights %>% compute_skew_stat(transformed_dep_delay) ``` <file_sep>/materials/lectures/SGD/sgd.Rmd --- title: "Stochastic Gradient Descent: Learning models for large-scale data" author: CMSC320 date: "`r Sys.Date()`" --- ```{r, echo=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` In this unit we address the question: How to fit the type of analysis methods we've seen so far? How to do so for large datasets? We have seen two learning models, linear models for regression and logistic regression for classification. In this unit we derive a general algorithm, stochastic gradient descent (SGD), to learn the parameters of these models. 
This is not necessarily the best algorithm to fit these models, but it has many appealing features: it is conceptually simple, computationally efficient (remember this is not the same as learning, or estimation, efficiency), and it has a structure that makes it straightforward to apply to large datasets. We will use linear regression as a case study to develop SGD. ### Case Study Let's use linear regression with one predictor, no intercept as a case study. **Given**: Training set $\{(x_1, y_1), \ldots, (x_n, y_n)\}$, with continuous response $y_i$ and single predictor $x_i$ for the $i$-th observation. **Do**: Estimate parameter $\beta_1$ in model $y=\beta_1 x$ to solve $$ \min_{\beta_1} L(\beta_1) = \frac{1}{2} \sum_{i=1}^n (y_i - \beta_1 x_i)^2 $$ And suppose we want to fit this model to the following (simulated) data: ```{r, fig.height=10, fig.width=15} set.seed(1234) true_beta <- 5 x <- runif(100, -10, 10) y <- x * true_beta + rnorm(100, mean=0, sd=sqrt(10)) plot(x,y,pch=19,cex=1.4,main="Simulated Data", cex.lab=1.5, cex.main=2) abline(a=0, b=true_beta, col="red", lwd= 2) ``` Our goal is then to find the value of $\beta_1$ that minimizes mean squared error. 
This corresponds to finding one of these many possible lines: ```{r, echo=FALSE, fig.height=10, fig.width=15} plot(x,y,pch=19,cex=1.4,main="Simulated Data", cex.lab=1.5, cex.main=2) abline(a=0, b=true_beta, col="red", lwd= 2) for (b in seq(-6,6, len=5)) { abline(a=0,b=b,col="blue", lwd=2, lty=2) } legend("bottom", legend=paste("beta=", seq(-6,6,len=5)), lwd=2, lty=2, cex=1.5) ``` Each of which has a specific error for this dataset: ```{r, echo=FALSE, fig.height=10, fig.width=15} n <- length(y) compute_loss <- function(beta, x, y) { 0.5 * mean((y-x*beta)^2) } beta <- seq(-20, 20, len=100) plot(beta, sapply(beta, compute_loss, x=x, y=y), type="l", lwd=2, ylab=expression(L(beta[1])),cex.lab=1.5,xlab=expression(beta[1])) abline(v=true_beta, col="red", lwd=2) abline(v=seq(-6,6,len=5), col="blue", lwd=2, lty=2) ``` Insights: 1) As we saw before in class, loss is minimized when the derivative of the loss function is 0 2) and, (this is key for this algorithm), the derivative of the loss (with respect to $\beta_1$) at a given estimate $\beta_1$ suggests new values of $\beta_1$ with smaller loss! 
Let's take a look at the derivative: $$ \frac{\partial}{\partial \beta_{1}} L(\beta_1) = \frac{\partial}{\partial \beta_{1}} \frac{1}{2} \sum_{i=1}^n (y_i - \beta_1 x_i)^2 \\ {} = \sum_{i=1}^n (y_i - \beta_1 x_i) \frac{\partial}{\partial \beta_1} (y_i - \beta_1 x_i) \\ {} = \sum_{i=1}^n (y_i - \beta_1 x_i) (-x_i) $$ and plot it for our case study data: ```{r, echo=FALSE, fig.width=15, fig.height=10, cache=FALSE} loss_derivative <- function(beta, x, y) { f <- beta * x resid <- y - f sum(resid * (-x)) } plot(beta, sapply(beta, loss_derivative, x=x, y=y), type="l", lwd=1.5, xlab=expression(beta[1]), ylab=expression(partialdiff * L(beta[1]) / partialdiff * beta[1]),cex.lab=1.7) abline(v=true_beta, col="red", lwd=2) abline(v=seq(-6,6,len=5), col="blue", lwd=2, lty=2) abline(h=0, col="black", lwd=2, lty=2) ``` We can see that the **negative gradient** indicates a direction in which the loss function is reduced. That is, where fit error is reduced. In this example, the derivative at $\beta_1=0$ indicates we should _increase_ $\beta_1$ to _decrease_ loss. Notice, however, that moving $\beta_1$ in the negative gradient direction arbitrarily is not always good since moving $\beta_1$ in the positive direction can increase error. ### Gradient Descent This plot suggests an algorithm: 1. Initialize $k=0$ and $\beta_1^k=0$ 2. Repeat until convergence - Set $\beta_1^{k+1} = \beta_1^k + \alpha \sum_{i=1}^n (y_i - f(x_i; \beta_1^k)) x_i$ - Set $k=k+1$ where $f(x_i;\beta_1)=\beta_1 x_i$. This algorithm is called **gradient descent** in the general case. The basic idea is to move the current estimate of $\beta_1$ in the direction that minimizes loss the *fastest*. Another way of calling this algorithm is **Steepest Descent**. However, we use a step-size $\alpha$ to prevent moving too far in the direction of steepest descent. 
This is a full implementation of this algorithm (for a single predictor) in R: ```{r} # Implementation of gradient descent for least squares regression # for a single predictor (x) # # There is some code here that is only used to generate illustrative plots and would not be part of real solver gradient_descent <- function(x, y, tol=1e-6, maxit=50, plot=FALSE) { # initialize estimate beta_1 <- 0; old_beta_1 <- Inf; i <- 0; beta_keep <- NA # compute loss at first estimate loss <- compute_loss(beta_1, x, y); loss_keep <- NA # starting step size alpha <- 1e-3 difference <- Inf # check for convergence # (in practice, we do include a limit on the number of iterations) while ((difference > tol) && (i < maxit)) { cat("it: ", i, " beta: ", round(beta_1, 2), "loss: ", round(loss, 2), " alpha: ", round(alpha, 6), "\n") # this piece of code just adds steps to an existing plot if (plot && !is.na(beta_keep) && !is.na(loss_keep)) { suppressWarnings(arrows(beta_keep, loss_keep, beta_1, loss, lty=2, col="blue")) } # store the last estimate for plotting beta_keep <- beta_1; loss_keep <- loss; # store the last estimate to check convergence old_beta_1 <- beta_1 # update estimate f <- beta_1 * x resid <- y - f beta_1 <- beta_1 + alpha * sum(resid * x) # compute difference after taking step # to check convergence difference <- (beta_1 - old_beta_1)^2 / (old_beta_1)^2 # compute loss and derivative for updated estimate loss <- compute_loss(beta_1, x, y) i <- i+1 # shorten the step size if ((i %% 3) == 0) alpha <- alpha / 2 } if (plot) { suppressWarnings(arrows(beta_keep, loss_keep, beta_1, loss, lty=2, col="blue")) } beta_1 } ``` Let's run this algorithm and track what it does: ```{r, echo=FALSE, fig.width=15, fig.height=10, cache=FALSE} plot(beta, sapply(beta, compute_loss, x=x, y=y), type="l", lwd=2, ylab=expression(L(beta[1])),cex.lab=1.5,xlab=expression(beta[1]), xlim=c(-20,20), main="Gradient Descent") estimate <- gradient_descent(x, y, plot=TRUE) ``` This algorithm is referred to as 
"Batch" gradient descent, since we take a step (update $\beta_1$) by calculating its derivative with respect to _all_ $n$ observations in our dataset. For clarity, let's write out the update equation again: $$ \beta_1^{k+1} = \beta_1^k + \alpha \sum_{i=1}^n (y_i - f(x_i; \beta_1^k)) x_i $$ where $f(x_i; \beta_1^k) = \beta_1^k x_i$. ## Multiple Regression For multiple predictors (e.g., adding an intercept), this generalizes to the _gradient_ i.e., the vector of first derivatives of _loss_ with respect to parameters. In this case, $f(\mathbf{x}_i; \mathbf{\beta}) = \beta_0 + \beta_1 x_{i1} + \cdots + \beta_p x_{ip}$, where $\mathbf{\beta}$ indicates the _vector_ of parameters in the model, and $\mathbf{x_i}$ indicates the _vector_ of predictor values for example (entity) $i$. In this case, the loss function is $$ L(\mathbf{\beta}) = \frac{1}{2} \sum_{i=1}^n (y_i-f(\mathbf{x_i}; \mathbf{\beta}))^2 $$ and the gradient of loss as a function of parameters $\mathbf{\beta}$ is then given by $$ \nabla_{\mathbf{\beta}} L(\mathbf{\beta}) = \sum_{i=1}^n (y_i - f(\mathbf{x_i}; \mathbf{\beta}))(-\mathbf{x_i}) $$ The update equation in gradient descent has exactly the same form as the single predictor case: $$ \mathbf{\beta}^{k+1} = \mathbf{\beta}^k + \alpha \sum_{i=1}^n (y_i - f(\mathbf{x}_i; \mathbf{\beta}^k)) \mathbf{x}_i $$ Gradiest descent falls within a family of optimization methods called _first-order methods_ (first-order means they use derivatives only). These methods have properties amenable to use with very large datasets: 1. Inexpensive updates 2. "Stochastic" version can converge with few sweeps of the data 3. "Stochastic" version easily extended to streams 4. Easily parallelizable Drawback: Can take many steps before converging ### Stochastic gradient descent One of the appeals of the gradient descent algorithm is that it can be easily adapted for use in settings where training data is large (either large number of entities, or large number of predictors, or both). 
In the case of large numbers of observations, a key idea can be used to adapt the the algorithm to handle this case. As presented, the algorithm updates parameters based on all observations (note the sum over observations above). However, we can also update parameters using the update equation _one observation at a time_: 1. Initialize $\beta=\mathbf{0}$, $i=1$ 2. Repeat until convergence - For $i=1$ to $n$ - Set $\beta = \beta + \alpha (y_i - f(\mathbf{x}_i, \beta)) \mathbf{x}_i$ This algorithm is called _stochastic_ gradient descent, because the order in which steps are taken depend on the order in which we process observations, which is assumed to be stochastic. This is a full implementation of stochastic gradient descent for our example dataset: ```{r} # Implementation of stochastic gradient descent for least squares regression # for a single predictor (x) # # There is some code here that is only used to generate illustrative plots stochastic_gradient_descent <- function(x, y, tol=1e-6, maxit=50, plot=FALSE) { n <- length(y) # initialize estimate beta_1 <- 0; i <- 0; beta_keep <- NA # compute loss at first estimate loss <- compute_loss(beta_1, x, y); loss_keep <- NA # initial step size alpha <- 1e-3 difference <- Inf # check for convergence # (in practice a max number of iterations is used) while ((difference > tol) && (i < maxit)) { cat("it: ", i, " beta: ", round(beta_1, 2), "loss: ", round(loss, 2), " alpha: ", round(alpha, 6), "\n") # store last estimate to check convergence old_beta_1 <- beta_1 # iterate over observations for (j in seq(1,n)) { # add step to plot if (plot && !is.na(beta_keep) && !is.na(loss_keep)) { suppressWarnings(arrows(beta_keep, loss_keep, beta_1, loss, lty=2, col="blue")) } # store last estimate and loss for plotting beta_keep <- beta_1; loss_keep <- loss; # update estimate with j-th observation f <- beta_1 * x[j] resid <- y[j] - f beta_1 <- beta_1 + alpha * resid * x[j] # compute loss with new estimate loss <- compute_loss(beta_1, x, 
y) } # check difference between current and old estimate # to check convergence difference <- (beta_1 - old_beta_1)^2 / old_beta_1^2 i <- i+1 # update step size if ((i %% 5) == 0) alpha <- alpha / 2 } if (plot) { suppressWarnings(arrows(beta_keep, loss_keep, beta_1, loss, lty=2, col="blue")) } beta_1 } ``` Let's run this and see what it does: ```{r, echo=FALSE, fig.width=15, fig.height=10, cache=FALSE} plot(beta, sapply(beta, compute_loss, x=x, y=y), type="l", lwd=2, ylab=expression(L(beta[1])),cex.lab=1.5,xlab=expression(beta[1]), xlim=c(-20,20), main="Stochastic Gradient Descent") estimate <- stochastic_gradient_descent(x, y, plot=TRUE) ``` The stochastic gradient descent algorithm can easily adapt to _data streams_ where we receive observations one at a time and _assume_ they are not stored. This setting falls in the general category of _online_ learning. ### Distributed systems for data analysis The vast majority of the analyses we have done in class are for in-memory data: datasets that can be loaded onto memory of a single computing node. Database systems can execute SQL queries, which can be used for summarization and (some) model learning efficiently (e.g., trees and LDA), over data on disk relatively efficiently, but operations are usually performed by a single computing node. In the 90s database systems that operate over multiple computing nodes became available and were the basis of the first generation of large data warehousing. In the last decade, systems that manipulate data over multiple nodes have become standard. The basic observation is that for very large datasets, many of the operations we've seen for aggregation and summarization, which also form the basis of many learning methods, can be parallelized. 
For example: - partition observations and perform transformation on each partition as a parallel process - partition variables and perform transformation on each variable as a parallel process - for summarization (`group_by` and `summarize`), partition observations based on `group_by` expression, perform `summarize` on each partition. Efficiency of implementation of this type of parallelism depends on underlying architecture: Shared memory vs. Shared storage vs. Shared nothing. For massive datasets, last is usually preferred since fault tolerance is perhaps the most important consideration. ### Map-reduce Map-Reduce is an implementation idea for a shared nothing architecture. It is based on _distributed storage_, _data proximity_ (perform operations on data that is physically close) and _fault tolerance_. Its basic computation paradigm is based on two operations: - reduce: perform operation on subset of observations in parallel - map: decide which parallel process (node) should operate on each observation The fundamental operations that we have learned very well in this class are nicely represented in this framework: `group_by` clause corresponds to `map`, and `summarize` function corresponds to `reduce`. ```{r, fig.width=8, fig.height=2.4, echo=FALSE} library(png) library(grid) img <- readPNG("mr1.png") grid.raster(img) ``` Map-reduce is most efficient when computations are organized in an acyclic graph. This way, data is moved from stable storage to computing process and the result moved to stable storage without much concern for operation ordering. This type of architecture provides runtime benefits due to flexible resource allocation and strong failure recovery. However, existing implementations of Map-reduce systems do not support interactive use, or workflows that are hard to represent as acyclic graphs. ### Spark Spark is a relatively recent system, based on the general map-reduce framework, for ultra-fast data analysis. 
It provides efficient support for interactive analysis (the kind we do in R) and it is designed to support iterative workflows needed by many Machine Learning algorithms. The basic data abstraction in Spark is the resilient distributed dataset (RDD). This permits applications to keep working sets of data in memory and support iterative algorithms and interactive workflows. They are: (1) immutable and *partitioned* collections of objects, (2) created by parallel *transformations* on data in stable storage (e.g., map, filter, group_by, join, ...) (3) *cached* for efficient reuse (4) operated upon by actions defined on RDDs (count, reduce, collect, save, ...) ### The components of a SPARK workflow **Transformations**: Define new RDDs [https://spark.apache.org/docs/latest/programming-guide.html#transformations](https://spark.apache.org/docs/latest/programming-guide.html#transformations) **Actions**: Return results to driver program [https://spark.apache.org/docs/latest/programming-guide.html#actions](https://spark.apache.org/docs/latest/programming-guide.html#actions) Spark was designed first for Java with an interactive shell based on Scala. It has strong support in Python and increasing support in R SparkR. 
- Spark programming guide: [https://spark.apache.org/docs/latest/programming-guide.html](https://spark.apache.org/docs/latest/programming-guide.html) - More info on SparkR: [http://amplab-extras.github.io/SparkR-pkg/](http://amplab-extras.github.io/SparkR-pkg/) - An R/Spark interface from RStudio based on dplyr: http://spark.rstudio.com/ ### Distributed stochastic gradient descent Gradient descent algorithms are easily parallelizable: - Split observations across computing units - For each step, compute partial sum for each partition (map), compute final update (reduce) $$ \beta^{k+1} = \beta^k + \alpha * \sum_{\mathrm{partition}\; p} \sum_{i \in p} (y_i - f(\mathbf{x_i}, \beta^k)) \mathbf{x}_i $$ This observation has resulted in their implementation in systems for large-scale learning: 1. [Vowpal Wabbit](https://github.com/JohnLangford/vowpal_wabbit/wiki) - Implements general framework of (sparse) stochastic gradient descent for many optimization problems - R interface: [http://cran.r-project.org/web/packages/RVowpalWabbit/index.html] 2. [Spark MLlib](https://spark.apache.org/docs/1.2.1/mllib-guide.html) - Implements many learning algorithms using Spark framework we saw previously - Some access to the MLlib API via R, but built on primitives accessible through `SparkR` library we saw previously <file_sep>/materials/lecture-notes/23-univariate_stats.Rmd # (Part) Statistical Learning {-} # Univariate distributions and statistics ```{r unistats_setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, cache=TRUE) ``` One of the purposes of this class is for you to learn Statistical and Machine Learning techniques commonly used in data analysis. By the end of the term, you should be able to read papers that use these methods critically and analyze data using these methods. When using any of these tools we will be asking ourselves if our findings are "statistically significant". 
For example, if we make use of a classification algorithm to distinguish between two groups of entities and find that we can correctly predict a class in 70 out of our 100 cases, how can we determine if this could have happened by chance alone? To be able to answer these questions, we need to understand some basic probabilistic and statistical principles. In this section we will review some of these principles. ## Variation, randomness and stochasticity In the preceding sections of the class we have not spoken too much about randomness and stochasticity. We have, however, spoken about _variation_. When we discussed the notion of _spread_ in a given dataset, measured by the sample standard deviation, for example, we are referring to the fact that in a population of entities (e.g., a set of tweets) there is naturally occurring variation in measurements (different frequency of word usage, for example). Notice that we can discuss the notion of _variation_ without referring to any randomness, stochasticity or noise. Why do we study probability then? Because, we _do_ want to distinguish, when possible, between naturally occurring variation and randomness or stochasticity. For instance, suppose we want to learn something about education loan debt for 19-30 year olds in Maryland. We could find loan debt for **all** 19-30 year old Maryland residents, and calculate mean and standard deviation. But that's difficult to do for all residents. So, instead we sample (say by randomly sending Twitter surveys), and _estimate_ the average and standard deviation of debt in this population from the sample. Now, this presents an issue since we could do the same from a different random sample and get a different set of estimates. Why? Because there is naturally-occurring variation in this population. So, a simple question to ask is, how good are our _estimates_ of debt mean and standard deviation from a sample of 19-30 year old Marylanders? 
In another example, suppose we build a predictive model of loan debt for 19-30 year old Marylanders based on other variables (e.g., sex, income, education, wages, etc.) from our sample. How good will this model perform when predicting debt in general? We use probability and statistics to answer these questions. We use probability to capture stochasticity in the sampling process and model naturally occurring variation in measurements in a population of interest. One final word, the term _population_ which we use extensively here means **the entire** collection of entities we want to model. This could include people, but also images, text, GO positions, etc. ### Random variables The basic concept in our discussion of probability is the _random variable_. Consider a situation where you are tasked with determining if a given tweet was generated by a bot. You sample a tweet at random from the set of all tweets ever written and have a human expert decide if it was generated by a bot or not. You can denote this as a _binary_ random variable $X \in \{0,1\}$, with value $1$ if the tweet is bot-generated and 0 otherwise. Why is this a random value? Because it depends on the tweet that was _randomly_ sampled. ## (Discrete) Probability distributions Now we can start talking about the distribution of values of a random variable. In our example, random variable $X$ can take values 0 or 1. We would like to specify how these values are distributed over the set of all possible tweets one can randomly sample. We use a probability distribution to do this. A _probability distribution_ is a function $P:\mathcal{D} \to [0,1]$ over set $\mathcal{D}$ of all values random variable $X$ can take to the interval $[0,1]$. The function $P$ describes how values of $X$ are distributed over domain $\mathcal{D}$. We start with a _probability mass function_ $p$ which must satisfy two properties: a. $p(X=x) \geq 0$ for all values $x \in \mathcal{D}$, and b. 
$\sum_{x\in \mathcal{D}} p(X=x) = 1$ Now, how do we interpret quantity $p(X=1)$? a. $p(X=1)$ is the _probability_ that a uniformly randomly sampled tweet is bot-generated, which implies b. the proportion of bot-generated tweets in the set of "all" tweets is $p(X=1)$. I say "all" because it's really the set of tweets one could possibly sample. Armed with a _probability mass function_ we can talk about a _cumulative probability distribution_ that describes the sum of probability up to a given value. We saw a similar concept for the empirical distribution of data when we discussed quartiles. ### Example: The oracle of TWEET Suppose we have a magical oracle and know for a _fact_ that 70% of "all" tweets are bot-generated. In that case $p(X=1) = .7$ and $p(X=0)=1-.7=.3$. ## Expectation What if I randomly sampled $n=100$ tweets? How many of those do I _expect_ to be bot-generated? _Expectation_ is a formal concept in probability: $$ \mathbb{E} X = \sum_{x\in \mathcal{D}} x p(X=x) $$ What is the expectation of $X$ (a single sample) in our tweet example? $$ 0 \times p(X=0) + 1 \times p(X=1) = \ 0 \times .3 + 1 \times .7 = .7 $$ Now, consider random variable $Y=X_1 + X_2 + \cdots + X_{100}$. What is $Y$? Remember we want to know the expected number of bot-generated tweets in a sample of $n=100$ tweets. We have $X_i=\{0,1\}$ for each of the $n=100$ tweets, each a random variable, which we obtained by uniformly and _independently_ sampling from the set of all tweets. With that, now random variable $Y$ equals the number of bot-generated tweets in my sample of $n=100$ tweets. In this case: $$ \begin{aligned} \mathbb{E} Y & = \mathbb{E} (X_1 + X_2 + \cdots + X_{100}) \\ {} & = \mathbb{E} X_1 + \mathbb{E} X_2 + \cdots + \mathbb{E} X_{100} \\ {} & = .7 + .7 + \cdots + .7 \\ {} & = 100 \times .7 \\ {} & = 70 \end{aligned} $$ This uses some facts about expectation you can show in general. 
(1) For any pair of random variables $X_1$ and $X_2$, $\mathbb{E} (X_1 + X_2) = \mathbb{E} X_1 + \mathbb{E} X_2$. (2) For any random variable $X$ and _constant_ a, $\mathbb{E} aX = a \mathbb{E} X$. ## Estimation Our discussion so far has assumed that we have access to an oracle that told us $p(X=1)=.7$, but we _don't_. For our tweet analysis task, we need to _estimate_ the proportion of "all" tweets that are bot-generated. This is where our probability model and the expectation we derive from it comes in. Given _data_ $x_1, x_2, x_3, \ldots, x_{100}$, with 67 of those tweets labeled as bot-generated (i.e., $x_i=1$ for 67 of them), we can say $y=\sum_i x_i=67$. Now from our discussion above, we _expect_ $y=np$ where $p=p(X=1)$, so let's use that observation to _estimate_ $p$! $$ \begin{aligned} np = 67 & \Rightarrow \\ 100p = 67 & \Rightarrow \\ \hat{p} = \frac{67}{100} & \Rightarrow \\ \hat{p} = .67 \end{aligned} $$ Our estimate is wrong, but close (remember we had an oracle of TWEET), but can we ever get it right? Can I say how wrong I should expect my estimates to be? Notice that our estimate of $p$, $\hat{p}$ is the sample _mean_ of $x_1,x_2,\ldots,x_n$. Let's go back to our oracle of tweet to do a thought experiment and replicate how we derived our estimate from 100 tweets a few thousand times. 
```{r, echo=FALSE, message=FALSE} library(tidyverse) theme_set(theme_bw()) ``` ```{r} # proportion of bot-tweets in the the tweet population # as given by the oracle of TWEET p <- 0.7 # let's sample 100 tweets # this function chooses between values in a vector (0 and 1) # with probability given by vector prob # we need 100 samples from this vector with replacement # since there are fewer items in the vector than the size # of the sample we are making x <- sample(c(0,1), size=100, replace=TRUE, prob=c(1-p,p)) # compute the estimated proportion that are bot-generated (using the sample mean) phat <- mean(x) # if we had an oracle that let's us do this cheaply, # we could replicate our experiment 1000 times # (you don't in real life) # first let's write a function that gets an estimate # of proportion from a random sample get_estimate <- function(n, p=0.7) mean(sample(c(0,1), size=n, replace=TRUE, prob=c(1-p,p))) # let's make a vector with 1000 _estimates_ phats_100 <- replicate(1000, get_estimate(100)) # now let's plot a histogram of the hist(phats_100, xlab=expression(hat(p)), xlim=c(0.5,1), main="Distribution of p estimates from 100 tweets") ``` What does this say about our estimates of the proportion of bot-generated tweets if we use 100 tweets in our sample? Now what if instead of sampling $n=100$ tweets we used other sample sizes? 
```{r} par(mfrow=c(2,3)) # what if we sample 10 tweets phats_10 <- replicate(1000, get_estimate(10)) hist(phats_10, main="10 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what if we sample 100 tweets phats_100 <- replicate(1000, get_estimate(100)) hist(phats_100, main="100 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what if we sample 500 tweets phats_500 <- replicate(1000, get_estimate(500)) hist(phats_500, main="500 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what about 1000 tweets phats_1000 <- replicate(1000, get_estimate(1000)) hist(phats_1000, main="1000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what about 5000 tweets phats_5000 <- replicate(1000, get_estimate(5000)) hist(phats_5000, main="5000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what about 10000 tweets phats_10000 <- replicate(1000, get_estimate(10000)) hist(phats_10000, main="10000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) ``` We can make a couple of observations: 1. The distribution of estimate $\hat{p}$ is _centered_ at $p=.7$, our unknown _population_ proportion, and 2. The _spread_ of the distribution depends on the number of samples $n$. This is an illustration of two central tenets of statistics that serves as the foundation of much of what we will do later in the course to interpret the models we build from data. ### Law of large numbers (LLN) Given _independently_ sampled random variables $X_1,X_2,\cdots,X_n$ with $\mathbb{E} X_i=\mu$ for all $i$, the LLN states that the sample mean $$ \frac{1}{n} \sum_i X_i \to \mu $$ _tends_ to the population mean (under some assumptions beyond the scope of this class) regardless of the distribution of the $X_i$. An implication of this is that using the sample mean is the right procedure to use to estimate parameters by matching their expected value! 
### Central Limit Theorem (CLT) The LLN says that estimates built using the sample mean will tend to the correct answer; the CLT describes how these estimates are _spread_ around the correct answer. Here we will use the concept of _variance_ which is expected _spread_, measured in squared distance, from the _expected value_ of a random variable: $$ \mathrm{var(X)} = \mathbb{E} (X - \mathbb{E} X)^2 $$ Example: consider the variance of our random tweet example: $$ \begin{aligned} \mathrm{var(X)} & = \sum_{\mathcal{D}} (x-\mathbb{E} X)^2 p(X=x) \\ {} & = (0 - p)^2 \times (1-p) + (1 - p)^2 \times p \\ {} & = p^2(1-p) + (1-p)^2p \\ {} & = p(1-p) (p + (1-p)) \\ {} & = p(1-p) (p - p + 1) \\ {} & = p(1-p) \end{aligned} $$ Now, we can state the CLT: $$ \frac{1}{n} \sum_{i=1}^n X_i $$ tends _towards_ a **normal** distribution as $n \rightarrow \infty$. This says, that as sample size increases the distribution of sample means is _well_ approximated by a normal distribution. This means we can approximate the expected error of our estimates well. ## The normal distribution The normal distribution describes the distribution of _continuous_ random variables over the range $(-\infty,\infty)$ using two parameters: mean $\mu$ and standard deviation $\sigma$. We write "$Y$ is normally distributed with mean $\mu$ and standard deviation $\sigma$" as $Y\sim N(\mu,\sigma)$. 
We write its _probability density function_ as: $$ p(Y=y) = \frac{1}{\sqrt{2\pi}\sigma} \mathrm{exp} \left\{ -\frac{1}{2} \left( \frac{y-\mu}{\sigma} \right)^2 \right\} $$ Here are three examples of probability density functions of normal distributions with mean $\mu=60,50,60$ and standard deviation $\sigma=2,2,6$: ```{r} # 100 equally spaced values between 40 and 80 yrange <- seq(40, 80, len=100) # values of the normal density function density_values_1 <- dnorm(yrange, mean=60, sd=2) density_values_2 <- dnorm(yrange, mean=50, sd=2) density_values_3 <- dnorm(yrange, mean=60, sd=6) # now plot the function plot(yrange, density_values_1, type="l", col="red", lwd=2, xlab="y", ylab="density") lines(yrange, density_values_2, col="blue", lwd=2) lines(yrange, density_values_3, col="orange", lwd=2) legend("topright", legend=c("mean 60, sd 2", "mean 50, sd 2", "mean 60, sd 6"), col=c("red","blue","orange"), lwd=2) ``` Like the discrete case, probability density functions for continuous random variables need to satisfy certain conditions: a. $p(Y=y) \geq 0$ for all values $Y \in (-\infty,\infty)$, and b. $\int_{-\infty}^{\infty} p(Y=y) dy = 1$ One way of remembering the density function of the normal distribution is that probability decays exponentially with rate $\sigma$ based on squared distance to the mean $\mu$. (Here is squared distance again!) Also, notice the term inside the squared? $$ z = \left( \frac{y - \mu}{\sigma} \right) $$ this is the _standardization_ transformation we saw in previous lectures. In fact the name _standardization_ comes from the _standard normal distribution_ $N(0,1)$ (mean 0 and standard deviation 1), which is very convenient to work with because it's density function is much simpler: $$ p(Z=z) = \frac{1}{\sqrt{2\pi}} \mathrm{exp} \left\{ -\frac{1}{2} z^2 \right\} $$ In fact, if random variable $Y \sim N(\mu,\sigma)$ then random variable $Z=\frac{Y-\mu}{\sigma} \sim N(0,1)$. 
### CLT continued We need one last bit of terminology to finish the statement of the CLT. Consider data $X_1,X_2,\cdots,X_n$ with $\mathbb{E}X_i= \mu$ for all $i$, **and** $\mathrm{sd}(X_i)=\sigma$ for all $i$, and their sample mean $Y=\frac{1}{n} \sum_i X_i$. The standard deviation of $Y$ is called the _standard error_: $$ \mathrm{se}(Y) = \frac{\sigma}{\sqrt{n}} $$ Ok, now we can make the CLT statement precise: the distribution of $Y$ tends _towards_ $N(\mu,\frac{\sigma}{\sqrt{n}})$ as $n \rightarrow \infty$. This says, that as sample size increases the distribution of sample means is well approximated by a normal distribution, and that the spread of the distribution goes to zero at the rate $\sqrt{n}$. Disclaimer: there a few mathematical subtleties. Two important ones are that a. $X_1,\ldots,X_n$ are iid (independent, identically distributed) random variables, and b. $\mathrm{var}X < \infty$ Let's redo our simulated replications of our tweet samples to illustrate the CLT at work: ```{r} # we can calculate standard error for each of the # settings we saw previously and compare these replications # to the normal distribution given by the CLT # let's write a function that adds a normal density # plot for a given sample size draw_normal_density <- function(n,p=.7) { se <- sqrt(p*(1-p))/sqrt(n) f <- dnorm(seq(0.5,1,len=1000), mean=p, sd=se) lines(seq(0.5,1,len=1000), f, col="red", lwd=1.6) } par(mfrow=c(2,3)) # what if we sample 10 tweets phats_10 <- replicate(1000, get_estimate(10)) hist(phats_10, main="10 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(10) # what if we sample 100 tweets phats_100 <- replicate(1000, get_estimate(100)) hist(phats_100, main="100 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(100) # what if we sample 500 tweets phats_500 <- replicate(1000, get_estimate(500)) hist(phats_500, main="500 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(500) # what about 1000 
tweets phats_1000 <- replicate(1000, get_estimate(1000)) hist(phats_1000, main="1000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(1000) # what about 5000 tweets phats_5000 <- replicate(1000, get_estimate(5000)) hist(phats_5000, main="5000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(5000) # what about 10000 tweets phats_10000 <- replicate(1000, get_estimate(10000)) hist(phats_10000, main="10000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(10000) ``` Here we see the three main points of the LLN and CLT: (1) the normal density is centered around $\mu=.7$, (2) the normal approximation gets better as $n$ increases, and (3) the standard error goes to 0 as $n$ increases. ## The Bootstrap Procedure What if the conditions that we used for the CLT don't hold? For instance, samples $X_i$ may not be independent. What can we do then, how can we say something about the precision of sample mean estimate $Y$? A super useful procedure to use in this case is the bootstrap. It is based on using _randomization_ to simulate the stochasticity resulting from the population sampling procedure we are trying to capture in our analysis. The main idea is the following: given observations $x_1,\ldots,x_n$ and the estimate $y=\frac{1}{n}\sum_{i=1}^n x_i$, what can we say about the standard error of $y$? There are two challenges here: 1) our estimation procedure is deterministic, that is, if I compute the sample mean of a specific dataset, I will always get the same answer; and 2) we should retain whatever properties of estimate $y$ result from obtaining it from $n$ samples. The bootstrap is a randomization procedure that measures the variance of estimate $y$, thus using randomization to address challenge (1), but doing so with randomized samples of size $n$, addressing challenge (2). The procedure goes as follows: 1. 
Generate $B$ random datasets by sampling _with replacement_ from dataset $x_1,\ldots,x_n$. Denote randomized dataset $b$ as $x_{1b},\ldots,x_{nb}$. 2. Construct estimates from _each_ dataset, $y_b = \frac{1}{n}\sum_i x_{ib}$ 3. Computer center (mean) and spread (variance) of estimates $y_b$ Let's see how this works on tweet oracle example ```{r} # remember our dataset is in variable x # this is how we get one bootstrap replicate # sample n observations from dataset x _with replacement_ xb <- sample(x, length(x), replace=TRUE) # let's do B=100 bootstrap randomizations using the # replicate function (it just replicates the given expression # however many times it is directed to do so) B <- 200 xb <- replicate(B, sample(x,length(x), replace=TRUE)) # xb is a matrix with 100 rows (the original length of dataset) and # 200 columns (the number of replicates) # now let's compute the bootstrap estimates y yb <- colMeans(xb) # and make a histogram of the bootstrap estimates hist(yb, probability=TRUE, main="Histogram of bootstrap estimates", xlab="Bootsrap Estimates",xlim=c(0.5,1)) abline(v=p, col="blue") draw_normal_density(100) ``` Now, let's a case where we don't expect the normal approximation to not work so well by making samples not identically distributed. 
Let's make a new ORACLE of tweet where the probability of a tweet being bot-generated can be one of two values (.7 and .4): ```{r} create_mixture_dataset <- function(n=100,p=c(.7,.4)) { tweets1 <- sample(c(0,1), size=n, prob=c(1-p[1],p[1]), replace=TRUE) tweets2 <- sample(c(0,1), size=n, prob=c(1-p[2],p[2]),replace=TRUE) ifelse(runif(n)<=.5, tweets1, tweets2) } mixture_x <- create_mixture_dataset(100) # Now let's do the same bootstrap procedure in this case xb <- replicate(B, sample(mixture_x,length(mixture_x), replace=TRUE)) # xb is a matrix with 100 rows (the original length of dataset) and # 200 columns (the number of replicates) # now let's compute the bootstrap estimates y yb <- colMeans(xb) # and make a histogram of the bootstrap estimates hist(yb, probability=TRUE, main="Histogram of bootstrap estimates", xlab="Bootsrap Estimates", xlim=c(0,1)) draw_normal_density <- function(n,p=.7) { se <- sqrt(p*(1-p))/sqrt(n) f <- dnorm(seq(0,1,len=1000), mean=p, sd=se) lines(seq(0,1,len=1000), f, col="red", lwd=1.6) } draw_normal_density(100, mean(mixture_x)) ``` Here, an analysis based on the classical CLT would not be appropriate, but the bootstrap analysis gives some information about the variability of our estimates. <file_sep>/materials/quizzes/iris_wrangling.Rmd --- title: "dplyr exercise" author: "CMSC320" date: "September 21, 2016" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` Consider the following dataset of iris measurements (this is a very historically important dataset, btw): ```{r, message=FALSE} library(dplyr) data(iris) iris <- as_data_frame(iris) iris ``` `Species` is a categorical variable indicating iris species for each flower measured. Using `dplyr` code compute: 1. the average `Petal.Length` of each iris species 2. 
the average of all four numeric variables for each iris species ```{r} iris %>% group_by(Species) %>% summarize(mean(Petal.Length, na.rm=TRUE)) ``` <file_sep>/materials/lectures/TreeMethods/TreeMethods.Rmd --- title: "TreeMethods" author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- ```{r, echo=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` We saw in previous units the limitation of using linear methods for classification. In particular, the partition of predictor space into regions according to conditional class probabilities is very limited. In this unit, we look at a set of elegant and versatile methods that allow these regions to take more complex shapes, but still produce models that are interpretable. These are very popular, well-known and studied methods in Statistical Learning. We will concentrate on Regression and Decision Trees and their extension to Random Forests. ### Regression Trees Consider the dataset we saw in our linear regression unit. We found using linear regression that a linear model of weight vs. miles per gallon was not a good fit. ```{r, echo=FALSE, cache=FALSE} library(tree) library(ISLR) library(RColorBrewer) palette(brewer.pal(8, "Dark2")) data(Auto) with(Auto, plot(weight, mpg, pch=19, cex=1.4)) ``` Let's take a look at what a regression tree estimates in this case. ```{r} tree <- tree(mpg~weight, data=Auto) plot(tree) text(tree, pretty=0, cex=1.3) ``` The decision trees partitions the `weight` predictor into regions based on its value. We can show this graphically as below. The idea behind the regression tree is that outcome $Y$ (`mpg` in this case) is estimated (or predicted) to be it's mean _within each of the data partitions_. Think of it as the conditional mean of $Y$ where conditioning is given by this region partitioning. 
```{r, echo=FALSE, cache=FALSE, results="hide"} library(RColorBrewer) palette(brewer.pal(8, "Dark2")) with(Auto, plot(weight, mpg, pch=19, cex=1.4)) #abline(h=subset(tree$frame, grepl("leaf", tree$frame$var))$yval) abline(v=as.numeric(gsub("<", "", subset(tree$frame, !grepl("leaf", tree$frame$var))$splits[,"cutleft"]))) process_node <- function(i, left, right) { if (tree$frame$var[i] == "<leaf>") { val <- as.numeric(tree$frame$yval[i]) segments(left, val, right, val, col="red", lwd=5) } else { val <- as.numeric(gsub("<","",tree$frame$splits[i, "cutleft"])) i <- process_node(i+1, left, val) i <- process_node(i+1, val, right) } i } process_node(1, .85*min(Auto$weight), 1.05*max(Auto$weight)) ``` Regression and decision trees operate by prediction an outcome variable $Y$ by partitioning feature (predictor) space. The regression tree model then: 1. Partitions space into $J$ non-overlapping regions, $R_1, R_2, \ldots, R_J$. 2. For every observation that falls within region $R_j$, predict response as mean of response for training observations in $R_j$. The important observation is that **Regression Trees create partition recursively** For example, consider finding a good predictor $j$ to partition space its axis. A recursive algorithm would look like this: 1. Find predictor $j$ and value $s$ that minimize RSS: $$ \sum_{i:\, x_i \in R_1(j,s))} (y_i - \hat{y}_{R_1})^2 + \sum_{i:\, x_i \in R_2(j,s))} (y_i - \hat{y}_{R_2})^2 $$ Where $R_1$ and $R_2$ are regions resulting from splitting observations on predictor $j$ and value $s$: $$ R_1(j,s) = \{X|X_j < s\} \mathrm{ and } R_2(j,s) \{X|X_j \geq s\} $$ This is then applied recursively to regions $R_1$ and $R_2$. Within each region a prediction is made using $\hat{y}_{R_j}$ which is the mean of the response $Y$ of observations in $R_j$. ![](8.3.png) Consider building a model that used both `horsepower` and `weight`. In this plot the value of the response $Y$ is indicated by the size of the point. 
```{r, echo=FALSE} with(Auto, { plot(horsepower, weight, cex=mpg/median(mpg), pch=19) qs <- quantile(mpg, p=seq(0,1, len=5)) legend("bottomright", pch=19, legend=qs, pt.cex=qs/median(mpg)) }) ``` This is what a decision tree would look like for these two predictors: ```{r} tree <- tree(mpg~horsepower+weight, data=Auto) plot(tree) text(tree, pretty=0) ``` ```{r, echo=FALSE, cache=FALSE} process_node <- function(i, j, left, right, bottom, top, dat) { var <- as.character(tree$frame$var[i]) is_leaf <- grepl("leaf", var) if (is_leaf) { val <- as.numeric(tree$frame$yval[i]) dat[j,] <- c(j, left, right, bottom, top, val) j <- j + 1 } else { val <- as.numeric(gsub("<","",tree$frame$splits[i, "cutleft"])) if (var == "horsepower") { res <- process_node(i+1, j, left, val, bottom, top, dat) i <- res$i; j <- res$j; dat <- res$dat res <- process_node(i+1, j, val, right, bottom, top, dat) i <- res$i; j <- res$j; dat <- res$dat } else { res <- process_node(i+1, j, left, right, bottom, val, dat) i <- res$i; j <- res$j; dat <- res$dat res <- process_node(i+1, j, left, right, val, top, dat) i <- res$i; j <- res$j; dat <- res$dat } } list(i=i, j=j, dat=dat) } nleaves <- sum(grepl("leaf", tree$frame$var)) region_dat <- data.frame(j=integer(nleaves), left=numeric(nleaves), right=numeric(nleaves), bottom=numeric(nleaves), top=numeric(nleaves), val=numeric(nleaves)) res <- process_node(1, 1, .85*min(Auto$horsepower), 1.05*max(Auto$horsepower), .85*min(Auto$weight), 1.05*max(Auto$weight), region_dat) region_dat <- res$dat with(Auto, { plot(horsepower, weight, cex=mpg/median(mpg), pch=19) qs <- quantile(mpg, p=seq(0,1, len=5)) legend("bottomright", pch=19, legend=qs, pt.cex=qs/median(mpg)) }) with(region_dat, { segments(left, bottom, right, bottom) segments(left, top, right, top) segments(left, bottom, left, top) segments(right, bottom, right, top) text(.5*(left+right), .5*(top+bottom), labels=j, cex=4, col="red") }) ``` ```{r, echo=FALSE} plot(tree) text(tree, pretty=0) ``` Regression 
trees are built in R using a similar interface as linear models ```{r, eval=FALSE} library(tree) library(ISLR) data(Auto) tree_fit <- tree(mpg~horsepower+weight, data=Auto) predict(tree_fit) ``` ### Specifics of the regression tree algorithm The recursive partitioning algorithm described above leads to a set of natural questions: _When do we stop partitioning?_ We stop when adding a partition does not reduce RSS, or, when partition has too few training observations. Even then, trees built with this stopping criterion tend to _overfit_ training data. To avoid this, a post-processing step called _pruning_ is used to make the tree smaller. **Question:** why would a smaller tree tend to generalize better? Chapter 8 on the ISLR book, goes into specifics of how to prune regression trees. Let's compare however, how do regression trees of different depths perform on both training and testing data. ```{r, echo=TRUE} set.seed(1234) train_indices <- sample(nrow(Auto), nrow(Auto)/2) train_set <- Auto[train_indices,] test_set <- Auto[-train_indices,] auto_tree <- tree(mpg~cylinders+displacement+horsepower+weight+acceleration+year+factor(origin), data=train_set) plot(auto_tree) text(auto_tree, pretty=0, cex=1.4) ``` The `cv.tree` function is used to determine a reasonable tree depth for the given dataset. For this dataset it seems that a depth of 6 works well since error for depth 6 is similar to error for the full size tree (depth 10), and shallower trees are preferred due to the reduced complexity of the prediction function. ```{r, echo=TRUE} cv_auto <- cv.tree(auto_tree) plot(cv_auto$size, cv_auto$dev, type="b", xlab="Tree Size", ylab="RSS") ``` ### Classification (Decision) Trees Classification, or decision trees, are used in classification problems, where the outcome is categorical. The same partitioning principle, but now, each region predicts the majority class for training observations within region. 
The recursive partitioning algorithm we saw previosuly requires a score function to choose predictors (and values) to partition with. In classification we could use a naive approach of looking for partitions that minimize training error. However, better performing approaches use more sophisticated metrics. Here are two of the most popular (denoted for leaf $m$): - **Gini Index**: $\sum_{k=1}^K \hat{p}_{mk}(1-\hat{p}_{mk})$, or - **Entropy**: $-\sum_{k=1}^K \hat{p}_{mk}\log(\hat{p}_{mk})$ where $\hat{p}_{mk}$ is the proportion of training observations in partition $m$ labeled as class $k$. Both of these seek to partition observations into subsets that have the same labels. Let's look at how a classification tree performs on the credit card default dataset we saw before. ```{r, echo=TRUE} data(Default) with(Default, { plot(balance, income, pch=ifelse(student=="Yes", 19, 21), col=default) legend("topright", pch=c(19,21,19,19), col=c("black","black",1,2), legend=c("Student", "Not Student","Not Default","Default")) }) ``` ```{r, echo=TRUE} default_tree <- tree(default~student+balance+income, data=Default) plot(default_tree) text(default_tree, pretty=0) ``` ```{r, echo=TRUE} default_tree ``` Classification trees have certain advantages that make them very useful. They are highly interpretable, even moreso than linear models. Are easy to visualize (if small enough), they (maybe) model human decision processes and don't require that dummy predictors for categorical variables are used. On the other hand, the greedy approach via recursive partitioning is a bit harder to train than linear regression. It may not always be the best performing method since it is not very flexible and are highly unstable to changes in training data. ### Random Forests Random Forests are a **very popular** approach that addresses these shortcomings via resampling of the training data. 
Their goal is to improve prediction performance and reduce instability by _averaging_ multiple decision trees (a forest constructed with randomness). It uses two tricks to accomplish this. The first trick is *Bagging* (bootstrap aggregation) General scheme: 1. Build many decision trees $T_1, T_2, \ldots, T_B$ from training set 2. Given a new observation, let each $T_j$ predict $\hat{y}_j$ 3. For regression: predict average $\frac{1}{B} \sum_{j=1}^B \hat{y}_j$, for classification: predict with majority vote (most frequent class) But wait, how do we get many decision trees from a single training set? For this we use a clever resampling technique called the _bootstrap_. To create $T_j, \, j=1,\ldots,B$ from training set of size $n$: a) create a bootstrap training set by sampling $n$ observations from training set **with replacement** b) build a decision tree from bootstrap training set ![](bootstrap.png) The second trick used in Random Forests is to use a random selection of features to split when deciding partitions. Specifically, when building each tree $T_j$, at each recursive partition only consider a randomly selected subset of predictors to check for best split. This reduces correlation between trees in forest, improving prediction accuracy. Let's look at our auto dataset again ```{r, echo=TRUE} set.seed(1234) train_indices <- sample(nrow(Auto), nrow(Auto)/2) train_set <- Auto[train_indices,] test_set <- Auto[-train_indices,] library(randomForest) auto_rf <- randomForest(mpg~cylinders+displacement+horsepower+weight+acceleration+year+origin, importance=TRUE, mtry=3, data=train_set) ``` Let's plot the predicted miles per gallon given by this model compared to the observed miiles per gallon in the training dataset. 
```{r} plot(train_set$mpg, predict(auto_rf, newdata=train_set), xlab="Observed MPG", ylab="Predicted MPG", main="RF Training Error") abline(0,1) rmse <- sqrt( mean( (train_set$mpg - predict(auto_rf, newdata=train_set) )^2 )) legend("bottomright", legend=paste("RMSE=", round(rmse, digits=2)), cex=2) ``` Now let's look at the same plot on a _testing_ dataset. ```{r, echo=TRUE} plot(test_set$mpg, predict(auto_rf, newdata=test_set), xlab="Observed MPG", ylab="Predicted MPG", main="RF Testing Error") abline(0,1) rmse <- sqrt( mean( (test_set$mpg - predict(auto_rf, newdata=test_set) )^2 )) legend("bottomright", legend=paste("RMSE=", round(rmse, digits=2)), cex=2) ``` A disadvantage of random forests is that we lose interpretability. However, we can use the fact that a bootstrap sample was used to construct trees to measure _variable importance_ from the random forest. Here is a table of _variable importance_ for the random forest we just constructed. ```{r, echo=TRUE, results="asis"} variable_importance <- importance(auto_rf) knitr::kable(head(round(variable_importance, digits=2))) ``` And a barplot of the same data. ```{r, echo=FALSE} imp <- importance(auto_rf)[,2] par(mar=par()$mar+c(0,5,0,0)) o <- order(imp) barplot(imp[o], horiz=TRUE, xlab="Variable Importance", las=2, cex.names=1.6) ``` ### Tree-based methods summary Tree-based methods are very interpretable _prediction_ models. For which some inferential tasks are possible (e.g., variable importance in random forests), but are much more limited than the linear models we saw previously. These methods are very commonly used across many application domains and Random Forests often perform at state-of-the-art for many tasks. 
<file_sep>/materials/lectures/Wrangling/wrangling_sql.Rmd --- title: 'Data Wrangling: SQL' author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` _This material is based on <NAME>'s notes: [https://github.com/umddb/datascience-fall14/blob/master/lecture-notes/relational.md](https://github.com/umddb/datascience-fall14/blob/master/lecture-notes/relational.md)_ In the previous section we looked at common operations used in data manipulation using the `dplyr` package. In this section we go over the same manipulation operations using SQL (Structured Query Language). We then conclude with other SQL constructs beyond these common manipulation operations. First, however, a quick overview of database technology and SQL. Database Management Systems were introduced by <NAME> in the late 60's -- early 70's. They are centered around the relational data model (which we saw before) and seeks to achieve the following: - Separation of logical, physical data models (data independence) - Declarative query languages (as opposed to imperative programming) - Formal semantics (e.g., the ER Data Model) - Query optimization (key to commercial success) - 1st prototypes: - Ingres -> CA - Postgres -> Illustra -> Informix -> IBM - System R -> Oracle, DB2 As a reminder, the key components of the Relational Data Model are: - Relations (Tables) - Rows, or Tuples, or Records, Entities - Columns, or Attributes - Schema Remember also the distinction between a _Relation Schema_ (a list of attributes and their domain) and a _Relation Instance_ (a particular instantiation of a relation with actual values, which will change over time). 
### Formal introduction to keys ![](schema.png) - Attribute set $K$ is a **superkey** of relation $R$ if values for $K$ are sufficient to identify a unique tuple of each possible relation $r(R)$ - Example: `{ID}` and `{ID,name}` are both superkeys of *instructor* - Superkey $K$ is a **candidate key** if $K$ is minimal - Example: `{ID}` is a candidate key for Instructor - One of the candidate keys is selected to be the **primary key** - Typically one that is small and immutable (doesn’t change often) - Primary key typically highlighted - **Foreign key**: Primary key of a relation that appears in another relation - `{ID}` from *student* appears in *takes, advisor* - *student* called referenced relation - *takes* is the referencing relation - Typically shown by an arrow from referencing to referenced - **Foreign key constraint**: the tuple corresponding to that primary key must exist - Imagine: - Tuple: `('student101', 'CMSC302') `in *takes* - But no tuple corresponding to 'student101' in *student* - Also called referential integrity constraint #### Keys: Examples - Married(person1-ssn, person2-ssn, date-married, date-divorced) - Account(cust-ssn, account-number, cust-name, balance, cust-address) - RA(student-id, project-id, superviser-id, appt-time, appt-start-date, appt-end-date) - Person(Name, DOB, Born, Education, Religion, ...) - Information typically found on Wikipedia Pages - President(name, start-date, end-date, vice-president, preceded-by, succeeded-by) - Info listed on Wikipedia page summary - Rider(Name, Born, Team-name, Coach, Sponsor, Year) - Tour de France: Historical Rider Participation Information ### SQL Basics Overview The Structured Query Language (SQL) is both a _Data Definition Language_ and a _Data Manipulation Language_ ```sql CREATE TABLE <name> ( <field> <domain>, ... 
) INSERT INTO <name> (<field names>) VALUES (<field values>) DELETE FROM <name> WHERE <condition> UPDATE <name> SET <field name> = <value> WHERE <condition> SELECT <fields> FROM <name> WHERE <condition> ``` Consider the following example schema: - Movie(title, year, length, inColor, studioName, producerC#) - StarsIn(movieTitle, movieYear, starName) - MovieStar(name, address, gender, birthdate) - MovieExec(name, address, cert#, netWorth) - Studio(name, address, presC#) ![](movies-schema.png) We use SQL as a _Data Definition Language_ to define this schema: ```sql create table movieExec ( name char(30), address char(100), cert# integer primary key, networth integer ); create table movie ( title char(100), year integer, length integer, inColor smallint, studioName char(20), producerC# integer references movieExec(cert#) ); ``` - Must define movieExec before movie. Why ? - Notice that we define _types_ (we haven't done that explicitly in R) Statements to add or remove data from a relation: ```sql insert into StarsIn values('King Kong', 2005, '<NAME>'); insert into StarsIn(starName, movieTitle, movieYear) values('<NAME>', 'King Kong', 2005); delete from movies where movieYear < 1980; ``` ### SQL Constructs: Single Table Queries Here we will see the same operations we saw with `dplyr`, but there is a fundamental difference. SQL is a declarative language, we don't write how to get the answer we want, we declare the answer we want. The actual execution is determined and optimized by the database engine. However, there are clear mappings between parts of SQL queries and the operations we used in `dplyr`. 
The basic construct in SQL is the so-called `SFW` construct: _select-from-where_ which specifies: - _select_: which attributes you want the answer to have - _from_: which relation (table) you want the answer to be computed from - _where_: what conditions you want to be satisfied by the rows (tuples) of the answer E.g.: movies produced by disney in 1990: note the *rename* ```sql select m.title, m.year from movie m where m.studioname = 'disney' and m.year = 1990 ``` The **select** clause can contain expressions - `select title || ' (' || to_char(year) || ')' as titleyear` - `select 2014 - year` The **where** clause support a large number of different predicates and combinations thereof - `year between 1990 and 1995` - `title like 'star wars%'` ` title like 'star wars _'` We can include ordering, e.g., find distinct movies sorted by title ```sql select distinct title from movie where studioname = 'disney' and year = 1990 order by title; ``` ### Group-by and summarize As expected, SQL has an idiom for grouping and summarizing (_conditioning_ as we called it before). Remember this is a very important concept that shows up in many data processing platforms - What it does: Partition the tuples by the group attributes (*year* in this case), and do something (*compute avg* in this case) for each group - Number of resulting tuples == Number of groups E.g., compute the average movie length by year ```sql select name, avg(length) from movie group by year ``` ### Subqueries You can nest queries as an expression in an SFW query. We refer to these "subqueries" as "nested subquery": E.g., find movie with the maximum length ```sql select title, year from movie where movie.length = (select max(length) from movie); ``` E.g., find movies with at least 5 stars: an example of a correlated subquery ```sql select * from movies m where 5 >= (select count(*) from starsIn si where si.title = m.title and si.year = m.year); ``` The nested subquery counts the number of actors for that movie. 
E.g., rank movies by their length. ```sql select title, year, (select count(*) from movies m2 where m1.length <= m2.length) as rank from movies m1; ``` Key insight: A movie is ranked 5th if there are exactly 4 movies with longer length. Most database systems support some sort of a *rank* keyword for doing this. Notice that the above query doesn't work in presence of ties etc. ### Exercise The `dplyr` package has a nice interface to database systems as well. As you may imagine, mapping the `dplyr` single and two-table verbs to SQL can be pretty straight forward. However, it also allows you to run SQL queries on these databases directly. See more information [in this introduction](https://cran.r-project.org/web/packages/dplyr/vignettes/databases.html). As an exercise you should try to answer our `dplyr` exercise questions using SQL queries and compare the results. As a reminder, here are the six questions: *Question 1*: Filter flights to include only flights with an arrival delay greater than 2 hours (delays are recorded in minutes). *Question 2*: Select columns in the flights table that contain delay times (note that the function `matches` can be used to select columns with names that partially match a given string. See `?dplyr::select`) *Question 3*: Find the minimum arrival delay (this can be a negative number) in the flights table. Use `summarize`. *Question 4*: Find minimum arrival delay (again, can be a negative number) in the flights table for each _destination_ airport. Use `group_by` and `summarize`. *Question 5*: List the name of **all** airlines and the number of flights for each airline in flights table. The `airlines` table contains airline names, so you will have to perform a join operation. *Question 6*: (a) Create a new column (use `mutate`) with total delay time for each flight. (b) Replace any missing _total delay times_ with the average (`mean`) total delay. 
(c) Same as (b) but now replace any missing _total delay times_ with the average (`mean`) total delay for the flight's route (i.e., origin/destination combination) ### Other useful SQL constructs - Set operations ```sql select name from movieExec union/intersect/minus select name from movieStar ``` - Set Comparisons ```sql select * from movies where year in [1990, 1995, 2000]; select * from movies where year not in ( select extract(year from birthdate) from MovieStar ); ``` ### SQL Constructs: Multi-table Queries Key idea: - Do a join to get an appropriate table - Use the constructs for single-table queries You will get used to doing all at once - Examples: ```sql select title, year, me.name as producerName from movies m, movieexec me where m.producerC# = me.cert#; ``` - Consider the query: ```sql select title, year, producerC#, count(starName) from movies, starsIn where title = starsIn.movieTitle and year = starsIn.movieYear group by title, year, producerC# ``` - What about movies with no stars ? - Need to use **outer joins** ```sql select title, year, producerC#, count(starName) from movies left outer join starsIn on title = starsIn.movieTitle and year = starsIn.movieYear group by title, year, producerC# ``` As we saw before, all tuples from 'movies' that have no matches in starsIn are included with NULLs (in `dplyr` this was `NA`). So, if a tuple `(m1, 1990)` has no match in `starsIn`, we get `(m1, 1990, NULL)` in the result and the `count(starName)` works correctly then. Note however that `count(*)` would not work correctly (NULLs can have unintuitive behavior) ### Other SQL Constructs #### Views ```sql create view DisneyMovies select * from movie m where m.studioname = 'disney'; ``` Can use it in any place where a tablename is used. Views are used quite extensively to: (1) simplify queries, (2) hide data (by giving users access only to specific views). Views maybe *materialized* or not. 
#### NULLs Value of any attribute can be NULL if value is unknown, or it is not applicable, or hidden, etc. It can lead to counterintuitive behavior. For example, the following query does not return movies where `length = NULL` ```sql select * from movies where length >= 120 or length <= 120` ``` Aggregate operations can be especially tricky when NULLs are present. ### DBMS are systems Database management systems are software applications designed for very efficient manipulation of data targeting a relatively small number of operations. Since they are also defined to operate over a fairly restrictive data model, they are extremely useful in situations where data consistency and safety are required. Here are some examples of capabilities found in DBMS that help in that regard: - Transactions - A transaction is a sequence of queries and update statements executed as a single unit - For example, transferring money from one account to another - Both the *deduction* from one account and *credit* to the other account should happen, or neither should - Triggers - A trigger is a statement that is executed automatically by the system as a side effect of a modification to the database - Integrity Constraints - Predicates on the database that must always hold - Key Constraints: Specifiying something is a primary key or unique ```sql CREATE TABLE customer ( ssn CHAR(9) PRIMARY KEY, cname CHAR(15), address CHAR(30), city CHAR(10), UNIQUE (cname, address, city)); ``` Attribute constraints: Constraints on the values of attributes `bname char(15) not null` `balance int not null, check (balance >= 0)` - Referential integrity: prevent dangling tuples ```sql CREATE TABLE branch(bname CHAR(15) PRIMARY KEY, ...); CREATE TABLE loan(..., FOREIGN KEY bname REFERENCES branch); ``` - Can tell the system what to do if a referenced tuple is being deleted - Global Constraints - Single-table ```sql CREATE TABLE branch (..., bcity CHAR(15), assets INT, CHECK (NOT(bcity = ‘Bkln’) OR assets > 5M)) 
``` - Multi-table ```sql CREATE ASSERTION loan-constraint CHECK (NOT EXISTS ( SELECT * FROM loan AS L WHERE NOT EXISTS( SELECT * FROM borrower B, depositor D, account A WHERE B.cname = D.cname AND D.acct_no = A.acct_no AND L.lno = B.lno))) ``` <file_sep>/materials/classroom-scripts/cmsc320_class-script_20190207.R library(tidyverse) arrest_tab <- read_csv("data/BPD_Arrests.csv") arrest_tab # data arrest_tab %>% group_by(district) %>% summarize(num_arrest = n()) %>% # mapping data attribute to # graphical attribute ggplot(aes(y=num_arrest, x=district)) + geom_bar() + labs(title="Example Plot") # Make a box plot # showing the distribution of ages # for arrests for the SOUTHERN district # conditioned on sex. # Data (operation pipeline) arrest_tab %>% filter(district == "SOUTHERN") %>% select(sex, age) %>% group_by(sex) %>% summarize(mean_age=mean(age, na.rm=TRUE)) %>% # Mapping ggplot(aes(x=sex, y=mean_age)) + # Geometric Representation geom_boxplot() <file_sep>/materials/lectures/Wrangling/wrangling_exercise.Rmd --- title: "Wrangling Exercise" author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- This is an exercise on data wrangling using the flights dataset: ```{r, eval=FALSE} library(dplyr) library(nycflights13) data(flights) data(airlines) ``` 1. Filter flights to include only flights with an arrival delay greater than 2 hours (delays are recorded in minutes) 2. Select columns in the flights table that contain delay times (note that the function `matches` can be used to select columns with names that matches a regular expression. See `?dplyr::select`) 3. Find the minimum arrival delay (this can be a negative number) in the flights table. Use `summarize`. 4. Find minimum arrival delay (again, can be a negative number) in the flights table for each destination airport. Use `group_by' and 'summarize`. 5. List the name of all airlines and the number of flights for each airline in flights table. The `airlines` table has airline names. 6. 
(a): create a new column (use 'mutate') with total delay time for each flight (b): replace any missing *total delay times* with the average total delay time of the remiaining flights. <file_sep>/content/lecture-note/best_practices/index.md --- date: 2016-09-14T13:17:30-04:00 title: Best Practices --- Tips for organizing and managing data science projects [Slides](BestPractices.pdf) <file_sep>/content/lecture-note/rintro/index.md --- date: 2016-08-30T22:23:52-04:00 title: R for Data Science Primer --- An introduction to the R programming language and data analysis environment Part I: ([html](RIntro_lab/)) ([Rmarkdown](RIntro_lab.Rmd)) Part II: ([html](RIntro_lab2/)) ([Rmarkdown](RIntro_lab2.Rmd)) Part III: A short note about pipes ([html](pipes/)) ([Rmarkdown](pipes.Rmd)) ## Additional Resources - [Course resource page]({{< baseurl >}}resources/#r) - Zumen & Mount, Appendix A - <NAME>. [Advanced R](http://adv-r.had.co.nz/) - [Rmarkdown](http://rmarkdown.rstudio.com/) <file_sep>/materials/homeworks/midtermII_review.md --- title: Midterm 2 material author: CMSC 320 geometry: margin=1in fontfamily: utopia --- This document describes material that will be fair game in the second midterm exam. 
## Exploratory Data Analysis ### Summary Statistics - Distributional characteristics: range, central tendency, spread - Statistical summaries: sample mean, sample median, sample standard deviation - The derivation of the mean as an _optimal_ central tendency statistic - The five-number summary of data and relationship to boxplot - Statistical summaries of pairwise relationship between variables: sample covariance and correlation ### Visualization for EDA - Plots to show data distribution for one variable/two variables - The data/aesthetic mapping/geometric representation scheme for data visualization (ggplot) ### Data transformations - Centering and scaling data transformation (standardization) - Standard units - Ways of discretizing continuous numeric data - Relationship between arithmetic and geometric mean - The logarithmic transformation for skewed data ## Introduction to Statistical Learning - The "inverse problem" way of thinking about data analysis - Properties of discrete probability distributions - Expectation for discrete probability distributions - How the sample mean is an _estimate_ of expected value - The law of large numbers and the central limit theorem - The Bootstrap procedure - The Bernoulli, Binomial and Normal distributions - Using the CLT to get a confidence interval for the mean - Using the CLT to test a simple hypothesis about the mean - Application to A/B Testing - Joint and conditional distribution for discrete probability distributions - Bayes Rule - Independence - Conditional expectation for discrete probability distributions ## Linear models for regression - The linear regression model - Estimating linear regression parameters by minimizing residual sum of squares (RSS) - Diagnostic plots for linear regression - How to encode categorical predictors in a linear regression model, and how to interpret their coefficient estimates - How to incorporate and interpret predictor interactions in a linear regression model - Constructing a 
confidence interval for a parameter estimate in the linear regression model. - The R^2 measure to assess global fit in a regression model - What is co-linearity ## Linear models for classification - What is a classification problem? - Understanding classification as a probability estimation problem. - Why shouldn't you use linear regression (for continuous outcomes) to predict outcome for a binary categorical variable - What is log-odds? How do we transform log-odds to probabilities? - How is the logistic regression problem defined. - How do we calculate error rate for a classification problem? - What are False positive and false negative errors? - What is the False positive rate? True positive rate? - What are precision and recall? ## Gradient descent - Gradient descent for linear regression - The Maximum Likelihood principle - Gradient descent for logistic regression # Midterm Structure The midterm will consist of two sections: ~5 multiple choice questions, and 2 longer questions. The midterm will be take-home, and you will have 5 days to complete. All resources are at your disposal (lecture notes, recordings, etc.). The code of academic integrity still applies. This is to be done independently. 
<file_sep>/data/calendar.toml [day01] date = "1/27" [[day01.covers]] title = "What is Data Science, Course Introduction and Logistics" notes = "bookdown-notes/introduction-and-overview.html" [[day01.covers]] title = "The Movie Taxonomy Analysis" notes = "bookdown-notes/an-illustrative-analysis.html" [[day01.covers]] title = "Lecture Slides: Introduction and Overview" notes = "slides/cmsc320_intro.pdf" [day02] date = "1/29" [[day02.covers]] title = "Preliminaries 1: Setting up" notes = "bookdown-notes/setting-up-the-data-science-toolbox.html" [[day02.covers]] title = "Preliminaries 1: Measurement and data types" notes = "bookdown-notes/measurements-and-data-types.html" [[day02.covers]] title = "Lecture Slides: Measurement and data types" notes = "slides/cmsc320_measurement-datatypes.pdf" [[day02.covers]] title = "Preliminaries 2: Basic data operations" notes = "bookdown-notes/principles-basic-operations.html" [[day02.covers]] title = "Lecture Slides: Basic data operations" notes = "slides/cmsc320_operations.pdf" [[day02.covers]] title = "Python notebook for data operations" notes = "ipynb/data_operations.ipynb" # [day02.class_notes] # link = "cmsc320_class-notes_20180130.pdf" [day03] date = "2/3" [[day03.covers]] title = "Preliminaries 2: More data operations" notes = "bookdown-notes/principles-more-operations.html" [[day03.covers]] title = "Lecture Slides: More data operations" notes = "slides/cmsc320_operations.pdf" [[day03.covers]] title = "Basic data plotting" notes = "bookdown-notes/basic-plotting-with-ggplot.html" [[day03.covers]] title = "Lecture Slides: Basic data plotting" notes = "slides/cmsc320_basic-plotting.pdf" [[day03.covers]] title = "Introduction to literate programming: Notebooks" notes = "bookdown-notes/brief-introduction-to-rmarkdown.html" # [day03.class_notes] # link = "cmsc320_class-notes_20180201.pdf" [day04] date = "2/5" [[day04.covers]] title = "Guest Lecture (<NAME>): Best Practices for Data Science Projects [slides in pdf]" notes = 
"slides/cmsc320_s2020_dickerson_guest_lecture.pdf" [[day04.covers]] title = "Guest lecture slidedeck in pptx" notes = "slides/cmsc320_s2020_dickerson_guest_lecture.pptx" [day05] date = "2/10" [[day05.covers]] title = "Data representation models: The ER model" notes = "bookdown-notes/tidy-data-i-the-er-model.html" [[day05.covers]] title = "SQL I: Single-table Queries" notes = "bookdown-notes/sql-i-single-table-queries.html" [[day05.covers]] title = "Lecture Slides: Data representation models" notes = "slides/cmsc320_representation-models.pdf" #[day05.class_notes] #link = "cmsc320_class-notes_20180208.pdf" # [[day05.workdue]] # title = "HW1: Datatypes and wrangling" # link = "homeworks/datatypes_wrangling/" [day06] date = "2/12" [[day06.covers]] title = "Relational data operations: Joins" notes = "bookdown-notes/two-table-operations.html" [[day06.covers]] title = "Rmarkdown and SQL" notes = "misc/sql_baseball.Rmd" [[day06.covers]] title = "Pandas and SQL" notes = "ipynb/pandas_sql.ipynb" [[day06.covers]] title = "Lecture Slides: Joins and Entity Resolution" notes = "slides/cmsc320_representation-models.pdf" # [[day07.covers]] # title = "More SQL: data defintion, consistency and other constructs" # notes = "bookdown-notes/sql-system-constructs.html" # [[day08.covers]] # title = "SQL examples with baseball (Rmarkdown file)" # notes = "classroom-notes/sql_baseball.Rmd" # [[day08.covers]] # title = "SQL examples with baseball (rendered HTML file)" # notes = "classroom-notes/sql_baseball.html" #[day06.class_notes] #link = "cmsc320_class-notes_20180213.pdf" [day07] date = "2/17" [[day07.covers]] title = "Notebook: Building a complex SQL query" notes = "misc/sql_pr.Rmd" [[day07.covers]] title = "Entity Resolution and Record Linkage" notes = "bookdown-notes/entity-resolution-and-record-linkage.html" [[day07.covers]] title = "Lecture Slides: Joins and Entity Resolution" notes = "slides/cmsc320_representation-models.pdf" #[day07.class_notes] #link = 
"cmsc320_class-notes_20180213.pdf" [day08] date = "2/19" [[day08.covers]] title = "DB parting shots: query optimization and the JSON data model" notes = "bookdown-notes/db-parting-shots.html" [[day08.covers]] title = "Lecture Slides: Query optimization and JSON" notes = "slides/cmsc320_representation-models.pdf" [[day08.covers]] title = "Common operations for data tidying" notes = "bookdown-notes/tidying-data.html" [[day08.covers]] title = "Lecture Slides: Common operations for data tidying" notes = "slides/cmsc320_tidy-data.pdf" [day08a] date = "2/24" [[day08.covers]] title = "Data acquisition: loading and scraping" notes = "bookdown-notes/ingesting-data.html" [[day08a.covers]] title = "Lecture Slides: Scraping" notes = "slides/scrape-dickerson.pdf" [[day08.covers]] title = "Scraping with python: Beautiful Soup" notes = "https://www.digitalocean.com/community/tutorials/how-to-scrape-web-pages-with-beautiful-soup-and-python-3" [[day08a.covers]] title = "Representing and operating on network data" notes = "slides/cmsc320_network-data.pdf" [[day08.covers]] title = "Data cleaning: handling text and dates" notes = "bookdown-notes/text-and-dates.html" # [[day08a.workdue]] # title = "HW2: ER Model and SQL" # link = "homeworks/er_sql/" [day09] date = "2/26" [[day09.covers]] title = "Midterm I catch-up and review" notes = "misc/midtermI_review.pdf" # [[day10.covers]] # title = "Wrangling exercise" # notes = "misc/wrangling_exercise.html" #[day09.class_notes] #link = "cmsc320_class-notes_20180222.pdf" [day10] date = "3/2" [[day10.covers]] title = "Midterm I" [day11] date = "3/4" [[day11.covers]] title = "Iteration over rows in two data frames (R)" notes = "misc/two_tables_example_r.html" [[day11.covers]] title = "Iteration over rows in two data frames (python)" notes = "misc/two_tables_example_python.html" [[day11.covers]] title = "Exploratory Data Analysis 1: Visualization" notes = "bookdown-notes/exploratory-data-analysis-visualization.html" [[day11.covers]] title = 
"Lecture Slides: Exploratory Data Analysis 1 - Visualization" notes = "slides/cmsc320-eda.pdf" #[day11.class_notes] #link = "cmsc320_class-notes_20180301.pdf" [day12] date = "3/9" [[day12.covers]] title = "Exploratory Data Analysis 2: Summary statistics" notes = "bookdown-notes/exploratory-data-analysis-summary-statistics.html" [[day12.covers]] title = "Lecture Slides: Exploratory Data Analysis 2 - Summary Statistics" notes = "slides/cmsc320-eda.pdf" #[day12.class_notes] #link = "cmsc320_class-notes_20180306.pdf" # [day14a] # date = "3/15" # [[day14a.workdue]] # title = "Project 1: Data scraping and cleaning" # link = "projects/Project1/" [day13] date = "3/11" # [[day13.covers]] # title = "Project questions (3:30-3:55)" # [[day13.covers]] # title = "Data Science Day: <NAME> Keynote Talk (ESJ 0202)" # notes = "https://datascienceday.math.umd.edu/home/agenda" [[day13.covers]] title="Data transformations" notes = "bookdown-notes/eda-data-transformations.html" [[day13.covers]] title = "Lecture Slides: Data Transformations" notes = "slides/cmsc320-transform.pdf" [[day13.covers]] title = "Lecture Recording: Data Transformations" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=4cf585d7-59f0-4557-9228-ab89001a9242" #[day13.class_notes] #link = "cmsc320_class-notes_20180308.pdf" [day13a] date = "3/16" [[day13a.covers]] title = "SPRING BREAK" [day13b] date = "3/18" [[day13b.covers]] title = "SPRING BREAK" [day14] date = "3/23" [[day14.covers]] title = "CLASS CANCELLED - PREP WEEK FOR ONLINE MOVE" notes = "campus-closure/index.html" #[day14.class_notes] #link = "cmsc320_class-notes_20180314.pdf" [day15] date = "3/25" [[day15.covers]] title = "CLASS CANCELLED - PREP WEEK FOR ONLINE MOVE" notes = "campus-closure/index.html" [[day15.covers]] title = "Handling missing data" notes = "bookdown-notes/eda-handling-missing-data.html" [[day15.covers]] title = "Lecture slides: Handling missing data" notes = "slides/cmsc320_missing-data.pdf" [[day15.covers]] title = 
"Lecture recording: Handling missing data" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=f16d0b89-0456-44f0-8620-ab8901862c4d" # [day16a] # date = "4/1" # #[[day15a.workdue]] # #title = "Project 1: Data scraping and cleaning" # #link = "projects/project1/" # [[day16a.workdue]] # title = "Homework 3: Data Transformations" # link = "homeworks/transformations/" [day16] date = "3/30" [[day16.covers]] title = "Univariate distributions and statistics" notes = "bookdown-notes/univariate-distributions-and-statistics.html" [[day16.covers]] title = "Lecture slides: Univariate distributions and statistics" notes = "slides/cmsc320_stat-intro.pdf" [[day16.covers]] title = "Lecture Recordings: Univariate distributions and statistics" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=9f9ad8e0-2007-4ea4-842c-ab8e00ebdaf6" #[day16.class_notes] #link = "cmsc320_class-notes_20180327.pdf" # out for simons [day17] date = "4/1" [[day17.covers]] title = "Experiment design and hypothesis testing" notes = "bookdown-notes/experiment-design-and-hypothesis-testing.html" [[day17.covers]] title = "Multivariate probability and statistics" notes = "bookdown-notes/multivariate-probability.html" [[day17.covers]] title = "Lecture Slides: Design and multivariate probability" notes = "slides/cmsc320-stat_next.pdf" [[day17.covers]] title = "Lecture Recordings: Design and multivariate probability" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=493deda6-9724-4c8c-9ff5-ab9000e9f3a2" #[day17.class_notes] #link = "cmsc320_class-notes_20180329.pdf" [day18] date = "4/6" [[day18.covers]] title = "Data Analysis with Geometry" notes = "bookdown-notes/data-analysis-with-geometry.html" [[day18.covers]] title = "Lecture Slides: Data analysis with geometry" notes = "slides/cmsc320_geometry.pdf" [[day18.covers]] title = "Linear models for regression" notes = "bookdown-notes/linear-regression.html" [[day18.covers]] title = "Lecture Slides: Linear 
models for regression" notes = "slides/cmsc320_linear-regression.pdf" [[day18.covers]] title = "Lecture Recording: Data analysis with geometry and linear regression" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=b2f39070-508d-42c5-9d84-ab9500db861c" [[day18.covers]] title = "Jupyter Notebook: Linear regression in python with statsmodels" notes = "ipynb/regression.ipynb" #[day18.class_notes] #link = "cmsc320_class-notes_20180403.pdf" # [day18a] # date = "4/5" # [[day18a.workdue]] # title = "Project 2: Data Wrangling and EDA" # link = "projects/project2/" [day19] date = "4/8" [[day19.covers]] title = "Linear models for classification" notes = "bookdown-notes/linear-models-for-classification.html" [[day19.covers]] title = "Lecture Slides: Linear models for classification" notes = "slides/cmsc320_logistic-regression.pdf" [[day19.covers]] title = "Lecture Recording: Linear and logistic regression" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=78509e89-a0d8-4040-bc8b-ab9700ece3ad" #[day19.class_notes] #link = "cmsc320_class-notes_20180405.pdf" [day20] date = "4/13" [[day20.covers]] title = "Solving linear ML problems" notes = "bookdown-notes/solving-linear-ml-problems.html" [[day20.covers]] title = "Lecture Slides: Solving linear ML problems" notes = "slides/cmsc320_solving-linear-problems.pdf" [[day20.covers]] title = "Lecture Recording: Solving linear ML problems (Gradient Descent)" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=fdc434bd-67df-4d0d-9938-ab9c011d04e4" #[day20.class_notes] #link = "cmsc320_class-notes_20180410.pdf" # out for daghstul [day21] date = "4/15" [[day21.covers]] title = "Midterm II catch-up and review" notes = "misc/midtermII_review.pdf" # [[day20.workdue]] # title = "Homework 4: A/B Testing" # link = "homeworks/ab_testing/" #[day21.class_notes] #link = "cmsc320_class-notes_20180412.pdf" # [[day20.workdue]] # title = "Project 2: Data wrangling and EDA" # link = 
"projects/Project2/" [day22] date = "4/20" [[day22.covers]] title = "Midterm II" # [[day22.covers]] # title = "In class exercise: classification" # notes = "misc/prediction_metrics.pdf" # [[day22.covers]] # title = "In class discussion: gradient descent for logistic regression" # notes = "bookdown-notes/solving-linear-ml-problems.html#logistic-regression-1" #[day23.class_notes] #link = "cmsc320_class-notes_20180419.pdf" # [day21a] # date = "4/19" # [[day21a.workdue]] # title = "Homework 5: Regression Interpretation" # link = "homeworks/regression/" #[day22.class_notes] #link = "cmsc320_class-notes_20180417.pdf" [day23] date = "4/22" [[day23.covers]] title = "Non-linear models for regression and classification: Trees" notes = "bookdown-notes/tree-based-methods.html" [[day23. covers]] title = "Random Forests" notes = "bookdown-notes/tree-based-methods.html" [[day23.covers]] title = "Lecture Slides: Tree-Based Methods" notes = "slides/cmsc320_tree-methods.pdf" [[day23.covers]] title = "Lecture Videos: Tree-based Methods" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=4f849dc6-b286-4fb3-86ed-aba501114e26" # [[day22.covers]] # title = "Non-linear models for regression and classification: K-Nearest Neighbors" # notes = "lecture-note/svms/" [day24] date = "4/27" [[day24.covers]] title = "Classification Metrics" notes = "bookdown-notes/model-selection-and-evaluation.html#classifier-evaluation" [[day24.covers]] title = "Model evaluation using resampling" notes = "bookdown-notes/model-selection-and-evaluation.html#model-selection" [[day24.covers]] title = "Lecture slides: Classifer evaluation and model selection" notes = "slides/cmsc320_model_selection.pdf" [[day24.covers]] title = "Lecture videos: Model evaluation and selection" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=c21e65b7-0504-4ee5-8030-abaa00dcfcd0" [day25] date = "4/29" [[day25.covers]] title = "Unsupervised Learning: Clustering" notes = 
"bookdown-notes/unsupervised-learning-clustering.html" [[day25.covers]] title = "Lecture Slides: Clustering" notes = "slides/cmsc320_clustering.pdf" [[day25.covers]] title = "Unsupervised Learning: Dimensionality Reduction" notes = "bookdown-notes/unsupervised-learning-dimensionality-reduction.html" [[day25.covers]] title = "Lecture Slides: PCA" notes = "slides/cmsc320_dimensionality-reduction.pdf" [[day25.covers]] title = "Lecture Videos: Unsupervised Learning" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=7b418972-f55c-4cf8-85a3-abac0116adc0" #[day25.class_notes] #link = "cmsc320_class-notes_20180426.pdf" # [[day24.covers]] # title = "Project 3 Discussion: matrix representation of learning problems, maximum likelihood and logistic regression" # [day25a] # date = "5/6" # [[day25a.workdue]] # title = "Project 3: Regression and Classification" # link = "projects/project3/" [day26] date = "5/4" [[day26.covers]] title = "Introduction to interactive data visualization" notes = "slides/cmsc320_interactive-vis.pdf" [[day26.covers]] title = "Lecture Videos: Interactive Visualization" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=000ec697-532c-4016-a62e-abb10122b530" [day27] date = "5/6" [[day27.covers]] title = "Writing about data, analysis and inferences" notes = "slides/cmsc320_presentation.pdf" [[day27.covers]] title = "Design principles for data visualization" notes = "slides/cmsc320_presentation.pdf" [[day27.covers]] title = "Lecture Videos: Communication" notes = "https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=88fd2bdd-612b-42b8-a609-abb300ded7b7" #[day26.class_notes] #link = "cmsc320_class-notes_20180501.pdf" [day28] date = "5/11" [[day28.covers]] title = "The Keras way to (deep) learn" notes = "slides/cmsc320_deeplearn.pdf" [[day28.covers]] title = "Course wrapup and perspective" notes = "slides/cmsc320_wrapup.pdf" [[day28.covers]] title = "Lecture Videos: Deep Learning and Wrapup" notes = 
"https://umd.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=5cf50d83-8352-4f9f-a13f-abb800dc0fe0" # [day28] # date = "5/14" #[day29] #date = "5/14" # [[day28.workdue]] # title = "Project 4: Interactive data maps" # link = "projects/project4/" # [day29] # date = "5/17" # [[day29.workdue]] # title = "Bonus Project: Classification" # link = "projects/bonus_project/" # [day30] # date = "5/22" # [[day30.workdue]] # title = "Final Project (due 12:30pm)" # link = "projects/final_project/" # [day29] # date = "12/2" # [[day29.covers]] # title = "THIS IS NOT A LECTURE DAY" # [[day29.workdue]] # title = "Project 3: Regression and Classification" # link = "projects/" # [[day32.workdue]] # title = "Project 4: Interactive Data Visualization" # link = "projects/" # [day33] # date = "12/17" # [[day33.covers]] # title = "Final exam (1:30p-3:30p)" <file_sep>/materials/lecture-notes/scratch/12-relational_operations.Rmd # Relational Operations <file_sep>/content/projects/final_project.md --- date: 2018-04-03T10:57:10-04:00 title: "CMSC320 Final Project" author: "<NAME> (original design: <NAME>)" --- ## Summary In lieu of a final exam, CMSC320 students will turn in a tutorial that will walk users through the entire data science pipeline: data curation, parsing, and management; exploratory data analysis; hypothesis testing and machine learning to provide analysis; and then the curation of a message or messages covering insights learned during the tutorial. Students may choose an application area and dataset(s) that are of interest to them; please feel free to be creative about this! Remember, the [course resources page](/resources) has a number of data repositories from where you can download data. You could also create your own dataset using scraping skills you have learned in class. We in fact, encourage this. The tutorial should be self-contained as an Rmarkdown document, and delivered as a GitHub statically-hosted Page (described below). 
You can see examples here: For R - https://www.kaggle.com/kernels?sortBy=votes&group=everyone&pageSize=20&language=R For python - https://www.kaggle.com/kernels?sortBy=votes&group=everyone&pageSize=20&language=Python In general, the tutorial should contain at least 1500 words of prose and 150 lines of (nonpadded, legitimate) R or Python code, along with appropriate documentation, visualization, and links to any external information that might help the reader. ### Github Pages GitHub provides a service called Pages (https://pages.github.com/) that provides website hosting functionality backed by a GitHub-based git repository. We would like you to host your final project on a GitHub Pages project site. To do this, you will need to: 1. Create a GitHub account (or use the one you already have) with username username. 2. Create a git repository titled username.github.io; make sure username is the same as whatever you chose for your global GitHub account. 3. Create a project within this repository. This is where you’ll dump your Rmarkdown file and an HTML export of that Rmarkdown file. The deliverable to the CMSC320 staff will then be a single URL pointing to this publicly hosted GitHubs Pages-backed website. ## Grading We will assign a numeric score between 1 and 10 for each of the following six criteria: 1. **Motivation**. Does the tutorial make the reader believe the topic is relevant or important (i) in general and (ii) with respect to data science? 2. **Understanding**. After reading through the tutorial, does an uninformed reader feel informed about the topic? Would a reader who already knew about the topic feel like s/he learned more about it? 3. **Other resources**. Does the tutorial link out to other resources (on the web, in books, etc) that would give a lagging reader additional help on specific topics, or an advanced reader the ability to dive more deeply into a specific application area or technique? 4. **Prose**. 
Does the prose portion of the tutorial actually add to the content of the deliverable? 5. **Code**. Is the code well written, well documented, reproducible, and does it help the reader understand the tutorial? Does it give good examples of specific techniques? 6. **Subjective evaluation**. If somebody linked to this tutorial from, say, Hacker News, would people actually read through the entire thing? ## Group Work Final projects can be prepared in groups of at most three members. On ELMS, each individual in a group will be asked to submit the link to the github page hosting their project, plus a statement about group composition and contributions. Further instructions will be available on the ELMS submission page. <file_sep>/materials/classroom-scripts/cmsc320_class-script_20190219.R library(tidyverse) library(rvest) # parse html file into node data structure url <- "https://www.spaceweatherlive.com/en/solar-activity/top-50-solar-flares" html <- url %>% read_html() # select tables by element # css selector html %>% html_nodes("table") # select document node with # top 50 solar flare table # by class css selector table_node <- html %>% html_nodes(".table-responsive-md") # select children nodes corresponding # to table rows using element css # selector table_node %>% html_nodes("tr") # parse table into data frame table_node %>% html_table() <file_sep>/materials/projects/gapminder.Rmd --- title: "Project 3: Regression analysis of Gapminder data" author: "CMSC320" output: html_document --- ```{r, echo=FALSE} knitr::opts_chunk$set(cache=FALSE) ``` **POSTED**: April 7, 2020 **LAST UPDATE**: April 20, 2020 **DUE**: April 20, 2020 In this project you will practice and experiment with linear regression using data from [gapminder.org](http://gapminder.org). I recommend spending a little time looking at material there, it is quite an informative site. 
We will use a subset of the gapminder data provided by [<NAME>](http://www.stat.ubc.ca/~jenny/) described in its [github page](https://github.com/jennybc/gapminder). The following commands load the dataset in R ```{r} library(gapminder) data(gapminder) gapminder ``` For this exercise you will explore how life expectancy has changed over 50 years across the world, and how economic measures like gross domestic product (GDP) are related to it. For python (or R I suppose), you can get the data from http://www.hcbravo.org/IntroDataSci/misc/gapminder.csv. **Exercise 1**: _Make a scatter plot of life expectancy across time._ **Question 1**: _Is there a general trend (e.g., increasing or decreasing) for life expectancy across time? Is this trend linear? (answering this qualitatively from the plot, you will do a statistical analysis of this question shortly)_ A slightly different way of making the same plot is looking at the distribution of life expectancy across countries as it changes over time: ```{r, fig.width=12, message=FALSE} library(tidyverse) library(ggplot2) gapminder %>% ggplot(aes(x=factor(year), y=lifeExp)) + geom_violin() + labs(title="Life expectancy over time", x = "year", y = "life expectancy") ``` This type of plot is called a _violin plot_, and it displays the distribution of the variable in the y-axis for each value of the variable in the x-axis. **Question 2**: _How would you describe the distribution of life expectancy across countries for individual years? Is it skewed, or not? Unimodal or not? Symmetric around its center?_ Based on this plot, consider the following questions. **Question 3**: _Suppose I fit a linear regression model of life expectancy vs. year (treating it as a continuous variable), and test for a relationship between year and life expectancy, will you reject the null hypothesis of no relationship? (do this without fitting the model yet. 
I am testing your intuition.)_ **Question 4**: _What would a violin plot of residuals from the linear model in Question 3 vs. year look like? (Again, don't do the analysis yet, answer this intuitively)_ **Question 5**: _According to the assumptions of the linear regression model, what **should** that violin plot look like?_ **Exercise 2**: _Fit a linear regression model using the `lm` function for life expectancy vs. year (as a continuous variable). Use the `broom::tidy` to look at the resulting model._ **Question 6**: _On average, by how much does life expectancy increase every year around the world?_ **Question 7**: _Do you reject the null hypothesis of no relationship between year and life expectancy? Why?_ **Exercise 3**: _Make a violin plot of residuals vs. year for the linear model from Exercise 2 (use the `broom::augment` function)._ **Question 8**: _Does the plot of Exercise 3 match your expectations (as you answered Question 4)?_ **Exercise 4**: _Make a boxplot (or violin plot) of model residuals vs. continent._ **Question 9**: _Is there a dependence between model residual and continent? If so, what would that suggest when performing a regression analysis of life expectancy across time?_ **Exercise 5**: _Use `geom_smooth(method=lm)` in ggplot as part of a scatter plot of life expectancy vs. year, grouped by continent (e.g., using the `color` aesthetic mapping)._ **Question 10**: _Based on this plot, should your regression model include an interaction term for continent **and** year? Why?_ **Exercise 6**: _Fit a linear regression model for life expectancy including a term for an interaction between continent and year. Use the `broom::tidy` function to show the resulting model._ **Question 11**: _Are all parameters in the model significantly different from zero? If not, which are not significantly different from zero?_ **Question 12**: _On average, by how much does life expectancy increase each year for each continent? 
(Provide code to answer this question by extracting relevant estimates from model fit)_ **Exercise 7**: _Make a residuals vs. year violin plot for the interaction model. Comment on how well it matches assumptions of the linear regression model. Do the same for a residuals vs. fitted values plot._ ### Submission Prepare and knit a single Rmarkdown file or jupyter notebook that includes submission information described above. All axes in plots should be labeled in an informative manner. Your answers to any exercise that refers to a plot should include both (a) a text description of your plot, and (b) a sentence or two of interpretation. <file_sep>/materials/lecture-notes/15-ingesting.Rmd # Ingesting data Now that we have a better understanding of data analysis languages with `tidyverse` and `SQL`, we turn to the first significant challenge in data analysis, getting data into R in a shape that we can use to start our analysis. We will look at two types of data ingestion: _structured ingestion_, where we read data that is already structured, like a comma separated value (CSV) file, and _scraping_ where we obtain data from text, usually in websites. There is an excellent discussion on data import here: http://r4ds.had.co.nz/data-import.html ## Structured ingestion ### CSV files (and similar) We saw in a previous chapter how we can use the `read_csv` file to read data from a CSV file into a data frame. Comma separated value (CSV) files are structured in a somewhat regular way, so reading into a data frame is straightforward. Each line in the CSV file corresponds to an observation (a row in a data frame). Each line contains values separated by a comma (`,`), corresponding to the variables of each observation. This ideal principle of how a CSV file is constructed is frequently violated by data contained in CSV files. To get a sense of how to deal with these cases look at the documentation of the `read_csv` function. 
For instance: - the first line of the file may or may not contain the names of variables for the data frame (`col_names` argument). - strings are quoted using `'` instead of `"` (`quote` argument) - missing data is encoded with a non-standard code, e.g., `-` (`na` argument) - values are separated by a character other than `,` (`read_delim` function) - file may contain header information before the actual data so we have to skip some lines when loading the data (`skip` argument) You should read the documentation of the `read_csv` function to appreciate the complexities it can maneuver when reading data from structured text files. ```{r, eval=FALSE} ?read_csv ``` When loading a CSV file, we need to determine how to treat values for each attribute in the dataset. When we call `read_csv`, it guesses as to the best way to parse each attribute (e.g., is it a number, is it a factor, is it free text, how is missing data encoded). The `readr` package implements a set of core functions `parse_*` that parse vectors into different data types (e.g., `parse_number`, `parse_datetime`, `parse_factor`). When we call `read_csv` it will print its data type guesses and any problems it encounters. The `problems` function lets you inspect parsing problems. E.g., ```{r} df <- read_csv(readr_example("challenge.csv")) problems(df) ``` The argument `col_types` is used to help the parser handle datatypes correctly. In class discussion: how to parse `readr_example("challenge.csv")` Other hints: - You can read every attribute as character using `col_types=cols(.default=col_character())`. Combine this with `type_convert` to parse character attributes into other types: ```{r} df <- read_csv(readr_example("challenge.csv"), col_types=cols(.default=col_character())) %>% type_convert(cols(x=col_double(), y=col_date())) ``` - If nothing else works, you can read file lines using `read_lines` and then parse lines using string processing operations (which we will see shortly). 
### Excel spreadsheets Often you will need to ingest data that is stored in an Excel spreadsheet. The `readxl` package is used to do this. The main function for this package is the `read_excel` function. It contains similar arguments to the `read_csv` function we saw above. ## Scraping Often, data we want to use is hosted as part of HTML files in webpages. The markup structure of HTML allows us to parse data into tables we can use for analysis. Let's use the Rotten Tomatoes ratings webpage for Diego Luna as an example: ![](img/rt_diegoluna.png) We can scrape ratings for his movies from this page. To do this we need to figure out how the HTML page's markup can help us write R expressions to find this data in the page. Most web browsers have facilities to show page markup. In Google Chrome, you can use `View>Developer>Developer Tools`, and inspect the page markup to find where the data is contained. In this example, we see that the data we want is in a `<table>` element in the page, inside a container with class `celebrity-filmography`. ![](img/rt_devtools.png) Now that we have that information, we can use the `rvest` package to scrape this data: ```{r ingest_dl, cache=TRUE} library(rvest) url <- "https://www.rottentomatoes.com/celebrity/diego_luna" dl_tab <- url %>% read_html() %>% html_node(".celebrity-filmography") %>% html_node("table") %>% html_table() head(dl_tab) ``` The main two functions we used here are `html_node` and `html_table`. `html_node` finds elements in the HTML page according to some selection criteria. Since we want the element with class `celebrity-filmography` we use the `.` selection operator since that corresponds to selection by class. Once the desired element in the page is selected, we can use the `html_table` function to parse the element's text into a data frame. The argument to the `html_node` function uses CSS selector syntax: https://www.w3.org/TR/CSS2/selector.html **On your own:** If you wanted to extract the TV filmography from the page, how would you change this call? 
### Scraping from dirty HTML tables We saw above how to extract data from HTML tables. But what if the data we want to extract is not cleanly formatted as a HTML table, or is spread over multiple html pages? Let's look at an example where we scrape titles and artists from billboard #1 songs: https://en.wikipedia.org/wiki/List_of_Billboard_Hot_100_number-one_singles_of_2017 Let's start by reading the HTML markup and finding the document node that contains the table we want to scrape ```{r read_billboard_2017} library(rvest) url <- "https://en.wikipedia.org/wiki/List_of_Billboard_Hot_100_number-one_singles_of_2017" singles_tab_node <- read_html(url) %>% html_node(".plainrowheaders") singles_tab_node ``` Since the rows of the table are not cleanly aligned, we need to extract each attribute separately. Let's start with the dates in the first column. Since we noticed that the nodes containing dates have attribute `scope` we use the attribute CSS selector `[scope]`. ```{r extract_date} dates <- singles_tab_node %>% html_nodes("[scope]") %>% html_text() dates %>% head() ``` Next, we extract song titles, first we grab the `tr` (table row) nodes and extract from each the first `td` node using the `td:first-of-type` CSS selector. Notice that this gets us the header row which we remove using the `magrittr::extract` function. The title nodes also tell us how many rows this spans, which we grab from the `rowspan` attribute. 
```{r extract_titles} title_nodes <- singles_tab_node %>% html_nodes("tr") %>% html_node("td:first-of-type") %>% magrittr::extract(-1) song_titles <- title_nodes %>% html_text() title_spans <- title_nodes %>% html_attr("rowspan") cbind(song_titles, title_spans) %>% head(10) ``` To get artist names we get the second data element (`td`) of each row using the `td:nth-of-type(2)` CSS selector (again removing the first entry in result coming from the header row) ```{r extract_artists} artist_nodes <- singles_tab_node %>% html_nodes("tr") %>% html_node("td:nth-of-type(2)") %>% magrittr::extract(-1) artists <- artist_nodes %>% html_text() artists %>% head(10) ``` Now that we've extracted each attribute separately we can combine them into a single data frame ```{r make_df} billboard_df <- data_frame(month_day=dates, year="2017", song_title_raw=song_titles, title_span=title_spans, artist_raw=artists) billboard_df ``` This is by no means a clean data frame yet, but we will discuss how to clean up data like this in later lectures. We can now abstract these operations into a function that scrapes the same data for other years. 
```{r scrape_fun} scrape_billboard <- function(year, baseurl="https://en.wikipedia.org/wiki/List_of_Billboard_Hot_100_number-one_singles_of_") { url <- paste0(baseurl, year) # find table node singles_tab_node <- read_html(url) %>% html_node(".plainrowheaders") # extract dates dates <- singles_tab_node %>% html_nodes("[scope]") %>% html_text() # extract titles and spans title_nodes <- singles_tab_node %>% html_nodes("tr") %>% html_node("td:first-of-type") %>% magrittr::extract(-1) song_titles <- title_nodes %>% html_text() title_spans <- title_nodes %>% html_attr("rowspan") # extract artists artist_nodes <- singles_tab_node %>% html_nodes("tr") %>% html_node("td:nth-of-type(2)") %>% magrittr::extract(-1) artists <- artist_nodes %>% html_text() # make data frame data_frame(month_day=dates, year=year, song_title_raw=song_titles, title_span=title_spans, artist_raw=artists) } scrape_billboard("2016") ``` We can do this for a few years and create a (very dirty) dataset with songs for this current decade: ```{r scrape_decade} billboard_tab <- as.character(2010:2017) %>% purrr::map_df(scrape_billboard) billboard_tab %>% head(20) %>% knitr::kable("html") ``` The function `purrr::map_df` is an example of a very powerful idiom in functional programming: mapping functions on elements of vectors. Here, we first create a vector of years (as strings) using `as.character(2010:2017)` we pass that to `purrr::map_df` which applies the function we create, `scrape_billboard` on each entry of the year vector. Each of these calls evaluates to a `data_frame` which are then bound (using `bind_rows`) to create a single long data frame. The tidyverse package `purrr` defines a lot of these functional programming idioms. 
One more thing: here's a very nice example of `rvest` at work: https://deanattali.com/blog/user2017/ <file_sep>/content/lecture-note/eda/index.md --- date: 2016-09-27T18:46:11-04:00 title: Exploratory Data Analysis (EDA) --- Visualization and summarization methods for effective data exploration. [Lecture Notes: Data visualization for EDA](visualization_eda/) [Lecture Notes: Data summarization for EDA](eda_summary_stats/) [Lecture Notes: Data transformations](transformations/) [Lecture Notes: Handling missing data](missing_data/) ## Additional Resources - [Derivative review](/misc/derivative_cheat_sheet.pdf) - [Zumen & Mount Ch. 3]({{< elmurl >}}/files/44629545) - [R for Data Science Ch. 7](http://r4ds.had.co.nz/exploratory-data-analysis.html) - [ggplot2 cheatsheet](http://www.rstudio.com/wp-content/uploads/2015/12/ggplot2-cheatsheet-2.0.pdf) - [ggplot2 textbook](http://www.amazon.com/ggplot2-Elegant-Graphics-Data-Analysis/dp/0387981403) <file_sep>/data/datasci_corner.toml [[entries]] date = "5/14" description = "Keras (Python Deep Learning framework)" link = "https://keras.io/" show = "" [[entries]] date = "5/14" description = "Keras in R" link = "https://keras.rstudio.com/" show = "" [[entries]] date = "5/6" description = "Computational Journalism" link = "https://engineering.stanford.edu/magazine/article/what-future-computational-journalism" show = "" [[entries]] date = "5/4" description = "Shiny: interactive data apps with R" link = "http://shiny.rstudio.com/" show = "" [[entries]] date = "5/4" description = "D3.js: interactive data visualization for JavaScript" link = "https://d3js.org/" show = "" # [[entries]] # date = "5/8" # description = "Visualizing High Dimensional Data In Augmented Reality" # link = "https://medium.com/inside-machine-learning/visualizing-high-dimensional-data-in-augmented-reality-2150a7e62d5b" # [[entries]] # date = "5/8" # description = "Data Science For Good: DonorsChoose" # link = "https://www.kaggle.com/donorschoose/io" # [[entries]] 
# date = "5/1" # description = "I'm not the only one!!!" # link = "https://xkcd.com/1987/" # [[entries]] # date = "5/7" # description = "The ML algorithms used in self driving cars" # link = "https://www.kdnuggets.com/2017/06/machine-learning-algorithms-used-self-driving-cars.html" # show = "" # [[entries]] # date = "5/2" # description = "Rstudio Updated!! (Don't update until class is done...)" # link = "https://blog.rstudio.com/2019/04/30/rstudio-1-2-release/" # show = "" # [[entries]] # date = "4/25" # description = "Ursa labs, shared infrastructure for data science" # link = "https://ursalabs.org/" # show = "" # [[entries]] # date = "4/23" # description = "Another podcast (via Marine C.)" # link = "http://www.thetalkingmachines.com/episodes" # show = "" # [[entries]] # date = "4/18" # description = "Restoration of brain circulation and cellular function hours post-mortem" # link = "https://www.npr.org/sections/health-shots/2019/04/17/714289322/scientists-restore-some-function-in-the-brains-of-dead-pigs" # show = "" # [[entries]] # date = "4/16" # description = "The 2nd generation p-value" # link = "http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0188299" # show = "" # [[entries]] # date = "4/3" # description = "Mobility charts from The Upshot @ NYTimes" # link = "https://www.nytimes.com/interactive/2018/03/27/upshot/make-your-own-mobility-animation.html?rref=collection%2Fsectioncollection%2Fupshot&action=click&contentCollection=upshot&region=rank&module=package&version=highlights&contentPlacement=7&pgtype=sectionfront" # [[entries]] # date = "3/29" # description = "About this Cambridge Analytica Mess" # link = "https://arstechnica.com/tech-policy/2018/03/facebooks-cambridge-analytica-scandal-explained/" # [[entries]] # date = "3/29" # description = "" # link = "https://motherboard.vice.com/en_us/article/mg9vvn/how-our-likes-helped-trump-win" # [[entries]] # date = "3/29" # description = "The research where it all started. 
See (a) 'Psychological targeting as an effective approach to digital mass persuasion', and (b) 'Facebook as a Research Tool for the Social Sciences' in the publications list" # link = "http://www.michalkosinski.com/" # [[entries]] # date = "3/27" # description = "Data Science Design Manual (via <NAME>.)" # link = "http://www.springer.com/gp/book/9783319554433" # [[entries]] # date = "3/13" # description = "CBCB Summer Internship Program" # link = "https://www.cbcb.umd.edu/summer-internships" # [[entries]] # date = "4/11" # description = "The amazing life and contributions of <NAME>" # link = "https://www.stat.berkeley.edu/~brill/Papers/life.pdf" # show="" # [[entries]] # date = "2/20" # description = "Of topical interest: example paper from Facebook modeling behavior to spot illegitimate use" # link = "https://research.fb.com/publications/copycatch-stopping-group-attacks-by-spotting-lockstep-behavior-in-social-networks/" # [[entries]] # date = "4/4" # description = "FDA new rules for AI systems in medicine" # link = "https://www.statnews.com/2019/04/02/fda-new-rules-for-artificial-intelligence-in-medicine/" # show = "" [[entries]] date = "4/29" description = "A commentary on the Santa Clara study I alluded to (from <NAME>, his blog is quite the resource, highly recommended)" link = "https://statmodeling.stat.columbia.edu/2020/04/19/fatal-flaws-in-stanford-study-of-coronavirus-prevalence/" show = "" [[entries]] date = "4/27" description = "a16z: Another podcast I like (for those with an entrepreneurial bent)" link = "https://a16z.com/2020/04/26/journal-club-machine-learning-antibiotics-coronavirus-protein-structures/" show = "" [[entries]] date = "4/22" description = "From the StitchFix Data Science Blog" link = "https://multithreaded.stitchfix.com/blog/2019/03/11/FullStackDS-Generalists/" show = "" # [[entries]] # date = "3/14" # description = "Donoho's Deep Learning Emergent Phenomena: ImageNet" # link = 
"https://qz.com/1034972/the-data-that-changed-the-direction-of-ai-research-and-possibly-the-world/" # show = "" # [[entries]] # date = "3/5" # description = "Kaggle Data Science Survey" # link = "https://www.kaggle.com/surveys/2017?utm=cade" # show = "" # [[entries]] # date = "2/26" # description = "The book: playing the percentages in baseball" # link = "https://amzn.to/2BPzJ0X" # show = "" [[entries]] date = "4/15" description = "More on gradient descent" link = "https://towardsdatascience.com/understanding-the-mathematics-behind-gradient-descent-dde5dc9be06e" show = "" [[entries]] date = "4/13" description = "A little blog post on testing in a Bayesian frame of mind" link = "https://www.allendowney.com/blog/2020/04/13/bayesian-hypothesis-testing/" entries = "" [[entries]] date = "4/8" description = "Hacking pdfs for mobility data" link = "https://www.nxn.se/valent/2020/4/7/converting-images-of-line-graphs-to-data" show = "" [[entries]] date = "4/6" description = "Kaggle COVID-19 Open Research Dataset Challenge" link = "https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge" show = "" [[entries]] date = "4/1" description = "Visual explanation of stats and probability" link = "http://students.brown.edu/seeing-theory/" show = "" [[entries]] date = "3/11" description = "COVID-19 Data" link = "https://github.com/CSSEGISandData/COVID-19" show = "" [[entries]] date = "3/9" description = "Uber Engineering Blogpost: platform in 2019" link = "https://eng.uber.com/uber-data-platform-2019/" show = "" [[entries]] date = "3/9" description = "Uber Engineering Blogpost on their Data Science Workbench" link = "https://eng.uber.com/dsw/" show = "" [[ entries ]] date = "3/4" description = "NASA's Helio Hackweek" link = "https://heliohackweek.github.io/" show = "" [[ entries ]] date = "2/24" description = "Data Science is becoming Software Engineering" link = "https://towardsdatascience.com/data-science-is-becoming-software-engineering-53e31314939a" show = "" 
[[entries]] date = "2/19" description = "One take on Data Science Survey" link = "https://www.theverge.com/2017/11/1/16589246/machine-learning-data-science-dirty-data-kaggle-survey-2017" show = "" [[entries]] date = "2/17" description = "Example wrangling with R and Python" link = "https://www.superdatascience.com/wrangling-data-r-python/" show = "" [[entries]] date = "2/12" description = "The Julia programming language" link = "https://julialang.org/" show = "" [[entries]] date = "2/10" description = "Best practices for computational science" link = "https://openresearchsoftware.metajnl.com/articles/10.5334/jors.ay/" show = "" [[entries]] date = "2/10" description = "Good enough practices in scientific computing" link = "http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005510" show = "" [[entries]] date = "2/7" description = "RStudio conference just finished last week" link = "https://resources.rstudio.com/rstudio-conf-2019" show = "" [[entries]] date = "1/29" description = "Data Skeptic: Podcast and more" link = "https://dataskeptic.com/" show = "" [[entries]] date = "1/27" description = "Enterprise Data Analysis and Visualization: An Interview Study. <NAME>, <NAME>, <NAME>, <NAME>" link = "https://idl.cs.washington.edu/papers/enterprise-analysis-interviews/" show = "" [[entries]] date = "1/27" description = "<NAME>onoho (Stanford Prof. 
of Statistics): 50 years of Data Science" link = "https://www.tandfonline.com/doi/full/10.1080/10618600.2017.1384734" show = "" <file_sep>/materials/slides/representation_models/representation-models.html <!DOCTYPE html> <html lang="" xml:lang=""> <head> <title>Data Representation Models</title> <meta charset="utf-8" /> <meta name="author" content="<NAME>" /> <meta name="date" content="2020-02-12" /> <link href="libs/remark-css/default.css" rel="stylesheet" /> <link href="libs/remark-css/default-fonts.css" rel="stylesheet" /> <link rel="stylesheet" href="custom.css" type="text/css" /> </head> <body> <textarea id="source"> class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Data Representation Models] .author[<NAME>] .other-info[ University of Maryland, College Park, USA 2020-02-12 ] .logo[![](img/logo.png)] --- layout: true ## Overview --- Principles of preparing and organizing data in a way that is amenable for analysis. -- **Data representation model**: collection of concepts that describes how data is represented and accessed. -- Thinking abstractly of data structure, beyond a specific implementation, makes it easier to share data across programs and systems, and integrate data from different sources. --- - **Structure**: We have assumed that data is organized in rectangular data structures (tables with rows and columns) - **Semantics**: We have discussed the notion of _values_, _attributes_, and _entities_. -- So far, _data semantics_: a dataset is a collection of _values_, numeric or categorical, organized into _entities_ (_observations_) and _attributes_ (_variables_). -- Each _attribute_ contains values of a specific measurement across _entities_, and _entities_ collect all measurements across _attributes_. --- In the database literature, we call this exercise of defining structure and semantics as _data modeling_. 
-- In this course we use the term _data representational modeling_, to distinguish from _data statistical modeling_. --- layout: true ## Data representational modeling --- - **Data model**: A collection of concepts that describes how data is represented and accessed - **Schema**: A description of a specific collection of data, using a given data model --- - Modeling Constructs: A collection of concepts used to represent the structure in the data. Typically we need to represent types of *entities*, their *attributes*, types of *relationships* between *entities*, and *relationship attributes* --- - Integrity Constraints: Constraints to ensure data integrity (i.e., avoid errors) -- - Manipulation Languages: Constructs for manipulating the data --- We desire that models are: - sufficiently _expressive_ so they can capture real-world data well, - _easy to use_, - lend themselves to defining computational methods that have good performance. --- Some examples of data models are - Relational, Entity-relationship model, XML... - Object-oriented, Object-relational, RDF... - Current favorites in the industry: JSON, Protocol Buffers, [Avro](http://avro.apache.org/docs/current/), Thrift, Property Graph --- - **Data independence:** The idea that you can change the representation of data w/o changing programs that operate on it. - **Physical data independence:** I can change the layout of data on disk and my programs won't change - index the data - partition/distribute/replicate the data - compress the data - sort the data --- layout: true ## The Entity-Relationship and Relational Models --- class: split-50 .column[ Modeling constructs: - _entities_ and their _attributes_ - _relationships_ and _relationship attributes_. Entities are objects represented in a dataset: people, places, things, etc. Relationships model just that, relationships between entities. 
] .column[ .center.middle.image-50[![](img/er2.png)] ] --- class: split-50 .column[ Diagrams: - rectangles are _entitites_ - diamonds and edges indicate _relationships_ - Circles describe either entity or relationship _attributes_. ] .column[ .center.middle.image-50[![](img/er2.png)] ] --- class: split-50 .column[ Arrows are used indicate multiplicity of relationships ] .column[ .center.middle.image-50[![](img/relationships.png)] ] --- Relationships are defined over _pairs_ of entities. Relationship `\(R\)` over sets of entities `\(E_1\)` and `\(E_2\)` is defined over the _cartesian product_ `\(E_1 \times E_2\)`. For example: if `\(e_1 \in E_1\)` and `\(e_2 \in E_2\)`, then `\((e_1, e_2) \in R\)`. --- Arrows specify how entities participate in relationships. For example: this diagram specifies that entities in `\(E_1\)` appear in _only one_ relationship pair. .center.image-50[![](img/rel_example.png)] That is, if `\(e_i \in E_1\)`, `\(e_j \in E_2\)` and `\((e_i, e_j) \in R\)`, then there is no other pair `\((e_i, e_k) \in R\)`. --- In databases and general datasets we work on, both Entities and Relationships are represented as _Relations_ (tables). -- Such that a _unique_ entity/relationship is represented by a single tuple (the list of attribute values that represent an entity or relationship). -- How can we ensure _uniqueness_ of entities? -- _keys_ are an essential ingredient to uniquely identify entities and relationships in tables. 
--- layout: true ## Formal introduction to keys --- - Attribute set `\(K\)` is a **superkey** of relation `\(R\)` if values for `\(K\)` are sufficient to identify a unique tuple of each possible relation `\(r(R)\)` - Example: `{SSN}` and `{SSN,name}` are both superkeys of *person* -- - Superkey `\(K\)` is a **candidate key** if `\(K\)` is minimal - Example: `{SSN}` is a candidate key for *person* -- - One of the candidate keys is selected to be the **primary key** - Typically one that is small and immutable (doesn’t change often) - Primary key typically highlighted in ER diagram --- - **Foreign key**: Primary key of a relation that appears in another relation - `{SSN}` from *person* appears in *employs* - *person* called referenced relation - *employs* is the referencing relation -- - **Foreign key constraint**: the tuple corresponding to that primary key must exist - Imagine: - Tuple: `('123-45-6789', 'Apple') `in *employs* - But no tuple corresponding to '123-45-6789' in *person* - Also called referential integrity constraint --- layout: true ## Tidy Data --- We use the term _Tidy Data_ to refer to datasets that are represented in a form that is amenable for manipulation and statistical modeling. It is very closely related to the concept of _normal forms_ in the ER model and the process of _normalization_ in the database literature. --- Here we assume we are working in the ER data model represented as _relations_: rectangular data structures where 1. Each attribute (or variable) forms a column 2. Each entity (or observation) forms a row 3. Each type of entity (observational unit) forms a table --- class: split-50 Here is an example of a tidy dataset: One entity per row, a single attribute per column. Only information about flights included. 
&lt;table&gt; &lt;thead&gt; &lt;tr&gt; &lt;th style="text-align:right;"&gt; year &lt;/th&gt; &lt;th style="text-align:right;"&gt; month &lt;/th&gt; &lt;th style="text-align:right;"&gt; day &lt;/th&gt; &lt;th style="text-align:right;"&gt; dep_time &lt;/th&gt; &lt;th style="text-align:right;"&gt; sched_dep_time &lt;/th&gt; &lt;th style="text-align:right;"&gt; dep_delay &lt;/th&gt; &lt;th style="text-align:right;"&gt; arr_time &lt;/th&gt; &lt;th style="text-align:right;"&gt; sched_arr_time &lt;/th&gt; &lt;th style="text-align:right;"&gt; arr_delay &lt;/th&gt; &lt;th style="text-align:left;"&gt; carrier &lt;/th&gt; &lt;th style="text-align:right;"&gt; flight &lt;/th&gt; &lt;th style="text-align:left;"&gt; tailnum &lt;/th&gt; &lt;th style="text-align:left;"&gt; origin &lt;/th&gt; &lt;th style="text-align:left;"&gt; dest &lt;/th&gt; &lt;th style="text-align:right;"&gt; air_time &lt;/th&gt; &lt;th style="text-align:right;"&gt; distance &lt;/th&gt; &lt;th style="text-align:right;"&gt; hour &lt;/th&gt; &lt;th style="text-align:right;"&gt; minute &lt;/th&gt; &lt;th style="text-align:left;"&gt; time_hour &lt;/th&gt; &lt;/tr&gt; &lt;/thead&gt; &lt;tbody&gt; &lt;tr&gt; &lt;td style="text-align:right;"&gt; 2013 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 517 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 515 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 2 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 830 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 819 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 11 &lt;/td&gt; &lt;td style="text-align:left;"&gt; UA &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1545 &lt;/td&gt; &lt;td style="text-align:left;"&gt; N14228 &lt;/td&gt; &lt;td style="text-align:left;"&gt; EWR &lt;/td&gt; &lt;td style="text-align:left;"&gt; IAH &lt;/td&gt; &lt;td style="text-align:right;"&gt; 227 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 
1400 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 5 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 15 &lt;/td&gt; &lt;td style="text-align:left;"&gt; 2013-01-01 05:00:00 &lt;/td&gt; &lt;/tr&gt; &lt;tr&gt; &lt;td style="text-align:right;"&gt; 2013 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 533 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 529 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 4 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 850 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 830 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 20 &lt;/td&gt; &lt;td style="text-align:left;"&gt; UA &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1714 &lt;/td&gt; &lt;td style="text-align:left;"&gt; N24211 &lt;/td&gt; &lt;td style="text-align:left;"&gt; LGA &lt;/td&gt; &lt;td style="text-align:left;"&gt; IAH &lt;/td&gt; &lt;td style="text-align:right;"&gt; 227 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1416 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 5 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 29 &lt;/td&gt; &lt;td style="text-align:left;"&gt; 2013-01-01 05:00:00 &lt;/td&gt; &lt;/tr&gt; &lt;tr&gt; &lt;td style="text-align:right;"&gt; 2013 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 542 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 540 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 2 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 923 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 850 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 33 &lt;/td&gt; &lt;td style="text-align:left;"&gt; AA &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1141 &lt;/td&gt; &lt;td style="text-align:left;"&gt; N619AA &lt;/td&gt; &lt;td style="text-align:left;"&gt; JFK &lt;/td&gt; &lt;td style="text-align:left;"&gt; MIA &lt;/td&gt; &lt;td 
style="text-align:right;"&gt; 160 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1089 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 5 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 40 &lt;/td&gt; &lt;td style="text-align:left;"&gt; 2013-01-01 05:00:00 &lt;/td&gt; &lt;/tr&gt; &lt;tr&gt; &lt;td style="text-align:right;"&gt; 2013 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 544 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 545 &lt;/td&gt; &lt;td style="text-align:right;"&gt; -1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1004 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1022 &lt;/td&gt; &lt;td style="text-align:right;"&gt; -18 &lt;/td&gt; &lt;td style="text-align:left;"&gt; B6 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 725 &lt;/td&gt; &lt;td style="text-align:left;"&gt; N804JB &lt;/td&gt; &lt;td style="text-align:left;"&gt; JFK &lt;/td&gt; &lt;td style="text-align:left;"&gt; BQN &lt;/td&gt; &lt;td style="text-align:right;"&gt; 183 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1576 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 5 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 45 &lt;/td&gt; &lt;td style="text-align:left;"&gt; 2013-01-01 05:00:00 &lt;/td&gt; &lt;/tr&gt; &lt;tr&gt; &lt;td style="text-align:right;"&gt; 2013 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 554 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 600 &lt;/td&gt; &lt;td style="text-align:right;"&gt; -6 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 812 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 837 &lt;/td&gt; &lt;td style="text-align:right;"&gt; -25 &lt;/td&gt; &lt;td style="text-align:left;"&gt; DL &lt;/td&gt; &lt;td style="text-align:right;"&gt; 461 &lt;/td&gt; &lt;td style="text-align:left;"&gt; N668DN &lt;/td&gt; &lt;td 
style="text-align:left;"&gt; LGA &lt;/td&gt; &lt;td style="text-align:left;"&gt; ATL &lt;/td&gt; &lt;td style="text-align:right;"&gt; 116 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 762 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 6 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 0 &lt;/td&gt; &lt;td style="text-align:left;"&gt; 2013-01-01 06:00:00 &lt;/td&gt; &lt;/tr&gt; &lt;tr&gt; &lt;td style="text-align:right;"&gt; 2013 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 554 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 558 &lt;/td&gt; &lt;td style="text-align:right;"&gt; -4 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 740 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 728 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 12 &lt;/td&gt; &lt;td style="text-align:left;"&gt; UA &lt;/td&gt; &lt;td style="text-align:right;"&gt; 1696 &lt;/td&gt; &lt;td style="text-align:left;"&gt; N39463 &lt;/td&gt; &lt;td style="text-align:left;"&gt; EWR &lt;/td&gt; &lt;td style="text-align:left;"&gt; ORD &lt;/td&gt; &lt;td style="text-align:right;"&gt; 150 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 719 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 5 &lt;/td&gt; &lt;td style="text-align:right;"&gt; 58 &lt;/td&gt; &lt;td style="text-align:left;"&gt; 2013-01-01 05:00:00 &lt;/td&gt; &lt;/tr&gt; &lt;/tbody&gt; &lt;/table&gt; --- layout: true ## Structure Query Language --- The Structured-Query-Language (SQL) is the predominant language used in database systems. It is tailored to the Relational data representation model. SQL is a declarative language, we don't write a _procedure_ to compute a relation, we _declare_ what the relation we want to compute looks like. 
--- The basic construct in SQL is the so-called `SFW` construct: _select-from-where_ which specifies: - _select_: which attributes you want the answer to have - _from_: which relation (table) you want the answer to be computed from - _where_: what conditions you want to be satisfied by the rows (tuples) of the answer --- E.g.: movies produced by Disney in 1990: note the *rename* ```sql select m.title, m.year from movie m where m.studioname = 'disney' and m.year = 1990 ``` --- The **select** clause can contain expressions (this is paralleled by the `mutate` operation we saw previously) - `select title || ' (' || to_char(year) || ')' as titleyear` - `select 2014 - year` --- The **where** clause support a large number of different predicates and combinations thereof (this is parallel to the `filter` operation) - `year between 1990 and 1995` - `title like 'star wars%'` ` title like 'star wars _'` --- We can include ordering, e.g., find distinct movies sorted by title ```sql select distinct title from movie where studioname = 'disney' and year = 1990 order by title; ``` --- ### Group-by and summarize SQL has an idiom for grouping and summarizing E.g., compute the average movie length by year ```sql select name, avg(length) from movie group by year ``` --- layout: true ## Two-table operations --- So far we have looked at data operations defined over single tables and data frames. In this section we look at efficient methods to combine data from multiple tables. The fundamental operation here is the `join`, which is a workhorse of database system design and impementation. --- ### The `join` operation: Combines rows from two tables to create a new single table Based on matching criteria specified over attributes of each of the two tables. 
--- Consider a database of `flights` and `airlines`: ```r flights ``` ``` ## # A tibble: 336,776 x 19 ## year month day dep_time sched_dep_time dep_delay arr_time ## &lt;int&gt; &lt;int&gt; &lt;int&gt; &lt;int&gt; &lt;int&gt; &lt;dbl&gt; &lt;int&gt; ## 1 2013 1 1 517 515 2 830 ## 2 2013 1 1 533 529 4 850 ## 3 2013 1 1 542 540 2 923 ## 4 2013 1 1 544 545 -1 1004 ## 5 2013 1 1 554 600 -6 812 ## 6 2013 1 1 554 558 -4 740 ## 7 2013 1 1 555 600 -5 913 ## 8 2013 1 1 557 600 -3 709 ## 9 2013 1 1 557 600 -3 838 ## 10 2013 1 1 558 600 -2 753 ## # … with 336,766 more rows, and 12 more variables: sched_arr_time &lt;int&gt;, ## # arr_delay &lt;dbl&gt;, carrier &lt;chr&gt;, flight &lt;int&gt;, tailnum &lt;chr&gt;, ## # origin &lt;chr&gt;, dest &lt;chr&gt;, air_time &lt;dbl&gt;, distance &lt;dbl&gt;, hour &lt;dbl&gt;, ## # minute &lt;dbl&gt;, time_hour &lt;dttm&gt; ``` --- ```r airlines ``` ``` ## # A tibble: 16 x 2 ## carrier name ## &lt;chr&gt; &lt;chr&gt; ## 1 9E Endeavor Air Inc. ## 2 AA American Airlines Inc. ## 3 AS Alaska Airlines Inc. ## 4 B6 JetBlue Airways ## 5 DL Delta Air Lines Inc. ## 6 EV ExpressJet Airlines Inc. ## 7 F9 Frontier Airlines Inc. ## 8 FL AirTran Airways Corporation ## 9 HA Hawaiian Airlines Inc. ## 10 MQ Envoy Air ## 11 OO SkyWest Airlines Inc. ## 12 UA United Air Lines Inc. ## 13 US US Airways Inc. ## 14 VX Virgin America ## 15 WN Southwest Airlines Co. ## 16 YV Mesa Airlines Inc. ``` --- Here, we want to add airline information to each flight. Join the attributes of the respective airline from the `airlines` table with the `flights` table based on the values of attributes `flights$carrier` and `airlines$carrier`. --- Every row of `flights` with a specific value for `flights$carrier`, is joined with the the corresponding row in `airlines` with the same value for `airlines$carrier`. --- There are multiple ways of performing this operation that differ on how non-matching observations are handled. 
--- ## Left Join In a `left join`, all observations on left operand (LHS) are retained: .image-50.left[![](img/join_lhs.png)] .image-30.center[![](img/left_join.png)] --- Other operations: - _right join_: all observations in RHS are retained - _outer join_: all observations are retained (full join) - _inner join_: only matching observations are retained Details in lecture notes --- ## Join conditions All join operations are based on a matching condition: ```r flights %&gt;% inner_join(airlines, by="carrier") ``` specifies to join observations where `flights$carrier` equals `airlines$carrier`. --- In this case, where no conditions are specified using the `by` argument: ```r flights %&gt;% left_join(airlines) ``` a *natural join* is perfomed. In this case all variables with the same name in both tables are used in join condition. --- You can also specify join conditions on arbitrary attributes using the `by` argument. ```r flights %&gt;% left_join(airlines, by=c("carrier" = "name")) ``` --- ## SQL Constructs: Multi-table Queries Key idea: - Do a join to combine multiple tables into an appropriate table - Use **SFW** constructs for single-table queries -- For the first part, where we use a join to get an appropriate table, the general SQL construct includes: - The name of the first table to join - The _type_ of join to do - The name of the second table to join - The join condition(s) --- ```sql select title, year, me.name as producerName from movies m join movieexec me where m.producer = me.id; ``` --- layout: true ## Entity Resolution and Record Linkage --- Often, we will be faced with the problem of _data integration_: - combine two (or more) datasets from different sources - that may contain information about the same _entities_. -- But,... the _attributes_ in the two datasets may not be the same, -- Worse: values for the _same_ entity may be different in the two datasets. 
--- ![](img/entityres1.png) -- ![](img/entityres2.png) --- These are examples of a general problem referred to as **Entity Resolution** and **Record Linkage**. --- ### Problem Definition **Given**: Entity sets `\(E_1\)` and `\(E_2\)`, **Find**: Linked entities `\((e_1,e_2)\)` with `\(e_1 \in E_1\)` and `\(e_2 \in E_2\)`. --- ### One approach: similarity function - Define a _similarity_ function between entities `\(e_1\)` and `\(e_2\)` - Link entities with high similarity. --- Define similarity as an _additive_ function over some set of shared attributes `\(A\)`: `$$s(e_1,e_2) = \sum_{j \in A} s_j(e_1[j], e_2[j])$$` with `\(s_j\)` a similarity function defined for _each_ attribute `\(j\)`, --- layout: true ## Entity Resolution and Record Linkage ### Example attribute functions --- **Categorical attribute**: pairs of entities with the same value are more similar to each other than pairs of entities with different values. E.g., $$ s_j(e_1[j],e_2[j]) = `\begin{cases} 1 &amp; \mathrm{ if } \; e_1[j] == e_2[j] \\ 0 &amp; \mathrm { o.w. } \end{cases}` $$ --- **Continuous attribute**: pairs of entities with values that are _close_ to each other are more similar than pairs of entities with values that are _farther_ to each other. Note that to specify _close_ or _far_ we need to introduce some notion of _distance_. We can use Euclidean distance for example, $$ d_j(e_1[j],e_2[j]) = (e_1[j] - e_2[j])^2; \\ s_j(e_1[j],e_2[j]) = e^{-d_j(e_1[j],e_2[j])} $$ --- **Text attributes**: based on _edit distance_ between strings rather than Euclidean distance. We can use domain knowledge to specify similarity. For example, fact that `John` and `Johnathan` are similar requires domain knowledge of common usage of English names. --- layout: true ## Solving the resolution problem --- Need a rule to match entities we think are linked. This depends on assumptions we make about the dataset, similar to assumptions we made when performing joins. 
--- Model the entity resolution problem as an _optimization_ problem: maximize _objective function_ (based on similarity) over possible sets `\(V\)` of _valid_ pairs `\((e_1,e_2)\)`, where set `\(V\)` constraints pairs based on problem-specific assumptions. `$$R = \arg \max_{V} \sum_{(e_1,e_2) \in V} s(e_1,e_2)$$` --- ### Many-to-one resolutions Constrain sets `\(V\)` to represent many-to-one resolutions. Thus, entities in `\(e_1\)` can only appear once in pairs in `\(V\)`, but entities `\(e_2\)` may appear more than once. In this case, we can match `\((e_1,e_2)\)` where `$$e_2 = \arg \max_{e \in E_2} s(e_1,e)$$` --- ### One-to-one resolutions Suppose we constrain sets `\(V\)` to those that represent one-to-one resolutions: If `\((e_1,e_2) \in V\)` then `\(e_1\)` and `\(e_2\)` appear in only one pair in `\(V\)`. In this case, we have a harder computational problem. In fact, this is an instance of the _maximum bipartite matching problem_, and would look at network flow algorithms to solve. --- ### Other constraints We can add additional constraints to `\(V\)` to represent other information we have about the task. A common one would be to only allow pairs `\((e_1,e_2) \in V\)` to have similarity above some threshold `\(t\)`. I.e., `\((e_1, e_2) \in V\)` only if `\(s(e_1,e_2) \geq t\)`. --- ### Discussion The procedure outlined above is an excellent first attempt to solve the Entity Resolution problem. This is a classical problem in Data Science for which a variety of approaches and methods are in use. --- layout: true ## Database Query Optimization --- Earlier we made the distinction that SQL is a _declarative_ language rather than a _procedural_ language. A reason why data base systems rely on a declarative language is that it allows the system to decide how to _evaluate_ a query _most efficiently_. --- Consider a Baseball database where we have two tables `Batting` and `Master` _what is the maximum batting "average" for a player from the state of California_? 
```sql select max(1.0 * b.H / b.AB) as best_ba from Batting as b join Master as m on b.playerId = m.playerId where b.AB &gt;= 100 and m.birthState = "CA" ``` --- Now, let's do the same computation using `dplyr` operations: Version 1: ```r Batting %&gt;% inner_join(Master, by="playerID") %&gt;% filter(AB &gt;= 100, birthState == "CA") %&gt;% mutate(AB=1.0 * H / AB) %&gt;% summarize(max(AB)) ``` ``` ## max(AB) ## 1 0.4057018 ``` --- Version 2: ```r Batting %&gt;% filter(AB &gt;= 100) %&gt;% inner_join( filter(Master, birthState == "CA")) %&gt;% mutate(AB = 1.0 * H / AB) %&gt;% summarize(max(AB)) ``` ``` ## max(AB) ## 1 0.4057018 ``` --- Which should be most efficient? Think about a simple _cost_ model. The costliest operation here is the join between two tables. .center.image-80[![](img/inner_join_alg.png)] --- What is the cost of this algorithm? `\(|T1| \times |T2|\)`. For the rest of the operations, let's assume we perform this with a single pass through the table. For example, we assume that `filter(T)` has cost `\(|T|\)`. --- Let's write out the cost of each of the two pipelines. ```r Batting %&gt;% inner_join(Master, by="playerID") %&gt;% # cost: |Batting| x |Master| filter(AB &gt;= 100, birthState == "CA") %&gt;% # cost: |R1| mutate(AB=1.0 * H / AB) %&gt;% # cost: |R| summarize(max(AB)) # cost: |R| ``` --- Cost of version 1 is `\(|\mathrm{Batting}|\times|\mathrm{Master}| + |R1| + 2|R|\)` `\(R1\)`: inner join between `Batting` and `Master` `\(R\)`: is `\(R1\)` filtered to rows with `AB &gt;=100 &amp; birthState == "CA"`. In this example: 2.08e+09 --- Now, let's look at the second version. 
```r Batting %&gt;% filter(AB &gt;= 100) %&gt;% # cost: |Batting| inner_join( Master %&gt;% filter(birthState == "CA") # cost: |Master| ) %&gt;% # cost: |B1| x |M1| mutate(AB = 1.0 * H / AB) %&gt;% # cost |R| summarize(max(AB)) # cost |R| ``` --- Cost of version 2 is `\(|\mathrm{Batting}| \times |\mathrm{Master}| + |B1|\times|M1|+2|R|\)` `\(B1\)`: `Batting` filtered to include only rows with `AB &gt;= 100` `\(M2\)`: `Master` filtered to include `birthState == "CA"`. In our example: 8.95e+07 --- Version 1 (join tables before filtering) is 23 times costlier. When using SQL in a database system we only write the one query describing our desired result, With the _procedural_ (`dplyr`) we need to think which of the two versions is more efficient. --- Database systems use _query optimization_ to decide how to evaluate queries efficiently. The goal of query optimization is to decide the most efficient query _plan_ to use to evaluate a query out of the many possible candidate plans it could use. It needs to solve two problems: search the space of possible plans, approximate the _cost_ of evaluating a specific plan. --- Think of the two procedural versions above as two candidate plans that the DB system _could_ use to evaluate the query. Query optimzation _approximates_ what it would cost to evaluate each of the two plans and decides to use the most efficient plan. --- layout: true ## Semi-structured Data Representation Model --- The Entity-Relational data model we have described so far is mostly defined for _structured data_: where a specific and consistent schema is assumed. Data models like XML and JSON are instead intended for *semi-structured* data. 
--- #### XML: eXtensible Markup Language Data models like XML rely on flexible, self-describing schemas: ```xml &lt;?xml version="1.0" encoding="UTF-8"?&gt; &lt;!-- Edited by XMLSpy --&gt; &lt;CATALOG&gt; &lt;CD&gt; &lt;TITLE&gt;Emp<NAME>&lt;/TITLE&gt; &lt;ARTIST&gt;<NAME>&lt;/ARTIST&gt; &lt;COUNTRY&gt;USA&lt;/COUNTRY&gt; &lt;COMPANY&gt;Columbia&lt;/COMPANY&gt; &lt;PRICE&gt;10.90&lt;/PRICE&gt; &lt;YEAR&gt;1985&lt;/YEAR&gt; &lt;/CD&gt; &lt;CD&gt; &lt;TITLE&gt;Hide your heart&lt;/TITLE&gt; &lt;ARTIST&gt;<NAME>&lt;/ARTIST&gt; &lt;COUNTRY&gt;UK&lt;/COUNTRY&gt; &lt;COMPANY&gt;CBS Records&lt;/COMPANY&gt; &lt;PRICE&gt;9.90&lt;/PRICE&gt; &lt;YEAR&gt;1988&lt;/YEAR&gt; &lt;/CD&gt; ... ``` --- ### JSON: Javascript Object Notation ```json { "firstName": "John", "lastName": "Smith", "isAlive": true, "age": 25, "height_cm": 167.6, "address": { "streetAddress": "21 2nd Street", "city": "New York", "state": "NY", "postalCode": "10021-3100" }, "phoneNumbers": [ { "type": "home", "number": "212 555-1234" }, { "type": "office", "number": "646 555-4567" } ], "children": [], "spouse": null } ``` --- This is the format most contemporary data REST APIs use to transfer data. 
For instance, here is part of a JSON record from a Twitter stream: ```json { "created_at":"Sun May 05 14:01:34+00002013", "id":331046012875583488, "id_str":"331046012875583488", "text":"\u0425\u043e\u0447\u0443, \u0447\u0442\u043e\u0431 \u0442\u044b \u0441\u0434\u0435\u043b\u0430\u043b \u0432\u0441\u0451 \u043d\u0435\u043e\u0431\u0445\u043e\u0434\u0438\u043c\u043e\u0435.\n \\,,\\ *_* \/,,\/", "source":"\u003ca href=\"http:\/\/twitterfeed.com\"rel=\"nofollow\"\u003etwitterfeed\u003c\/a\u003e", "in_reply_to_user_id_str":null, "user":{ "id":548422428, "id_str":"548422428", "name":"\u0410\u0439\u0433\u0435\u0440\u0438\u043c \u041f\u043e\u0433\u043e\u0434\u0438\u043d\u0430", "screen_name":"paddybyrny", "location":"\u0420\u043e\u0441\u0441\u0438\u044f;\u0412\u043b\u0430\u0434\u0438\u0432\u043e\u0441\u0442\u043e\u043a", "followers_count":4188, "friends_count":4281, "lang":"en", "profile_background_image_url":"http:\/\/a0.twimg.com\/images\/themes\/theme1\/bg.png", }, "geo":null, "coordinates":null, "entities":{ "hashtags":[],"symbols":[],"urls":[],"user_mentions":[] },"favorited":false,"retweeted":false,"filter_level":"medium","lang":"ru"} ``` --- layout: true ## Summary --- We have looked at specifics of **Data Representation Modeling** - Entity Relationship and Relational Models - Definition of _Tidy Data_ - Joining tables - Entity Resolution - Models for semi-structured data </textarea> <style data-target="print-only">@media screen {.remark-slide-container{display:block;}.remark-slide-scaler{box-shadow:none;}}</style> <script src="libs/remark-0.14.0.min.js"></script> <script>var slideshow = remark.create({ "highlightStyle": "github", "highlightLines": true, "countIncrementalSlides": false, "ratio": "16:9" }); if (window.HTMLWidgets) slideshow.on('afterShowSlide', function (slide) { window.dispatchEvent(new Event('resize')); }); (function(d) { var s = d.createElement("style"), r = d.querySelector(".remark-slide-scaler"); if (!r) return; s.type = "text/css"; s.innerHTML 
= "@page {size: " + r.style.width + " " + r.style.height +"; }"; d.head.appendChild(s); })(document); (function(d) { var el = d.getElementsByClassName("remark-slides-area"); if (!el) return; var slide, slides = slideshow.getSlides(), els = el[0].children; for (var i = 1; i < slides.length; i++) { slide = slides[i]; if (slide.properties.continued === "true" || slide.properties.count === "false") { els[i - 1].className += ' has-continuation'; } } var s = d.createElement("style"); s.type = "text/css"; s.innerHTML = "@media print { .has-continuation { display: none; } }"; d.head.appendChild(s); })(document); // delete the temporary CSS (for displaying all slides initially) when the user // starts to view slides (function() { var deleted = false; slideshow.on('beforeShowSlide', function(slide) { if (deleted) return; var sheets = document.styleSheets, node; for (var i = 0; i < sheets.length; i++) { node = sheets[i].ownerNode; if (node.dataset["target"] !== "print-only") continue; node.parentNode.removeChild(node); } deleted = true; }); })(); // adds .remark-code-has-line-highlighted class to <pre> parent elements // of code chunks containing highlighted lines with class .remark-code-line-highlighted (function(d) { const hlines = d.querySelectorAll('.remark-code-line-highlighted'); const preParents = []; const findPreParent = function(line, p = 0) { if (p > 1) return null; // traverse up no further than grandparent const el = line.parentElement; return el.tagName === "PRE" ? 
el : findPreParent(el, ++p); }; for (let line of hlines) { let pre = findPreParent(line); if (pre && !preParents.includes(pre)) preParents.push(pre); } preParents.forEach(p => p.classList.add("remark-code-has-line-highlighted")); })(document);</script> <script> remark.macros['scale'] = function (percentage) { var url = this; return '<img src="' + url + '" style=width: ' + percentage + '"/>'; }; </script> <script> (function() { var links = document.getElementsByTagName('a'); for (var i = 0; i < links.length; i++) { if (/^(https?:)?\/\//.test(links[i].getAttribute('href'))) { links[i].target = '_blank'; } } })(); </script> <script> slideshow._releaseMath = function(el) { var i, text, code, codes = el.getElementsByTagName('code'); for (i = 0; i < codes.length;) { code = codes[i]; if (code.parentNode.tagName !== 'PRE' && code.childElementCount === 0) { text = code.textContent; if (/^\\\((.|\s)+\\\)$/.test(text) || /^\\\[(.|\s)+\\\]$/.test(text) || /^\$\$(.|\s)+\$\$$/.test(text) || /^\\begin\{([^}]+)\}(.|\s)+\\end\{[^}]+\}$/.test(text)) { code.outerHTML = code.innerHTML; // remove <code></code> continue; } } i++; } }; slideshow._releaseMath(document); </script> <!-- dynamically load mathjax for compatibility with self-contained --> <script> (function () { var script = document.createElement('script'); script.type = 'text/javascript'; script.src = 'https://mathjax.rstudio.com/latest/MathJax.js?config=TeX-MML-AM_CHTML'; if (location.protocol !== 'file:' && /^https?:/.test(script.src)) script.src = script.src.replace(/^https?:/, ''); document.getElementsByTagName('head')[0].appendChild(script); })(); </script> </body> </html> <file_sep>/materials/projects/classification_zillow.Rmd --- title: "Project 4: Classification" author: "CMSC320" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(warning=FALSE, message=FALSE, cache=TRUE) ``` **Last Update**: `r Sys.Date()` ## Data We will use Mortgage Affordability data from Zillow to experiment with 
classification algorithms. The data was downloaded from Zillow Research page: https://www.zillow.com/research/data/ It is made available here: http://www.hcbravo.org/IntroDataSci/misc/Affordability_Wide_2017Q4_Public.csv Download the csv file to your project directory. ### Preparing data First, we will tidy the data. Please include this piece of code in your submission. ```{r, message=FALSE, warning=FALSE, cache=FALSE} library(tidyverse) library(lubridate) theme_set(theme_bw()) ``` ```{r} csv_file <- "Affordability_Wide_2017Q4_Public.csv" tidy_afford <- read_csv(csv_file) %>% filter(Index == "Mortgage Affordability") %>% drop_na() %>% filter(RegionID != 0, RegionName != "United States") %>% dplyr::select(RegionID, RegionName, matches("^[1|2]")) %>% gather(time, affordability, matches("^[1|2]")) %>% type_convert(col_types=cols(time=col_date(format="%Y-%m"))) tidy_afford ``` This is what the data looks like: ```{r} tidy_afford %>% ggplot(aes(x=time,y=affordability,group=factor(RegionID))) + geom_line(color="GRAY", alpha=3/4, size=1/2) + labs(title="County-Level Mortgage Affordability over Time", x="Date", y="Mortgage Affordability") ``` ## The prediction task The prediction task we are going to answer is: > Can we predict if mortgage affordability will increase or decrease a year from now" Specifically, we will do this for quarter 4 (Q4) of 2017. To create the outcome we will predict we will compare affordability for Q4 of 2017 to Q4 of 2016 and label it as `up` or `down` depending on the sign of the this difference. 
Let's create the outcome we want to predict (again, copy this bit of code to your submission):

```{r}
outcome_df <- tidy_afford %>%
  mutate(yq = quarter(time, with_year=TRUE)) %>%
  filter(yq %in% c("2016.4", "2017.4")) %>%
  select(RegionID, RegionName, yq, affordability) %>%
  spread(yq, affordability) %>%
  mutate(diff = `2017.4` - `2016.4`) %>%
  mutate(Direction = ifelse(diff>0, "up", "down")) %>%
  select(RegionID, RegionName, Direction)
outcome_df
```

Now, you have a dataframe with outcomes (labels) for each county in the dataset. The goal is then, given predictors $X_i$ for county $i$, to build a classifier for outcome $G_i \in \{\mathtt{up},\mathtt{down}\}$.

To train your classifiers you should use data up to 2016.

```{r}
predictor_df <- tidy_afford %>%
  filter(year(time) <= 2016)
```

## Your project

Your goal for this project is to do an experiment to address a (one, single) technical question about our ability to make this prediction.

Here is a list of possible questions you may address below. Each of them asks to compare two specific choices in the classification workflow (e.g., two classification algorithms, two feature representations, etc.). You will implement each of the two choices and use 10-fold cross validation (across RegionID's) to compare their relative performance. You will also create an AUROC curve to compare them.

### Possible Questions

#### Feature representation and preprocessing

- Does standardizing affordability for each region affect prediction performance? Compare standardized to non-standardized affordability.
- Does using quarter to quarter change (continuous or discrete) improve prediction performance? Compare quarter to quarter change in affordability as predictors to affordability as predictor?
- Should we use the full time series for each region, or should we use only the last few years? Compare full time series to a subset of the time series?
- Should we expand the training set to multiple time series per region?
For example, create a similar outcome for each time point in the dataset (change relative to affordability one year ago) and use data from the last couple of years as predictors. Train on the extended dataset and test on the 2017 data above?
- Should we do dimensionality reduction (PCA) and use the embedded data to do prediction?
- Create your own question!

#### Classification Algorithm

- Is a decision tree better than logistic regression?
- Is a random forest better than a decision tree?
- Is K-nearest neighbors better than a random forest?
- Create your own question!

Note that you still have to make some choices regardless of the question you choose. For example, to do the feature preprocessing and representation experiments you have to choose a classifier (random forest for example), and decide what to do about hyper-parameters if appropriate.

## Submission

Prepare an Rmarkdown file which includes:

1) Code to prepare data (copied from chunks above), plus any additional data prep for your experiment.
2) Discussion of the question you have chosen to address including discussion of other choices you have made (e.g., feature representation, classification algorithm) to carry out your experiment.
3) Code to carry out your cross-validation experiment.
4) Table (result of hypothesis testing difference between algorithms) and plot comparing AUROCs
5) ROC curves for both experimental settings.
6) Interpretation and discussion of your experiment results.

Knit to PDF and submit on ELMS.

## An example experiment

**Question**: Does the number of trees used in a random forest classifier affect performance (AUROC measured with 5-fold CV)?

**Other factors**: We are transforming input data to use quarterly differences after data standardization for years 2014-2016.
### Data preparation

First, filter to the years of interest and standardize affordability for each region

```{r}
standardized_df <- predictor_df %>%
  filter(year(time) %in% 2014:2016) %>%
  group_by(RegionID) %>%
  mutate(mean_aff = mean(affordability)) %>%
  mutate(sd_aff = sd(affordability)) %>%
  mutate(z_aff = (affordability - mean_aff) / sd_aff) %>%
  ungroup()
standardized_df
```

To train our model we need a table with one row per region, and attributes corresponding to differences in quarterly affordability. We will do this in stages, first we turn the tidy dataset into a wide dataset using `tidyr::spread` then create a dataframe containing the differences we use as features.

```{r}
wide_df <- standardized_df %>%
  select(RegionID, time, z_aff) %>%
  tidyr::spread(time, z_aff)
wide_df
```

Now, we turn this into quarterly differences

```{r}
matrix_1 <- wide_df %>%
  select(-RegionID) %>%
  as.matrix() %>%
  .[,-1]

matrix_2 <- wide_df %>%
  select(-RegionID) %>%
  as.matrix() %>%
  .[,-ncol(.)]

diff_df <- (matrix_1 - matrix_2) %>%
  magrittr::set_colnames(NULL) %>%
  as_data_frame() %>%
  mutate(RegionID = wide_df$RegionID)
```

Finally, add the outcome we want to predict from the data frame we created previously.

```{r}
final_df <- diff_df %>%
  inner_join(outcome_df %>% select(RegionID, Direction), by="RegionID") %>%
  mutate(Direction=factor(Direction, levels=c("down", "up"))) %>%
  select(-RegionID)
final_df
```

### Run the experiment

We will use 5-fold cross-validation to compare a random forest with 500 trees, with a random forest with 10 trees. Since this is a smallish dataset, I am using 5-fold cross validation to make the validation sets have more examples and therefore more reliable performance estimates.
```{r, cache=FALSE} library(caret) set.seed(1234) ``` ```{r, cache=FALSE} # create the cross-validation partition cv_partition <- createFolds(final_df$Direction, k=5) # setup training parameters fit_control <- trainControl( ## 5-fold CV method = "cv", number = 5, #indexOut = cv_partition, summaryFunction=twoClassSummary, classProbs=TRUE, savePredictions=TRUE) # a function to obtain performance data # (tpr and fpr) over the given cross validation # partitions, for the number of trees in the # random forest get_roc_data <- function(ntree, cv_partition) { mean_fpr <- seq(0, 1, len=100) aucs <- numeric(length(cv_partition)) # iterate over folds res <- lapply(seq_along(cv_partition), function(i) { # train the random forest fit <- train(Direction~., data = final_df[-cv_partition[[i]],], # all but the holdout set method = "rf", ntree = ntree, trControl = fit_control, metric="ROC") # make predictions on the holdout set preds <- predict(fit, newdata=final_df[cv_partition[[i]],],type="prob")$up # compute tpr and fpr from the hold out set perf <- ROCR::prediction(preds, final_df$Direction[cv_partition[[i]]]) %>% ROCR::performance(measure="tpr", x.measure="fpr") fpr <- unlist(perf@x.values) tpr <- unlist(perf@y.values) # interpolate the roc curve over 0, 1 range interp_tpr <- approxfun(fpr, tpr)(mean_fpr) interp_tpr[1] <- 0.0 # collect values for this fold data_frame(fold=rep(i, length(mean_fpr)), fpr=mean_fpr, tpr=interp_tpr) }) # combine values across all folds # into a single data frame do.call(rbind, res) } # calculate area under the ROC curve # from tpr and fpr values across folds compute_auc <- function(curve_df) { curve_df %>% group_by(fold) %>% summarize(auc=pracma::trapz(fpr, tpr)) } ``` ```{r} # get performance data for random forest with # 10 trees small_curve_df <- get_roc_data(ntree = 10, cv_partition) small_auc_df <- compute_auc(small_curve_df) ``` ```{r} # get performance data for random forest with # 500 trees large_curve_df <- get_roc_data(ntree=500, 
cv_partition) large_auc_df <- compute_auc(large_curve_df) ``` Now let's compare models based on 5-CV area under the ROC curve (AUROC). ```{r, cache=FALSE} # combine performance data for both models # into one data frame (adding column to indicate) # which model was used curve_df <- small_curve_df %>% mutate(model="small") %>% rbind(mutate(large_curve_df, model="large")) %>% mutate(model = factor(model, levels=c("small", "large"))) auc_df <- small_auc_df %>% mutate(model="small") %>% rbind(mutate(large_auc_df, model="large")) %>% mutate(model = factor(model, levels=c("small", "large"))) # plot distribution of ggplot(auc_df, aes(x=model, y=auc)) + geom_jitter(position=position_jitter(0.1)) + coord_flip() + labs(title="AUC comparision", x="Model", y="Area under ROC curve") ``` We test for differences using linear regression. ```{r} library(broom) model_tab <- auc_df %>% lm(auc~model,data=.) %>% tidy() model_tab %>% knitr::kable() ``` We see that there is a small increase (`r format(abs(model_tab$estimate[2]) * 100, digits=2)`%) in average AUROC for the big model but it is not a statistically significant difference. Finally, here are ROC curves of both models. ```{r, warning=FALSE} curve_df %>% group_by(model, fpr) %>% summarize(tpr = mean(tpr)) %>% ggplot(aes(x=fpr, y=tpr, color=model)) + geom_line() + labs(title = "ROC curves", x = "False positive rate", y = "True positive rate") ``` The bigger model has higher true positive rate at lower false positive rates. The smaller model is better at high false positive rates. Neither model is very good at this prediction task, and there is no clear advantage in using either of these for the analysis task. 
<file_sep>/content/lecture-note/large_scale/index.md --- date: 2016-11-09T09:34:07-05:00 title: Learning at scale --- Systems and algorithms for data summarization and learning on large datasets [Lecture notes on Stochastic Gradient Descent](sgd/) <file_sep>/materials/slides/solving-linear-problems/solving-linear-problems.Rmd --- title: "Solving Linear Problems" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Solving Linear Problems] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] ```{r setup1, include=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) ``` --- layout: true ## Solving linear problems --- How to fit the type of analysis methods we've seen so far? We will use linear regression as a case study of how this insight would work. --- ### Case Study **Given**: Training set $\{(x_1, y_1), \ldots, (x_n, y_n)\}$, with continuous response $y_i$ and single predictor $x_i$ for the $i$-th observation. **Do**: Estimate parameter $\beta_1$ in model $y=\beta_1 x$ to solve $$\min_{\beta_1} L(\beta_1) = \frac{1}{2} \sum_{i=1}^n (y_i - \beta_1 x_i)^2$$ --- And suppose we want to fit this model to the following (simulated) data: ```{r,echo=FALSE, fig.align="center", fig.height=6} set.seed(1234) true_beta <- 5 x <- runif(100, -10, 10) y <- x * true_beta + rnorm(100, mean=0, sd=sqrt(10)) plot(x,y,pch=19,cex=1.4,main="Simulated Data", cex.lab=1.5, cex.main=2) abline(a=0, b=true_beta, col="red", lwd= 2) ``` --- Our goal is then to find the value of $\beta_1$ that minimizes mean squared error. 
This corresponds to finding one of these many possible lines: ```{r, echo=FALSE, fig.align="center", fig.height=6} plot(x,y,pch=19,cex=1.4,main="Simulated Data", cex.lab=1.5, cex.main=2) abline(a=0, b=true_beta, col="red", lwd= 2) for (b in seq(-6,6, len=5)) { abline(a=0,b=b,col="blue", lwd=2, lty=2) } legend("bottom", legend=paste("beta=", seq(-6,6,len=5)), lwd=2, lty=2, cex=1.5) ``` --- Each of which has a specific error for this dataset: ```{r, echo=FALSE, fig.align="center", fig.height=6} n <- length(y) compute_loss <- function(beta, x, y) { 0.5 * mean((y-x*beta)^2) } beta <- seq(-20, 20, len=100) plot(beta, sapply(beta, compute_loss, x=x, y=y), type="l", lwd=2, ylab=expression(L(beta[1])),cex.lab=1.5,xlab=expression(beta[1])) abline(v=true_beta, col="red", lwd=2) abline(v=seq(-6,6,len=5), col="blue", lwd=2, lty=2) ``` --- Insights: 1) As we saw before in class, loss is minimized when the derivative of the loss function is 0 2) and, the derivative of the loss (with respect to $\beta_1$ ) at a given estimate $\beta_1$ suggests new values of $\beta_1$ with smaller loss! --- Let's take a look at the derivative: $$\frac{\partial}{\partial \beta_{1}} L(\beta_1) = \frac{\partial}{\partial \beta_{1}} \frac{1}{2} \sum_{i=1}^n (y_i - \beta_1 x_i)^2 \\ {} = \sum_{i=1}^n (y_i - \beta_1 x_i) \frac{\partial}{\partial \beta_1} (y_i - \beta_1 x_i) \\ {} = \sum_{i=1}^n (y_i - \beta_1 x_i) (-x_i)$$ --- and plot it for our case study data: ```{r, echo=FALSE, cache=FALSE, fig.align="center", fig.height=6} loss_derivative <- function(beta, x, y) { f <- beta * x resid <- y - f sum(resid * (-x)) } plot(beta, sapply(beta, loss_derivative, x=x, y=y), type="l", lwd=1.5, xlab=expression(beta[1]), ylab=expression(partialdiff * L(beta[1]) / partialdiff * beta[1]),cex.lab=1.7) abline(v=true_beta, col="red", lwd=2) abline(v=seq(-6,6,len=5), col="blue", lwd=2, lty=2) abline(h=0, col="black", lwd=2, lty=2) ``` --- ### Gradient Descent This plot suggests an algorithm: 1. 
Initialize $\beta_1^0=0$ 2. Repeat for $k=1,2,\ldots$ until convergence - Set $\beta_1^k = \beta_1^{k-1} + \alpha \sum_{i=1}^n (y_i - f(x_i;\beta_1^{k-1})) x_i$ Note: $f(x_i;\beta_1) = \beta_1 x_i$ --- This algorithm is called **gradient descent** in the general case. The basic idea is to move the current estimate of $\beta_1$ in the direction that minimizes loss the *fastest*. Another way of calling this algorithm is **Steepest Descent**. --- ```{r,echo=FALSE, fig.align="center", fig.height=6} # Implementation of gradient descent for least squares regression # for a single predictor (x) # # There is some code here that is only used to generate illustrative plots and would not be part of real solver gradient_descent <- function(x, y, tol=1e-6, maxit=50, plot=FALSE) { # initialize estimate beta_1 <- 0; old_beta_1 <- Inf; i <- 0; beta_keep <- NA # compute loss at first estimate loss <- compute_loss(beta_1, x, y); loss_keep <- NA # starting step size alpha <- 1e-3 difference <- Inf # check for convergence # (in practice, we do include a limit on the number of iterations) while ((difference > tol) && (i < maxit)) { cat("it: ", i, " beta: ", round(beta_1, 2), "loss: ", round(loss, 2), " alpha: ", round(alpha, 6), "\n") # this piece of code just adds steps to an existing plot if (plot && !is.na(beta_keep) && !is.na(loss_keep)) { suppressWarnings(arrows(beta_keep, loss_keep, beta_1, loss, lty=2, col="blue")) } # store the last estimate for plotting beta_keep <- beta_1; loss_keep <- loss; # store the last estimate to check convergence old_beta_1 <- beta_1 # update estimate f <- beta_1 * x resid <- y - f beta_1 <- beta_1 + alpha * sum(resid * x) # compute difference after taking step # to check convergence difference <- (beta_1 - old_beta_1)^2 / (old_beta_1)^2 # compute loss and derivative for updated estimate loss <- compute_loss(beta_1, x, y) i <- i+1 # shorten the step size if ((i %% 3) == 0) alpha <- alpha / 2 } if (plot) { suppressWarnings(arrows(beta_keep, loss_keep, 
beta_1, loss, lty=2, col="blue")) } beta_1 } ``` Let's run this algorithm and track what it does: ```{r, echo=FALSE, cache=FALSE, fig.align="center", fig.height=6} plot(beta, sapply(beta, compute_loss, x=x, y=y), type="l", lwd=2, ylab=expression(L(beta[1])),cex.lab=1.5,xlab=expression(beta[1]), xlim=c(-20,20), main="Gradient Descent") estimate <- gradient_descent(x, y, plot=TRUE) ``` --- This algorithm is referred to as "Batch" gradient descent, we take a step (update $\beta_1$) by calculating derivative with respect to _all_ $n$ observations in our dataset. --- For clarity, let's write out the update equation again: $$\beta_1^k = \beta_1^{k-1} + \alpha \sum_{i=1}^n (y_i - f(x_i; \beta_1^{k-1})) x_i$$ where $f(x_i; \beta_1) = \beta_1 x_i$. --- For multiple predictors (e.g., adding an intercept), this generalizes to the _gradient_ i.e., the vector of first derivatives of _loss_ with respect to parameters. In this case, the model sets $$\begin{array}{l} f(\mathbf{x}_i; \mathbf{\beta}) & = & \beta_0 + \beta_1 x_{i1} + \cdots + \beta_p x_{ip} \\ {} & = & \sum_{j=0}^p \beta_j x_{ij} \\ {} & = & \beta'x \end{array}$$ Note: we take $x_{i0} = 1$ --- exclude: false The gradient given by partial derivatives for each parameter $$\nabla_{\mathbf{\beta}}L(\mathbf{\beta}) = \begin{bmatrix} \frac{\partial L(\beta)}{\partial \beta_0} \\ \frac{\partial L(\beta)}{\partial \beta_1} \\ \vdots \\ \frac{\partial L(\beta)}{\partial \beta_p} \end{bmatrix}$$ --- exclude: false The update equation is exactly the same for least squares regression $$\mathbf{\beta}^k = \mathbf{\beta}^{k-1} + \alpha \sum_{i=1}^n (y_i - f(\mathbf{x}_i; \beta^{k-1})) \mathbf{x}_i$$ where $f(\mathbf{x}_i; \mathbf{\beta}) = \beta' \mathbf{x}_i$ Note: $x_{i0}=1$ --- Gradient descent falls within a family of optimization methods called _first-order methods_ These methods have properties amenable to use with very large datasets: 1. Inexpensive updates 2. "Stochastic" version can converge with few sweeps of the data 3. 
"Stochastic" version easily extended to streams 4. Easily parallelizable Drawback: Can take many steps before converging --- ### Logistic Regression Gradient descent is also used to solve the logistic regression problem. The same procedure follows (1) define a loss function; (2) derive the update equation; (3) run the iterative gradient descent algorithm. --- Let's take a look at the first two steps in this case. For logistic regression, we turn to _maximum likelihood_ to formulate a loss function. For the logistic regression problem we are given dataset $\{\langle \mathbf{x}_1, y_1\rangle, \ldots, \langle \mathbf{x}_n, y_n \rangle \}$, where outcomes $y_i \in \{0,1\}$ since we are learning a binary classification problem. --- The goal is to estimate parameters $\mathbf{\beta}$ in model $$\begin{array}{l} \log{ \frac{p(Y=1 | \mathbf{X}=\mathbf{x})}{1-p(Y=1 | \mathbf{X}=\mathbf{x})}} & = & \beta_0 + \beta_0 x_{1} + \cdots + \beta_p x_{p} \\ {} & = & \beta'\mathbf{x} \end{array}$$ Note: $x_{i0}=1$ --- To establish a loss function we first assume a model for data generation. The assumption we make here is if an entity has attribute values $\mathbf{x}$, then the outcome $Y=1$ with probability given by $$p(\mathbf{x}; \mathbf{\beta}) = \frac{e^{f(\mathbf{x}; \mathbf{\beta})}}{1+e^{f(\mathbf{x}; \mathbf{\beta})}}$$ Note that we use the same notation $f(\mathbf{x}; \beta)=\beta'\mathbf{x}$ as we did in linear regression. --- Now, we can ask, what is the probability of the data we observe for entity $i$ under this model? 
We can write this probability in this form: $$ p(\mathbf{x}_i; \mathbf{\beta})^{y_i}(1-p(\mathbf{x}_i; \mathbf{\beta}))^{(1-y_i)} $$ --- Now, we can put these together for all observed entities since we assume that these are generated independently to get a _likelihood_ function: $$\mathcal{L}(\mathbf{\beta}) = \prod_{i=1}^n p_i(\mathbf{x}_i;\mathbf{\beta})^{y_i}(1-p_i(\mathbf{x}_i;\mathbf{\beta}))^{(1-y_i)}$$ --- Now, we need to turn this into a loss function we can _minimize_. The likelihood function we wrote down is one we would _maximize_. Also, it is usually more convenient to work with the logarithm of likelihoods. --- The loss function we use for gradient descent is the _negative log likelihood_ $$L(\mathbf{\beta}) = \sum_{i=1}^n -y_i f(\mathbf{x}_i;\mathbf{\beta}) + \log(1+e^{f(\mathbf{x}_i;\mathbf{\beta})})$$ --- So, now that we have a loss function, we need to derive it's gradient to use the gradient descent algorithm. Check the lecture notes. $$\nabla_{\mathbf{\beta}} L(\mathbf{\beta}) = \sum_{i=1}^n (p(\mathbf{x}_i; \mathbf{\beta}) - y_i) \mathbf{x}_i$$ --- Note the nice similarity to the gradient for linear regression. It multiplies each data (expanded) data vector $\mathbf{x}_i$ by the difference between a prediction, in this case the probability that the outcome $y_i=1$ and the observed outcome $y_i$. --- layout: true ## Stochastic gradient descent --- **Key Idea**: Update parameters using update equation _one observation at a time_: 1. Initialize $\beta=\mathbf{0}$, $i=1$ 2. 
Repeat until convergence - For $i=1$ to $n$ - Set $\beta = \beta + \alpha (y_i - f(\mathbf{x}_i, \beta)) \mathbf{x}_i$ --- ```{r, echo=FALSE} # Implementation of stochastic gradient descent for least squares regression # for a single predictor (x) # # There is some code here that is only used to generate illustrative plots stochastic_gradient_descent <- function(x, y, tol=1e-6, maxit=50, plot=FALSE) { n <- length(y) # initialize estimate beta_1 <- 0; i <- 0; beta_keep <- NA # compute loss at first estimate loss <- compute_loss(beta_1, x, y); loss_keep <- NA # initial step size alpha <- 1e-3 difference <- Inf # check for convergence # (in practice a max number of iterations is used) while ((difference > tol) && (i < maxit)) { cat("it: ", i, " beta: ", round(beta_1, 2), "loss: ", round(loss, 2), " alpha: ", round(alpha, 6), "\n") # store last estimate to check convergence old_beta_1 <- beta_1 # iterate over observations for (j in seq(1,n)) { # add step to plot if (plot && !is.na(beta_keep) && !is.na(loss_keep)) { suppressWarnings(arrows(beta_keep, loss_keep, beta_1, loss, lty=2, col="blue")) } # store last estimate and loss for plotting beta_keep <- beta_1; loss_keep <- loss; # update estimate with j-th observation f <- beta_1 * x[j] resid <- y[j] - f beta_1 <- beta_1 + alpha * resid * x[j] # compute loss with new estimate loss <- compute_loss(beta_1, x, y) } # check difference between current and old estimate # to check convergence difference <- (beta_1 - old_beta_1)^2 / old_beta_1^2 i <- i+1 # update step size if ((i %% 5) == 0) alpha <- alpha / 2 } if (plot) { suppressWarnings(arrows(beta_keep, loss_keep, beta_1, loss, lty=2, col="blue")) } beta_1 } ``` Let's run this and see what it does: ```{r, echo=FALSE, cache=FALSE, fig.align="center", fig.height=6} plot(beta, sapply(beta, compute_loss, x=x, y=y), type="l", lwd=2, ylab=expression(L(beta[1])),cex.lab=1.5,xlab=expression(beta[1]), xlim=c(-20,20), main="Stochastic Gradient Descent") estimate <- 
stochastic_gradient_descent(x, y, plot=TRUE)
```

---

The stochastic gradient descent algorithm can easily adapt to _data streams_ where we receive observations one at a time and _assume_ they are not stored.

This setting falls in the general category of _online_ learning.

---

## Parallelizing gradient descent

Gradient descent algorithms are easily parallelizable:

- Split observations across computing units
- For each step, compute partial sum for each partition (map), compute final update (reduce)

---

$$\beta^k = \beta^{k-1} + \alpha * \sum_{\mathrm{partition}\; P} \sum_{i \in P} (y_i - f(\mathbf{x_i}; \beta^{k-1})) \mathbf{x}_i$$

---

This observation has resulted in their implementation in systems for large-scale learning:

1. [Vowpal Wabbit](https://github.com/JohnLangford/vowpal_wabbit/wiki)
  - Implements general framework of (sparse) stochastic gradient descent for many optimization problems
  - R interface: [http://cran.r-project.org/web/packages/RVowpalWabbit/index.html]

---

This observation has resulted in their implementation in systems for large-scale learning:

2. [Spark MLlib](https://spark.apache.org/docs/1.2.1/mllib-guide.html)
  - Implements many learning algorithms using Spark framework we saw previously
  - Some access to the MLlib API via R, but built on primitives accessible through `SparkR` library we saw previously
<file_sep>/materials/lectures/eda/eda_summary_stats.Rmd
---
title: "EDA: summary statistics"
author: "CMSC320"
date: "`r Sys.Date()`"
output: html_document
---

```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
library(png)
library(grid)
library(tidyr)
library(dplyr)
```

Let's continue our discussion of Exploratory Data Analysis. In the previous section we saw ways of visualizing data (variables) using plots to start understanding properties of how data is distributed, an essential and preliminary step in data analysis.
In this section, we start discussing statistical, or numerical, summaries of data to quantify properties that we observed using visual summaries and representations. Remember that one purpose of EDA is to spot problems in data (as part of data wrangling) and understand variable properties like: - central trends (mean) - spread (variance) - skew - suggest possible modeling strategies (e.g., probability distributions) We also want to use EDA to understand relationship between pairs of variables, e.g. their correlation or covariance. One last note on EDA. <NAME> was an exceptional scientist/mathematician, who had profound impact on statistics and Computer Science. A lot of what we cover in EDA is based on his groundbreaking work. I highly recommend you read more about him: [https://www.stat.berkeley.edu/~brill/Papers/life.pdf](https://www.stat.berkeley.edu/~brill/Papers/life.pdf). ### Range Part of our goal is to understand how variables are distributed in a given dataset. Note, again, that we are not using _distributed_ in a formal mathematical (or probabilistic) sense. All statements we are making here are based on data at hand, so we could refer to this as the _empirical distribution_ of data. Here, _empirical_ is used in the sense that this is data resulting from an experiment. Let's use a dataset on diamond characteristics as an example. ```{r} library(ggplot2) data(diamonds) library(dplyr) hist(diamonds$depth, main="Depth Histogram", xlab="Depth", nclass=200) ``` (Here's some help interpreting these variables: [https://en.wikipedia.org/wiki/Diamond_(gemstone)#Gemological_characteristics](https://en.wikipedia.org/wiki/Diamond_(gemstone)#Gemological_characteristics)). Let's start using some notation to make talking about this a bit more efficient. We assume that we have data across $n$ entitites (or observational units) for $p$ attributes. In this dataset $n=`r nrow(diamonds)`$ and $p=`r ncol(diamonds)`$. 
However, let's consider a single attribute, and denote the data for that attribute (or variable) as $x_1, x_2, \ldots, x_n$.

Ok, so what's the first question we want to ask about how data is distributed? Since we want to understand how data is distributed across a _range_, we should first define the range.

```
diamonds %>%
  summarize(min_depth = min(depth), max_depth = max(depth))
```

We use notation $x_{(1)}$ and $x_{(n)}$ to denote the minimum and maximum statistics. In general, we use notation $x_{(q)}$ for the rank statistics, e.g., the $q$th largest value in the data.

### Central Tendency

Now that we know the range over which data is distributed, we can figure out a first summary of how data is distributed across this range. Let's start with the _center_ of the data: the _median_ is a statistic defined such that half of the data has a smaller value. We can use notation $x_{(n/2)}$ (a rank statistic) to represent the median. Note that we can use an algorithm based on the quicksort partition scheme to compute the median in linear time (on average).

```{r}
median_depth <- median(diamonds$depth)
hist(diamonds$depth, main="Depth Histogram", xlab="Depth", nclass=200)
abline(v=median_depth, lwd=2.3, col="red")
```

### Derivation of the mean as central tendency statistic

Of course, the best known statistic for central tendency is the _mean_, or average of the data: $\overline{x} = \frac{1}{n} \sum_{i=1}^n x_i$.

It turns out that in this case, we can be a bit more formal about what "center" means. Let's say that the _center_ of a dataset is a point in the range of the data that is _close_ to the data. To say that something is _close_ we need a measure of _distance_.

So for two points $x_1$ and $x_2$ what should we use for distance? We could base it on $(x_1 - x_2)$ but that's not enough since its sign depends on the order in which we write it.
Using the absolute value solves that problem $|x_1 - x_2|$ since now the sign doesn't matter, but this has some issues that we will see later. So, next best thing we can do is use the square of the difference. So, in this case, the distance between data point $x_1$ and $x_2$ is $(x_1 - x_2)^2$. Here is a fun question: what's the largest distance between two points in our dataset? So, to define the _center_, let's build a criterion based on this distance by adding this distance across all points in our dataset: $$ RSS(\mu) = \frac{1}{2} \sum_{i=1}^n (x_i - \mu)^2 $$ Here RSS means _residual sum of squares_, and we $\mu$ to stand for candidate values of _center_. We can plot RSS for different values of $\mu$: ```{r} rss <- function(mu) { 0.5 * sum((diamonds$depth - mu)^2)} mu_candidates <- seq(min(diamonds$depth), max(diamonds$depth), len=1000) plot(mu_candidates, sapply(mu_candidates, rss), xlab="Depth", ylab="RSS", type="l", lwd=2, main="Residual Sum of Squares") ``` Now, what should our "center" estimate be? We want a value that is _close_ to the data based on RSS! So we need to find the value in the range that minimizes RSS. From calculus, we know that a necessary condition for the minimizer $\hat{\mu}$ of RSS is that the derivative of RSS is zero at that point. So, the strategy to minimize RSS is to compute its derivative, and find the value of $\mu$ where it equals zero. 
So, let's find the derivative of RSS: $$ \begin{eqnarray} \frac{\partial}{\partial \mu} \frac{1}{2} \sum_{i=1}^n (x_i - \mu)^2 & = & \frac{1}{2} \sum_{i=1}^n \frac{\partial}{\partial \mu} (x_i - \mu)^2 \\ {} & = & \frac{1}{2} \sum_{i=1}^n 2(x_i - \mu) \times \frac{\partial}{\partial \mu} (x_i - \mu) \\ {} & = & \frac{1}{2} \sum_{i=1}^n 2(x_i - \mu) \times (-1) \\ {} & = & \frac{1}{2} 2 \sum_{i=1}^n (\mu - x_i) \\ {} & = & \sum_{i=1}^n \mu - \sum_{i=1}^n x_i \\ {} & = & n\mu - \sum_{i=1}^n x_i \end{eqnarray} $$ ```{r} rss_deriv <- function(mu) { nrow(diamonds)*mu - sum(diamonds$depth)} plot(mu_candidates, sapply(mu_candidates, rss_deriv), xlab="Depth", ylab="RSS Derivative", type="l", lwd=2, main="Derivative of RSS") abline(h=0,lty=2,lwd=1.6) ``` Next, we set that equal to zero and find the value of $\mu$ that solves that equation: $$ \begin{eqnarray} \frac{\partial}{\partial \mu} & = & 0 & \Rightarrow \\ n\mu - \sum_{i=1}^n x_i & = & 0 & \Rightarrow \\ n\mu & = & \sum_{i=1}^n x_i & \Rightarrow \\ \mu & = & \frac{1}{n} \sum_{i=1}^n x_i & {} \end{eqnarray} $$ That's the average we know and love! 
So the fact you should remember: **The mean is the value that minimizes RSS for a dataset** It equals the value where the derivative of RSS is 0: ```{r} our_mean <- sum(diamonds$depth) / nrow(diamonds) plot(mu_candidates, sapply(mu_candidates, rss_deriv), xlab="Depth", ylab="RSS Derivative", type="l", lwd=1.3) abline(v=our_mean, lwd=2.3, col="blue") abline(h=0, lty=2, lwd=1.6) ``` It is the value that minimizes RSS: ```{r} plot(mu_candidates, sapply(mu_candidates, rss), xlab="Depth", ylab="RSS Derivative", type="l", lwd=1.3) abline(v=our_mean, lwd=2.3, col="blue") ``` And it serves as an estimate of central tendency of the dataset: ```{r} hist(diamonds$depth, main="Depth Histogram", xlab="Depth", nclass=200) abline(v=median_depth, lwd=2.3, col="red") abline(v=our_mean, lwd=2.3, col="blue") ``` Note that in this dataset the mean and median are not exactly equal, but are very close: ```{r} diamonds %>% summarize(mean_depth = mean(depth), median_depth = median(depth)) ``` One last note, there is a similar argument to define the median as a measure of _center_. In this case, instead of using RSS we use a different criterion: the sum of absolute deviations $\sum_{i=1}^n |x_i - m|$. The median is the minimizer of this criterion. ```{r} sad <- function(m) sum(abs(diamonds$depth - m)) plot(mu_candidates, sapply(mu_candidates, sad), xlab="Depth", ylab="Sum of Absolute Deviations", type="l", lwd=1.3) abline(v=median(diamonds$depth), lwd=2.3, col="red") ``` ### Spread Now that we have a measure of center, we can now discuss how data is _spread_ around that center. For the mean, we have a convenient way of describing this: the average distance (using squared difference) from the mean. 
We call this the _variance_ of the data: $$ \mathrm{var}(x) = \frac{1}{n} \sum_{i=1}^n (x_i - \overline{x})^2 $$ You will also see it with a slightly different constant in the front for technical reasons that we may discuss later on: $$ \mathrm{var}(x) = \frac{1}{n-1} \sum_{i=1}^n (x_i - \overline{x})^2 $$ Variance is a commonly used statistic for spread but it has the disadvantage that its units are not easy to conceptualize (e.g., squared diamond depth). A spread statistic that is in the same units as the data is the _standard deviation_, which is just the squared root of variance: $$ \mathrm{sd}(x) = \sqrt{\frac{1}{n}\sum_{i=1}^n (x_i - \overline{x})^2} $$ We can also use _standard deviations_ as an interpretable unit of how far a given data point is from the mean: ```{r} mean_depth <- mean(diamonds$depth) sd_depth <- sd(diamonds$depth) hist(diamonds$depth, main="Depth Histogram", xlab="Depth", nclass=200) abline(v=mean_depth, lwd=2.3, col="blue") abline(v=mean_depth + sd_depth * seq(-6,6), lty=2, lwd=3.5-abs(seq(-3,3,len=13))) ``` As a rough guide, we can use "standard deviations away from the mean" as a measure of spread as follows: | SDs | proportion | Interpretation | |-----|------------|----------------| | 1 | `r round(1-2*pnorm(-1),2)` | `r 100*round(1-2*pnorm(-1),2)`% of the data is within $\pm$ 1 sds | | 2 | `r round(1-2*pnorm(-2),2)` | `r 100*round(1-2*pnorm(-2),2)`% of the data is within $\pm$ 2 sds | | 3 | `r round(1-2*pnorm(-3),4)` | `r 100*round(1-2*pnorm(-3),4)`% of the data is within $\pm$ 3 sds | | 4 | `r round(1-2*pnorm(-4),6)` | `r 100*round(1-2*pnorm(-4),6)`% of the data is within $\pm$ 4 sds | | 5 | `r round(1-2*pnorm(-5),8)` | `r 100*round(1-2*pnorm(-5),8)`% of the data is within $\pm$ 5 sds | | 6 | `r round(1-2*pnorm(-6),10)` | `r 100*round(1-2*pnorm(-6),10)`% of the data is within $\pm$ 6 sds | We will see later how these rough approximations are derived from a mathematical assumption about how data is distributed _beyond_ the data we have 
at hand. ### Spread estimates using rank statistics Just like we saw how the median is a rank statistic used to describe central tendency, we can also use rank statistics to describe spread. For this we use two more rank statistics: the first and third _quartiles_, $x_{(n/4)}$ and $x_{(3n/4)}$ respectively: ```{r} first_quartile <- quantile(diamonds$depth, p=1/4) third_quartile <- quantile(diamonds$depth, p=3/4) hist(diamonds$depth, main="Depth Histogram", xlab="Depth", nclass=200) abline(v=median_depth, lwd=2.3, col="red") abline(v=c(first_quartile, third_quartile), lwd=2.3, col="red", lty=2) ``` Note, the five order statistics we have seen so far: minimum, maximum, median and first and third quartiles are so frequently used that this is exactly what `R` uses by default as a `summary` of a numeric vector of data (along with the mean): ```{r} summary(diamonds$depth) ``` This five-number summary also comprises all of the statistics used to construct a boxplot to summarize data distribution. ### Inter-quartile range One last value of interest is the _inter-quartile range_ which is defined as the difference between the third and first quartile: $\mathrm{IQR}(x) = x_{(3n/4)} - x_{(n/4)}$. The interpretation here is that half the data is within the IQR around the median. ```{r} diamonds %>% summarize(sd_depth = sd(depth), iqr_depth = IQR(depth)) ``` ## Skew One last thought. Although there are formal ways of defining this precisely, the five-number summary can be used to understand if data is skewed. How? Consider the differences between the first and third quartiles to the median: ```{r} first_difference <- median_depth - first_quartile third_difference <- third_quartile - median_depth c(first_difference, third_difference) ``` If one of these differences is larger than the other, then that indicates that this dataset might be skewed, that is, that the range of data on one side of the median is longer (or shorter) than the range of data on the other side of the median. 
Do you think our diamond depth dataset is skewed? ## Covariance and correlation The scatter plot is a visual way of observing relationships between pairs of variables. Like descriptions of distributions of single variables, we would like to construct statistics that summarize the relationship between two variables quantitatively. To do this we will extend our notion of _spread_ (or variation of data around the mean) to the notion of _co-variation_: do pairs of variables vary around the mean in the same way. Consider now data for two variables over the same $n$ entities: $(x_1,y_1), (x_2,y_2), \ldots, (x_n,y_n)$. For example, for each diamond, we have `carat` and `price` as two variables: ```{r} diamonds %>% ggplot(aes(x=carat, y=price)) + geom_point() + geom_hline(aes(yintercept = mean(price)), color="blue", lty=2) + geom_vline(aes(xintercept = mean(carat)), color="blue", lty=2) ``` We want to capture the relationship: $x_i$ varies in the same direction and scale away from its mean as $y_i$. This leads to _covariance_ $$ cov(x,y) = \frac{1}{n} \sum_{i=1}^n (x_i - \overline{x})(y_i - \overline{y}) $$ Think of what would the covariance for $x$ and $y$ be if $x_i$ varies in the _opposite_ direction as $y_i$? Just like variance, we have an issue with units and interpretation for covariance, so we introduce _correlation_ (formally, Pearson's correlation coefficient) to summarize this relationship in a _unit-less_ way: $$ cor(x,y) = \frac{cov(x,y)}{sd(x) sd(y)} $$ As before, we can also use rank statistics to define a measure of how two variables are associated. One of these, _Spearman correlation_ is commonly used. It is defined as the Pearson correlation coefficient of the ranks (rather than actual values) of pairs of variables. 
<file_sep>/materials/projects/Project4.Rmd --- title: "Project 4: Interactive Data Maps" author: "CMSC320" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` **Posted:** May 2nd, 2019 **Last Updated:** `r format(Sys.Date(), "%b %d, %Y")` **Due:** May 14th, 2019 Use the `leaflet` package and our previously used Baltimore crime dataset to make an interactive data map of Baltimore Crime. 1. Use this piece of code to download and prepare data for use in project ```{r get_and_prepare_data, message=FALSE, warning=FALSE} library(tidyverse) library(stringr) arrest_tab <- read_csv("http://www.hcbravo.org/IntroDataSci/misc/BPD_Arrests.csv") dat <- arrest_tab %>% filter(!is.na(`Location 1`)) %>% separate(`Location 1`, c("lat","lng"), sep=",") %>% mutate(lat=as.numeric(str_replace(lat, "\\(", ""))) %>% mutate(lng=as.numeric(str_replace(lng, "\\)", ""))) %>% sample_n(2000) dat ``` Note the attributes `lat` and `lng` which indicate geographical location as latitude (`lat`) and longitude (`lng`). 2. Use the `leaflet` package to create an interactive map of Baltimore ```{r start_map, warning=FALSE} library(leaflet) balto_map <- leaflet(dat) %>% addTiles() %>% setView(lat=39.29, lng=-76.61, zoom=11) balto_map ``` You can find more information about leaflet here: https://rstudio.github.io/leaflet/ 3. Add graphical elements to display the data. For instance, add circles, with colors indicating sex. Or circles with colors indicating race. Or anything else that strikes your fancy. These will be useful: - https://rstudio.github.io/leaflet/markers.html - https://www.rdocumentation.org/packages/leaflet/versions/1.1.0/topics/addControl 4. Embed your map in your Rmarkdown file, knit **to HTML** this time (not PDF) and submit the HTML file to ELMS. 
## Submission Prepare and knit an Rmarkdown file that includes: (a) code to carry out each of the steps above, (b) output showing the result of your code (in this case the interactive map), and (c) a short prose description of your interactive map (i.e., what are you showing with this data and map). Remember, the writeup you are preparing is intended to communicate your data analysis effectively. Thoughtlessly showing large amounts of output in your writeup defeats that purpose. #### Grading A simple map, e.g., one showing the distribution of sex of people arrested across Baltimore, will earn full credit. However, more sophisticated analysis, or interactivity (useful informative popups) will earn extra credit applied to the final Projects grade, so I encourage you to be creative. <file_sep>/content/homeworks/datatypes_wrangling.md --- title: "Homework 1: Datatypes and Wrangling" date: "2018-02-08" --- In this assignment, you will explore and exercise your knowledge of data types, data operations and data plotting. **DUE**: Friday Feb. 7, 11:59pm <!--more--> ## Data types Choose a dataset from those linked on the [class resources page](/resources/) or found anywhere online. The only requirement is that this dataset is downloadable as a CSV (comma separated value) file. Answer the following questions: 1) Provide a URL to the dataset. 2) Explain why you chose this dataset. 3) What are the entities in this dataset? How many are there in this dataset? 4) How many attributes are there in this dataset? 5) What is the datatype of each attribute (categorical (ordered or unordered), numeric (discrete or continuous), datetime, geolocation, other)? Write a short sentence stating how you determined the type of each attribute. Do this for at least 5 attributes, if your dataset contains more than 10 attributes choose 10 of them to describe. 6) Write code that loads the dataset using function `read_csv`. Were you able to load the data successfully? If no, why not? 
If yes, show the first 10 rows of the dataset. ## Wrangling Write an operation pipeline including at minimum three of the operations we have learned in class: [Notes Section 6](/bookdown-notes/principles-basic-operations.html) and [Notes Section 7](/bookdown-notes/principles-more-operations.html). One of the operations should be a `group_by` and `summarize`. If you were able to successfully load your dataset above, you can write the operation pipeline to analyze your dataset. Otherwise, write a pipeline using the `flights` dataset included in package `nycflights13`. Also available as a csv file [here](/misc/nyc_flights.csv). 1) Write a couple of sentences describing the pipeline you wrote and why is it useful as an analysis of the dataset. 2) Provide code executing the pipeline and displaying at most the first 10 rows of the result. **Hint**: In the `flights` dataset you could create a pipeline to make a data frame that lists, in increasing order, the average total delay for each carrier on flights departing from JFK, where total delay is the sum of departure and arrival delays. ## Plotting Make one plot using `ggplot` of the result of the pipeline you created above. Refer to [Notes Section 8](/bookdown-notes/basic-plotting-with-ggplot.html) to see some examples. 1) Write text describing what you are plotting 2) Provide code to create and show the resulting plot ## Submitting ### Rmarkdown Download the Rmarkdown file here: [HW1 Rmarkdown shell](/misc/hw1_datatypes_wrangling.Rmd) and fill in with your answers. Knit as PDF (or HTML and then print to PDF) and submit to ELMS. You can see an example submission here: [HW1 Rmarkdown submission example](/misc/hw1_datatypes_wrangling_sample.Rmd) [HW1 Rmarkdown PDF submission example](/misc/hw1_datatypes_wrangling_rmd_sample.pdf) ### Jupyter Notebook Download the notebook here: [HW1 ipynb shell](/misc/hw1_datatypes_wrangling.ipynb) and fill in with your answers. 
Export as PDF (or HTML and then print to PDF) and submit to ELMS. You can see an example submission here: [HW1 ipynb submission example](/misc/hw1_datatypes_wrangling_sample.ipynb) [HW1 ipynb PDF submission example](/misc/hw1_datatypes_wrangling_ipynb_sample.pdf) <file_sep>/materials/projects/project2_sql.Rmd ```{r connect_db} db <- DBI::dbConnect(RSQLite::SQLite(), "lahman2016.sqlite") ``` ```{sql run_query, connection=db, output.var="payroll_df"} select ... from ... where ... ``` ```{r print_result} payroll_df %>% head() ```<file_sep>/materials/quizzes/pipes.md --- title: Pipes quiz author: CMSC498 Fall 2016 date: September 14, 2016 --- Name(s): UID(s): You are working for real estate website Zillow and you want to calculate income distributions in a geographically sensible way. You have a data.frame with two columns `income` and `address` where addresses are in form "Address line 1\\nAddress line 2\\nCity, State, Zip" for some number of households. You think that calculating income based on the first three digits of the zip code makes sense and you want to write a pipeline to extract that information. You have access the following functions: - `select(data_frame, columns)`: takes a data frame and returns a data frame with only the given `columns` - `split_address(addresses)`: takes a vector of addresses in format above and returns a data.frame with columns `line_1`, `line_2`, `city`, `state`, `zip` - `extract_prefix(prefix_size, vector)`: takes a string vector `vector` and returns the prefix of size `prefix_size` from each string in the vector Write a short program using the pipe `%>%` operator that generates a data.frame containing the first three digits of the zip code and income for each household in the dataset. 
<file_sep>/materials/lectures/Wrangling/wrangling_dplyr.Rmd --- title: "Data Wrangling with `dplyr`" author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- ```{r, echo=FALSE, message=FALSE} knitr::opts_chunk$set(cache=TRUE) library(png) library(grid) library(tidyr) library(dplyr) ``` In previous lectures we discussed the `data.frame` to introduce the structure we usually see in a dataset before we start analysis: 1. Each attribute/variable forms a column 2. Each entity/(observational unit) forms a row 3. Each type of entity/(observation unit) forms a table Although we did not explicitly mention number 3, in more complex datasets we want to make sure we divide different entity types into their respective tables. We will discuss this in more detail when we see data models (in the database sense) later on. We will refer to data organized in this fashion as _tidy data_. In this section we introduce operations and manipulations that commonly arise in analyses. We center our discussion around the idea that we are operating over tidy data, and we want to ensure that the operations we apply also generate tidy data as a result. ## `dplyr` We will use the `dplyr` package to introduce these operations. I think it is one of the most beautiful tools created for data analysis. It clearly defines and efficiently implements most common data manipulation operations (verbs) one comes across in data analysis. It is built around tidy data principles. It also presents uniform treatment of multiple kinds of data sources (in memory files, partially loaded files, databases). It works best when used in conjunction with the non-standard _pipe_ operator (`%>%`) first introduced by the `magrittr` package. This simple syntactic sugar is extremely powerful. 
It is used to elegantly chain multiple manipulation operations: ```{r, eval=FALSE} # suppose we wanted to apply two manipulations filter_first_column <- function(data, arg) { data[data[,1] == arg, ] } select_column <- function(data, col) { data[,col] } # using standard function application select_column(filter_first_column(dat, 10), 2) # the pipe operator lhs %>% func_call(args) inserts lhs # as the first argument of the func_call on the right hand side # using pipe operator, this is much more elegant dat %>% filter_first_column(10) %>% select_column(2) ``` A complete introduction to `dplyr` is found here: [http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html](http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html) We will use a dataset of inbound and outbound flights to New York City as an example: ```{r} library(nycflights13) data(flights) ``` ## Single-table manipulation We will first look at operations that work over a single table at a time. Single table verbs: - `filter()` and `slice()`: subset observations (entities) - `arrange()`: sort observations (entities) - `select()` and `rename()`: subset variables (attributes) - `distinct()`: make entities unique - `mutate()` and `transmute()`: add a new variable (attribute) - `summarize()`: compute a summary statistics for one or more variables - `sample_n()` and `sample_frac()`: sample observations from a data table ### Subsetting Observations The first fundamental operation we learned about early this semester is subsetting, or filtering, observations (entities, rows) in a dataset. Recall that we could subset by a set of indices (say, all even rows, this is used when splitting datasets to train and test statistical models). Much more useful is the ability to filter observations based on attribute values. 
```{r, fig.width=8, fig.height=4, echo=FALSE} img <- readPNG("subset.png") grid.raster(img) ``` ```{r, eval=FALSE} # include only flights on United Airlines flights %>% filter(carrier == "UA") # select even samples, note function `n` defined by dplyr flights %>% slice(seq(1, n(), by=2)) ``` ### Subsetting Variables On occasion, we may want to restrict a data analysis to a subset of variables (attributes, columns) to improve efficiency or interpretability. ```{r, fig.width=7, fig.height=3.5, echo=FALSE} img <- readPNG("select.png") grid.raster(img) ``` ```{r, eval=FALSE} # select only month carrier and origin variables flights %>% select(month, carrier, origin) ``` On large, complex, datasets the ability to perform this selection based on properties of column/attribute names is very powerful. For instance, in the `billboard` dataset we saw in a previous unit, we can select columns using partial string matching: ```{r, eval=FALSE} billboard %>% select(starts_with("wk")) ``` ### Creating New Variables One of the most common operations in data analysis is to create new variables (attributes), based on other existing attributes. ```{r, fig.width=8, fig.height=4, echo=FALSE} img <- readPNG("mutate.png") grid.raster(img) ``` These manipulations are used for transformations of existing single variables, for example, squaring a given varaible (`x -> x^2`), to make visualization or other downstream analysis more effective. In other cases, we may want to compute functions of existing variables to improve analysis or interpretation of a dataset. Here is an example creating a new variable as a function of two existing variables ```{r, eval=FALSE} # add new variable with total delay flights %>% mutate(delay=dep_delay + arr_delay) ``` ### Summarizing Data Much of statistical analysis, modeling and visualization is based on computing summaries (refered to as summary statistics) for variables (attributes), or other data features, of datasets. 
The `summarize` operation summarizes one variable (column) over multiple observations (rows) into a single value. ```{r, fig.width=8, fig.height=4, echo=FALSE} img <- readPNG("summarize.png") grid.raster(img) ``` ```{r, eval=FALSE} # compute mean total delay across all flights flights %>% mutate(delay = dep_delay + arr_delay) %>% summarize(mean_delay = mean(delay, na.rm=TRUE), min_delay = min(delay, na.rm=TRUE), max_delay = max(delay, na.rm=TRUE)) ``` ### Grouping Data Aggregation and summarization also go hand in hand with data grouping, where aggregates, or even variable transformations are performed _conditioned_ on other variables. The notion of _conditioning_ is fundamental and we will see it very frequently through the course. It is the basis of statistical analysis and Machine Learning models for regression and prediction, and it is essential in understanding the design of effective visualizations. ```{r, fig.width=6.5, fig.height=2.5, echo=FALSE} img <- readPNG("groupby.png") grid.raster(img) ``` So the goal is to group observations (rows) with the same value of one or more variables (columns). In the `dplyr` implementation, the `group_by` function in essence annotates the rows of a data table as belonging to a specific group. When `summarize` is then applied to this annotated data table, summaries are computed for each group, rather than the whole table. ```{r, eval=FALSE} # compute mean total delay per carrier flights %>% mutate(delay = dep_delay + arr_delay) %>% group_by(carrier) %>% summarize(delay=mean(delay, na.rm=TRUE)) ``` ## Two-table manipulation We saw above manipulations defined over single tables. In this section we look at efficient methods to combine data from multiple tables. The fundamental operation here is the `join`, which is a workhorse of database system design and implementation. 
The `join` operation combines rows from two tables to create a new single table, based on matching criteria specified over attributes of each of the two tables. Consider the example of joining the `flights` and `airlines` tables: ```{r} head(flights) head(airlines) ``` Here, we want to add airline information to each flight. We can do so by joining the attributes of the respective airline from the `airlines` table with the `flights` table based on the values of attributes `flights$carrier` and `airlines$carrier`. Specifically, every row of `flights` with a specific value for `flights$carrier`, is joined with the corresponding row in `airlines` with the same value for `airlines$carrier`. We will see four different ways of performing this operation that differ on how non-matching observations are handled. ### Left Join In this case, all observations on left operand (LHS) are retained: ```{r, fig.width=6.5, fig.height=2.5, echo=FALSE} img <- readPNG("join_lhs.png") grid.raster(img) ``` ```{r, fig.width=6.5, fig.height=2.5, echo=FALSE} img <- readPNG("left_join.png") grid.raster(img) ``` ```{r, eval=FALSE} flights %>% left_join(airlines, by="carrier") ``` RHS variables for LHS observations with no matching RHS observations are coded as `NA`. ### Right Join All observations on right operand (RHS) are retained: ```{r, fig.width=6.5, fig.height=2.5, echo=FALSE} img <- readPNG("join_lhs.png") grid.raster(img) ``` ```{r, fig.width=6.5, fig.height=2.5, echo=FALSE} img <- readPNG("right_join.png") grid.raster(img) ``` ```{r, eval=FALSE} flights %>% right_join(airlines, by="carrier") ``` LHS variables for RHS observations with no matching LHS observations are coded as `NA`. 
### Inner Join Only observations matching on both tables are retained ```{r, fig.width=6.5, fig.height=2.5, echo=FALSE} img <- readPNG("join_lhs.png") grid.raster(img) ``` ```{r, fig.width=6.5, fig.height=2.5, echo=FALSE} img <- readPNG("inner_join.png") grid.raster(img) ``` ```{r, eval=FALSE} flights %>% inner_join(airlines, by="carrier") ``` ### Full Join All observations are retained, regardless of matching condition ```{r, fig.width=6.5, fig.height=2.5, echo=FALSE} img <- readPNG("join_lhs.png") grid.raster(img) ``` ```{r, fig.width=6.5, fig.height=2.5, echo=FALSE} img <- readPNG("full_join.png") grid.raster(img) ``` ```{r, eval=FALSE} flights %>% full_join(airlines, by="carrier") ``` All values coded as `NA` for non-matching observations as appropriate. ### Join conditions All join operations are based on a matching condition: ```{r, eval=FALSE} flights %>% left_join(airlines, by="carrier") ``` specifies to join observations where `flights$carrier` equals `airlines$carrier`. In this case, where no conditions are specified using the `by` argument: ```{r, eval=FALSE} flights %>% left_join(airlines) ``` a *natural join* is performed. In this case all variables with the same name in both tables are used in join condition. You can also specify join conditions on arbitrary attributes using the `by` argument. ```{r, eval=FALSE} flights %>% left_join(airlines, by=c("carrier" = "name")) ``` ### Filtering Joins We've just seen *mutating joins* that create new tables. *Filtering joins* use join conditions to filter a specific table. ```{r} flights %>% anti_join(airlines, by="carrier") ``` Filters the `flights` table to only include flights from airlines that *are not* included in the `airlines` table. Final note on `dplyr` ======================================== - Very efficient implementation of these operations. 
- More info: [http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html](http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html) - Cheatsheet: [http://www.rstudio.com/wp-content/uploads/2015/02/data-wrangling-cheatsheet.pdf](http://www.rstudio.com/wp-content/uploads/2015/02/data-wrangling-cheatsheet.pdf) <file_sep>/materials/slides/stat_next/stat_next.Rmd --- title: "Statistical Principles (Part 2)" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Statistical Principles (Part 2)] .author[<NAME>] .other-info[ University of Maryland, College Park, USA CMSC320: `r Sys.Date()` ] .logo[![](img/logo.png)] ```{r setup, include=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) ``` --- layout: true ## Inference One way to think about how we use probability in data analysis (statistical and machine learning) is like this: --- .center.image-60[![](img/inference.png)] --- .center.image-60[![](img/inference1.png)] --- layout: true ## Inference --- **Law of Large Numbers (LLN)**: the estimate $\hat{p}$ will be close to the parameter $p$ on average, **Central Limit Theorem (CLT)**: tells us how confident we can be that we found $p$. -- **Confidence Interval**: Since $\hat{p} \sim N(p,\frac{\sqrt{p(1-p)}}{\sqrt{n}})$ let's find an interval $[\hat{p}_{-}, \hat{p}_{+}]$, with: - $\hat{p}$ at its center, - contains 95% of the probability specified by the CLT. --- How do we calculate this interval? $\hat{p}_{-}$ will be the value where $N\left( p,\frac{\sqrt{p(1-p)}}{\sqrt{n}} \right)$ is such that $P(Y \leq \hat{p}_{-}) = .05/2$. 
-- In R, we calculate with `qnorm`: $$ \begin{align} \hat{p}_{-} & = \mathtt{qnorm}(.05/2, \hat{p}, \frac{\sqrt{\hat{p}(1-\hat{p})}}{\sqrt{n}}) \\ {} & = \hat{p} + \mathtt{qnorm}(.05/2,0, \frac{\sqrt{\hat{p}(1-\hat{p})}}{\sqrt{n}}) \end{align} $$ --- The upper value of the interval is computed with probability $1-(.05/2)$, By the symmetry of the normal distribution is $$\hat{p}_{+} = \hat{p} + -\mathtt{qnorm}(.05/2,0, \frac{\sqrt{\hat{p}(1-\hat{p})}}{\sqrt{n}})$$ --- Let's see how these intervals look for our twitter bot example: ```{r, message=FALSE, echo=FALSE} library(dplyr) get_estimate <- function(n, p=0.7) mean(sample(c(0,1), size=n, replace=TRUE, prob=c(1-p,p))) set.seed(1) # let's construct confidence intervals for samples of size n=10,100,500,1000,10000 tab <- data.frame(sample_size=c(10,100,500,1000,10000)) %>% mutate(phat = sapply(sample_size,get_estimate)) %>% mutate(se = sqrt(phat*(1-phat)) / sqrt(sample_size)) %>% mutate(lower = phat + qnorm(.05/2, sd=se)) %>% mutate(upper = phat + -qnorm(.05/2, sd=se)) knitr::kable(tab, format="html", digits=3) ``` For $n=500$, our estimate of $p$ is ${}_{`r round(tab$lower[3],2)`}`r round(tab$phat[3],2)`_{`r round(tab$upper[3],2)`}$. --- layout: true ## Hypothesis testing --- Suppose that **before** I sampled tweets I thought (_hypothesized_) that more than 50% of tweets are bot-generated. -- **Hypothesis Testing** A very popular way of using data to suggest this hypothesis is **true**: -- By using inference to **reject** the hypothesis that it is **not true**. --- _null_ hypothesis: **50% or less of tweets are bot-generated** _alternative_ hypothesis (the one we cared about): **more than 50% of tweets are bot-generated** -- You will see this written in statistics textbooks as: $$ \begin{align} H_0: \, & p \leq .5 & \textrm{(null)} \\ H_1: \, & p > .5 & \textrm{(alternative)} \end{align} $$ --- Given sample of $n$ tweets, estimate $\hat{p}$ as we did before. 
If $\hat{p}$ (sample mean from our sample of tweets) is _too far_ from $p=.5$: then we **reject** the _null_ hypothesis: the estimate we derived from the data we have is not statistically consistent with the _null_ hypothesis. --- class:split-50 How do we say our estimate $\hat{p}$ is too far? Use the probability model given by the CLT. If $P(Y \geq \hat{p}) \geq .95$ under the null model (of $p=.5$), we say it is too far and we reject. .column.center.image-80[![](img/testing.png)] .column.center.image-80[![](img/testing1.png)] --- This 95% threshold is conservative, but somewhat arbitrary. So we use one more metric, $P(Y \geq \hat{p})$ (the infamous p-value) to say: We could reject the _null_ hypothesis for all thresholds greater than this p-value. --- Let's see how testing would look for our tweet example ```{r, echo=FALSE} tab <- tab %>% mutate(p_value = 1-pnorm(phat, mean=.5, sd=se)) knitr::kable(tab, format='html', digits=3) ``` --- ## The $t$-test These results hold for $n$ sufficiently large that the normal distribution in the CLT provides a good approximation of the distribution of estimates $\hat{p}$. In cases where $n$ is smaller, the $t$-distribution, as opposed to the normal distribution, provides a better approximation of the distribution of estimates $\hat{p}$. As $n$ grows, the $t$-distribution approaches a normal distribution which is why analysts use the $t$-test regularly. --- ### A/B Testing A classic experimental design where hypothesis testing is commonly used is A/B testing. .center![](img/A-B_testing.png) --- Here we have two estimates $\hat{p}_A$ and $\hat{p}_B$, the proportion of clicks for design A and B respectively. The null hypothesis we would test is that _there is no difference in proportions_ between the two designs. Mathematically, we would like to know "What is the probability that we observe a difference in proportions this large under the null hypothesis". We will work this out as a homework exercise (HW4). 
--- layout: true ## Summary --- **Inference**: estimate parameter from data based on assumed probability model (e.g., matching expectation; we'll see later another method called maximum likelihood). -- For _averages_ the LLN and CLT tell us how to compute probabilities from a single parameter estimate derived from one dataset of samples. With these probabilities we can construct confidence intervals for our estimate. --- **Testing**: Use probability _under the null hypothesis_ to assess the statistical consistency of estimates obtained from data, Reject the null hypothesis if estimates are not statistically consistent enough (again using probability from CLT when dealing with averages). --- layout: false ## Probability Distributions Check lecture notes for further discussion of the probability distributions we saw in this discussion. --- layout: true ## Joint and conditional probability --- Suppose that for each tweet I sample I can also say if it has _a lot_ of retweets or not. I have another binary random variable $Y \in \{0,1\}$ where $Y=1$ indicates the sampled tweet has a lot of retweets. --- We could illustrate the population of "all" tweets as .center.image-50[![](img/joint.png)] --- We can talk of the joint probability mass function of $X$ and $Y$: $p(X=x, Y=y)$, where random variables $X$ and $Y$ can take values from domains $\mathcal{D}_X$ and $\mathcal{D}_Y$ respectively. Here we have the same conditions as we had for univariate distributions: 1. $p(X=x,Y=y)\geq 0$ for all combination of values $x$ and $y$, and 2. $\sum_{(x,y) \in \mathcal{D}_X \times \mathcal{D}_Y} p(X=x,Y=y) = 1$ --- We can also talk about _conditional probability_: the probability of a tweet being bot-generated or not, _conditioned_ on whether it has lots of retweets or not: $$ p(X=x | Y=y) $$ which also needs to satisfy the properties of a probability distribution. 
--- So to make sure $$ \sum_{x \in \mathcal{D}_X} p(X=x|Y=y) = 1 $$ we define $$ p(X=x | Y=y) = \frac{p(X=x,Y=y)}{p(Y=y)} $$ _marginalization_: follows from the properties of joint probability distribution: $\sum_{x \in \mathcal{D}_X} p(X=x, Y=y) = p(Y=y)$. --- Conditional probability lets us talk about _independence_: if the probability of a tweet being bot-generated _does not_ depend on a tweet having lots of retweets i.e., $p(X=x) = p(X=x|Y=y)$ for all $y$, then we say $X$ is _independent_ of $Y$. --- .center.image-40[![](img/joint.png)] Is $X$ independent of $Y$? What would the diagram look like if $X$ was independent of $Y$? --- For independent random variables, the joint probability has an easy form $$p(X=x,Y=y)=p(X=x)p(Y=y)$$ Generalizes to more than two independent random variables. --- layout: true ## Bayes' Rule --- An extremely useful and important rule of probability follows from our definitions of conditional and joint probability above. Bayes' rule is pervasive in Statistics, Machine Learning and Artificial Intelligence. It is a very powerful tool to talk about uncertainty, beliefs, evidence, and many other technical and philosophical matters. It is, however, of extreme simplicity. --- Bayes' Rule states that $$ p(X=x|Y=y) = \frac{p(Y=y|X=x)p(X=x)}{p(Y=y)} $$ which follows directly from our definitions above. --- One very common usage of Bayes' Rule is that it lets us define one conditional probability distribution based on another probability distribution. For example, it may be hard to reason about $p(X=x|Y=y)$ in our tweet example. -- If you know a tweet has a lot of retweets $(Y=1)$, what can you say about the probability that it is bot-generated, i.e., $p(X=1|Y=1)$? Maybe not much, tweets have lots of retweets for many reasons. --- However, it may be easier to reason about the reverse: if I tell you a tweet is bot-generated $(X=1)$, what can you say about the probability that it has a lot of retweets, i.e., $p(Y=1|X=1)$? 
That may be easier to reason about, at least bot-generated tweets are designed to get lots of retweets. At minimum, it's easier to estimate because we can get a training set of bot-generated tweets and _estimate_ this conditional probability.

---

Bayes' Rule tells us how to get the hard to reason about (or estimate) conditional probability $p(X=x|Y=y)$ in terms of the conditional probability that is easier to reason about (or estimate) $p(Y=y|X=x)$.

This is the basis of the Naive Bayes prediction method, which we'll revisit briefly later on.

---
layout: true

## Conditional expectation

---

With conditional probability we can start talking about conditional expectation, which generalizes the concept of expectation we saw before.

The _conditional expected value_ (conditional mean) of $X$ given $Y=y$ is

$$
\mathbb{E} [ X|Y=y ] = \sum_{x \in \mathcal{D}_X} x p(X=x|Y=y)
$$

Conditional Expectation, which follows from conditional probability, will serve as the basis for our Machine Learning method studies in the next few lectures!

---
layout: true

## Maximum likelihood

---

We saw before how we estimated a parameter by matching the expectation from a probability model with what we observed in data. The most popular method of estimation (Maximum Likelihood Estimation) uses a similar idea.

---

Given data $x_1,x_2,\ldots,x_n$ and an assumed model of their distribution, e.g.,

- $X_i\sim \mathrm{Bernoulli}(p)$ for all $i$,
- they are iid,

let's find the value of parameter $p$ that maximizes the likelihood (or probability) of the data we observe under this assumed probability model. We call the resulting estimate the _maximum likelihood estimate_ (MLE).

---

Here are some fun exercises to try:

1) Given a sample $x_1$ with $X_1 \sim N(\mu,1)$, show that the maximum likelihood estimate of $\mu$ is $\hat{\mu}=x_1$.

--

It is most often convenient to _minimize negative log-likelihood_ instead of maximizing likelihood.
So in this case: $$ \begin{align} -\mathscr{L}(\mu) & = - \log p(X_1=x_1) \\ {} & = \log{\sqrt{2\pi}} + \frac{1}{2}(x_1 - \mu)^2 \end{align} $$ --- To minimize $$-\mathscr{L}(\mu) = \log{\sqrt{2\pi}} + \frac{1}{2}(x_1 - \mu)^2$$ Ignore terms that are independent of $\mu$, and concentrate only on minimizing the last term. Now, this term is always positive, so the smallest value it can have is 0. So, we minimize it by setting $\hat{\mu}=x_1$. --- 2) Given a sample $x_1,x_2,\ldots,x_n$ of $n$ iid random variables with $X_i \sim N(\mu,1)$ for all $i$, Show that the maximum likelihood estimate of $\mu$, $\hat{\mu}=\overline{x}$ the sample mean! --- Here we would follow a similar approach, write out the negative log likelihood as a function $f(\mu;x_i)$ of $\mu$ that depends on data $x_i$. Two useful properties here are: 1. $p(X_1=x_1,X_2=x_2,\ldots,X_n=x_n)=p(X_1=x_1)p(X_2=x_2)\cdots p(X_n=x_n)$, 2. $\log \prod_i f(\mu;x_i) = \sum_i \log f(\mu;x_i)$ Then find a value of $\mu$ that minimizes this function. Hint: we saw this when we showed that the sample mean is the minimizer of total squared distance in our exploratory analysis unit! 
<file_sep>/materials/slides/model_selection/index.html <!DOCTYPE html> <html lang="" xml:lang=""> <head> <title>Model Selection</title> <meta charset="utf-8" /> <meta name="author" content="<NAME>" /> <meta name="date" content="2020-04-26" /> <link href="libs/remark-css-0.0.1/default.css" rel="stylesheet" /> <link href="libs/remark-css-0.0.1/default-fonts.css" rel="stylesheet" /> <link rel="stylesheet" href="custom.css" type="text/css" /> </head> <body> <textarea id="source"> class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Classifier Evaluation and Model Selection] .author[<NAME>] .other-info[ University of Maryland, College Park, USA CMSC320: 2020-04-26 ] .logo[![](img/logo.png)] --- layout: true ## Classifier evaluation --- How do we determine how well classifiers are performing? One way is to compute the _error rate_ of the classifier, the percent of mistakes it makes when predicting class --- class: split-50 .column[ ```r logis_fit &lt;- glm(default ~ balance, data=Default, family="binomial") logis_pred_prob &lt;- predict(logis_fit, type="response") logis_pred &lt;- ifelse(logis_pred_prob &gt; 0.5, "Yes", "No") print(table(predicted=logis_pred, observed=Default$default)) # error rate mean(Default$default != logis_pred) * 100 # dummy error rate mean(Default$default != "No") * 100 ``` ] .column[ ``` ## observed ## predicted No Yes ## No 9625 233 ## Yes 42 100 ``` ``` ## [1] 2.75 ``` ``` ## [1] 3.33 ``` ] --- We need a more precise language to describe classification mistakes: | | True Class + | True Class - | Total | |------------------:|:--------------------|---------------------|-------| | Predicted Class + | True Positive (TP) | False Positive (FP) | P* | | Predicted Class - | False Negative (FN) | True Negative (TN) | N* | | Total | P | N | | --- Using these we can define statistics that describe classifier performance | Name | Definition | Synonyms | 
|--------------------------------:|:-----------|---------------------------------------------------| | False Positive Rate (FPR) | FP / N | Type-I error, 1-Specificity | | True Positive Rate (TPR) | TP / P | 1 - Type-II error, power, sensitivity, **recall** | | Positive Predictive Value (PPV) | TP / P* | **precision**, 1-false discovery proportion | | Negative Predicitve Value (NPV) | FN / N* | | --- In the credit default case we may want to increase **TPR** (recall, make sure we catch all defaults) at the expense of **FPR** (1-Specificity, clients we lose because we think they will default) --- This leads to a natural question: Can we adjust our classifiers TPR and FPR? Remember we are classifying `Yes` if $$ \log \frac{P(Y=\mathtt{Yes}|X)}{P(Y=\mathtt{No}|X)} &gt; 0 \Rightarrow \\ P(Y=\mathtt{Yes}|X) &gt; 0.5 $$ What would happen if we use `\(P(Y=\mathtt{Yes}|X) &gt; 0.2\)`? --- ![](index_files/figure-html/unnamed-chunk-4-1.png)&lt;!-- --&gt; --- A way of describing the TPR and FPR tradeoff is by using the **ROC curve** (Receiver Operating Characteristic) and the **AUROC** (area under the ROC) --- ![](index_files/figure-html/unnamed-chunk-5-1.png)&lt;!-- --&gt; --- class: split-30 Consider comparing a logistic regression model using all predictors in the dataset, including an interaction term between balance and student. .column[ `default ~ balance*student + income` ] .column[ &lt;img src="index_files/figure-html/unnamed-chunk-8-1.png" style="display: block; margin: auto;" /&gt; ] --- Another metric that is frequently used to understand classification errors and tradeoffs is the precision-recall curve: &lt;img src="index_files/figure-html/unnamed-chunk-9-1.png" style="display: block; margin: auto;" /&gt; --- The bigger model shows a slightly higher precision at the same recall values and slightly higher area under the precision-recall curve. 
This is commonly found in datasets where there is a skewed distribution of classes (e.g., there are many more "No" than "Yes" in this dataset). The area under the PR curve tends to distinguish classifier performance better than the area under the ROC curve in these cases.

---
layout: true

## Model Selection

---

Our goal when we use a learning model like linear or logistic regression, decision trees, etc., is to learn a model that can predict outcomes for new unseen data.

---

We should therefore think of model evaluation based on _expected prediction error_: what will the prediction error be for data _outside_ the training data.

--

How then, do we measure our models' ability to predict unseen data, when we only have access to training data?

---
layout: true

## Cross-validation

---

The most common method to evaluate model **generalization** performance is _cross-validation_.

It is used in two essential data analysis phases: _Model Selection_ and _Model Assessment_.

---

### Model Selection

Decide what kind, and how complex, of a model we should fit.

--

Consider a regression example: I will fit a linear regression model, what predictors should be included?, interactions?, data transformations?

--

Another example is what classification tree depth to use.

--

Which kind of algorithm to use, linear regression vs. decision tree vs. random forest

---

### Model Assessment

Determine how well our selected model performs as a **general** model.

--

Ex. I've built a linear regression model with a specific set of predictors. How well will it perform on unseen data?

--

The same question can be asked of a classification tree of specific depth.

---

Cross-validation is a _resampling_ method to obtain estimates of **expected prediction error rate** (or any other performance measure on unseen data).

In some instances, you will have a large predefined test dataset **that you should never use when training**. In the absence of access to this kind of dataset, cross validation can be used.
---
layout: true

## Validation Set

---

The simplest option to use cross-validation is to create a _validation_ set, where our dataset is **randomly** divided into _training_ and _validation_ sets. Then the _validation_ set is set aside, and not used until we are ready to compute **test error rate** (once, don't go back and check if you can improve it).

![](images/validation.png)

---
class: split-50

Let's look at our running example using automobile data, where we want to build a regression model to predict miles per gallon given other auto attributes.

.column[A linear regression model was not appropriate for this dataset. Use _polynomial_ regression as an illustrative example. ]

.column[
&lt;img src="index_files/figure-html/unnamed-chunk-10-1.png" style="display: block; margin: auto;" /&gt;
]

---

For polynomial regression, our regression model (for a single predictor `\(X\)`) is given as a `\(d\)` degree polynomial.

`$$\mathbb{E}[Y|X=x] = \beta_0 + \beta_1 x + \beta_2 x^2 + \cdots + \beta_d x^d$$`

For _model selection_, we want to decide what degree `\(d\)` we should use to model this data.

---
class: split-50

.column[
Using the _validation set_ method, split our data into a training set, fit the regression model with different polynomial degrees `\(d\)` on the training set, measure test error on the validation set.
]

.column[
&lt;img src="index_files/figure-html/unnamed-chunk-12-1.png" style="display: block; margin: auto;" /&gt;
]

---
layout: true

## Resampled validation set

---

The validation set approach can be prone to sampling issues. It can be highly variable as error rate is a random quantity and depends on observations in training and validation sets.
--

We can improve our estimate of _test error_ by averaging multiple measurements of it (remember the law of large numbers).

---
class: split-50

.column[
Resample the validation set 10 times (yielding different validation and training sets) and average the resulting test errors.]

.column[
&lt;img src="index_files/figure-html/unnamed-chunk-14-1.png" style="display: block; margin: auto;" /&gt;
]

---
layout: true

## Leave-one-out Cross-Validation

---

This approach still has some issues. Each of the training sets in our validation approach only uses 50% of data to train, which leads to models that may not perform as well as models trained with the full dataset and thus we can overestimate error.

--

To alleviate this situation, we can extend our approach to the extreme:

Make each single training point its own validation set.

---
class: split-50

.column[
Procedure:
For each observation `\(i\)` in data set:
  a. Train model on all but `\(i\)`-th observation
  b. Predict response for `\(i\)`-th observation
  c. Calculate prediction error
]

.column[![](images/loocv.png)]

---
class: split-50

.column[
This gives us the following _cross-validation_ estimate of error.

$$
CV_{(n)} = \frac{1}{n} \sum_i (y_i - \hat{y}_i)^2
$$
]

.column[![](images/loocv.png)]

---

Advantages:

- use `\(n-1\)` observations to train each model
- no sampling effects introduced since error is estimated on each sample

--

&lt;!-- something --&gt;

Disadvantages:

- Depending on the models we are trying to fit, it can be very costly to train `\(n\)` models (one per left-out observation).
- Error estimate for each model is highly variable (since it comes from a single datapoint).
---

On our running example

&lt;img src="index_files/figure-html/unnamed-chunk-16-1.png" style="display: block; margin: auto;" /&gt;

---
layout: true

## k-fold Cross-Validation

---

This discussion leads us to the most commonly used cross-validation approach _k-fold Cross-Validation_.

---
class: split-50

.column[
Procedure:
Partition observations randomly into `\(k\)` groups (folds).

For each of the `\(k\)` groups of observations:
- Train model on observations in the other `\(k-1\)` folds
- Estimate test-set error (e.g., Mean Squared Error) on this fold
]

.column[![](images/kfoldcv.png)]

---
class: split-50

.column[
Procedure:
Compute average error across `\(k\)` folds

`$$CV_{(k)} = \frac{1}{k} \sum_i MSE_i$$`

where `\(MSE_i\)` is mean squared error estimated on the `\(i\)`-th fold
]

.column[![](images/kfoldcv.png)]

---

- Fewer models to fit (only `\(k\)` of them)
- Less variance in each of the computed test error estimates in each fold.

--

It can be shown that there is a slight bias (usually overestimating) in the error estimate obtained from this procedure.

---

Running Example

&lt;img src="index_files/figure-html/unnamed-chunk-18-1.png" style="display: block; margin: auto;" /&gt;

---
layout: true

## Cross-Validation in Classification

---

Each of these procedures can be used for classification as well. In this case we would substitute MSE with a performance metric of choice. E.g., error rate, accuracy, TPR, FPR, AUROC.

--

Note however that not all of these work with LOOCV (e.g. AUROC, since it can't be defined over single data points).
---
layout: true

## Comparing models using cross-validation

---
class: split-50

.column[
Suppose you want to compare two classification models (logistic regression vs. a decision tree) on the `Default` dataset. We can use Cross-Validation to determine if one model is better than the other, using a `\(t\)`-test for example.
]

.column[
![](index_files/figure-html/unnamed-chunk-20-1.png)&lt;!-- --&gt;
]

---

Using hypothesis testing:

&lt;table&gt;
 &lt;thead&gt;
  &lt;tr&gt;
   &lt;th style="text-align:left;"&gt; term &lt;/th&gt;
   &lt;th style="text-align:right;"&gt; estimate &lt;/th&gt;
   &lt;th style="text-align:right;"&gt; std.error &lt;/th&gt;
   &lt;th style="text-align:right;"&gt; statistic &lt;/th&gt;
   &lt;th style="text-align:right;"&gt; p.value &lt;/th&gt;
  &lt;/tr&gt;
 &lt;/thead&gt;
&lt;tbody&gt;
  &lt;tr&gt;
   &lt;td style="text-align:left;"&gt; (Intercept) &lt;/td&gt;
   &lt;td style="text-align:right;"&gt; 0.0267 &lt;/td&gt;
   &lt;td style="text-align:right;"&gt; 0.0020306 &lt;/td&gt;
   &lt;td style="text-align:right;"&gt; 13.148828 &lt;/td&gt;
   &lt;td style="text-align:right;"&gt; 0.0000000 &lt;/td&gt;
  &lt;/tr&gt;
  &lt;tr&gt;
   &lt;td style="text-align:left;"&gt; methodtree &lt;/td&gt;
   &lt;td style="text-align:right;"&gt; 0.0030 &lt;/td&gt;
   &lt;td style="text-align:right;"&gt; 0.0028717 &lt;/td&gt;
   &lt;td style="text-align:right;"&gt; 1.044677 &lt;/td&gt;
   &lt;td style="text-align:right;"&gt; 0.3099998 &lt;/td&gt;
  &lt;/tr&gt;
&lt;/tbody&gt;
&lt;/table&gt;

In this case, we do not observe any significant difference between these two classification methods.

---
layout: true

## Summary

---

Model selection and assessment are critical steps of data analysis.

Error and accuracy statistics are not enough to understand classifier performance.

Classifications can be done using probability cutoffs to trade off, e.g., TPR-FPR (ROC curve), or precision-recall (PR curve).
Area under ROC or PR curve summarize classifier performance across different cutoffs. --- Resampling methods are general tools used for this purpose. k-fold cross-validation can be used to provide larger training sets to algorithms while stabilizing empirical estimates of expected prediction error </textarea> <style data-target="print-only">@media screen {.remark-slide-container{display:block;}.remark-slide-scaler{box-shadow:none;}}</style> <script src="https://remarkjs.com/downloads/remark-latest.min.js"></script> <script>var slideshow = remark.create({ "ratio": "16:9" }); if (window.HTMLWidgets) slideshow.on('afterShowSlide', function (slide) { window.dispatchEvent(new Event('resize')); }); (function(d) { var s = d.createElement("style"), r = d.querySelector(".remark-slide-scaler"); if (!r) return; s.type = "text/css"; s.innerHTML = "@page {size: " + r.style.width + " " + r.style.height +"; }"; d.head.appendChild(s); })(document); (function(d) { var el = d.getElementsByClassName("remark-slides-area"); if (!el) return; var slide, slides = slideshow.getSlides(), els = el[0].children; for (var i = 1; i < slides.length; i++) { slide = slides[i]; if (slide.properties.continued === "true" || slide.properties.count === "false") { els[i - 1].className += ' has-continuation'; } } var s = d.createElement("style"); s.type = "text/css"; s.innerHTML = "@media print { .has-continuation { display: none; } }"; d.head.appendChild(s); })(document); // delete the temporary CSS (for displaying all slides initially) when the user // starts to view slides (function() { var deleted = false; slideshow.on('beforeShowSlide', function(slide) { if (deleted) return; var sheets = document.styleSheets, node; for (var i = 0; i < sheets.length; i++) { node = sheets[i].ownerNode; if (node.dataset["target"] !== "print-only") continue; node.parentNode.removeChild(node); } deleted = true; }); })(); (function() { "use strict" // Replace <script> tags in slides area to make them executable var scripts 
= document.querySelectorAll( '.remark-slides-area .remark-slide-container script' ); if (!scripts.length) return; for (var i = 0; i < scripts.length; i++) { var s = document.createElement('script'); var code = document.createTextNode(scripts[i].textContent); s.appendChild(code); var scriptAttrs = scripts[i].attributes; for (var j = 0; j < scriptAttrs.length; j++) { s.setAttribute(scriptAttrs[j].name, scriptAttrs[j].value); } scripts[i].parentElement.replaceChild(s, scripts[i]); } })(); (function() { var links = document.getElementsByTagName('a'); for (var i = 0; i < links.length; i++) { if (/^(https?:)?\/\//.test(links[i].getAttribute('href'))) { links[i].target = '_blank'; } } })();</script> <script> remark.macros['scale'] = function (percentage) { var url = this; return '<img src="' + url + '" style=width: ' + percentage + '"/>'; }; </script> <script> slideshow._releaseMath = function(el) { var i, text, code, codes = el.getElementsByTagName('code'); for (i = 0; i < codes.length;) { code = codes[i]; if (code.parentNode.tagName !== 'PRE' && code.childElementCount === 0) { text = code.textContent; if (/^\\\((.|\s)+\\\)$/.test(text) || /^\\\[(.|\s)+\\\]$/.test(text) || /^\$\$(.|\s)+\$\$$/.test(text) || /^\\begin\{([^}]+)\}(.|\s)+\\end\{[^}]+\}$/.test(text)) { code.outerHTML = code.innerHTML; // remove <code></code> continue; } } i++; } }; slideshow._releaseMath(document); </script> <!-- dynamically load mathjax for compatibility with self-contained --> <script> (function () { var script = document.createElement('script'); script.type = 'text/javascript'; script.src = 'https://mathjax.rstudio.com/latest/MathJax.js?config=TeX-MML-AM_CHTML'; if (location.protocol !== 'file:' && /^https?:/.test(script.src)) script.src = script.src.replace(/^https?:/, ''); document.getElementsByTagName('head')[0].appendChild(script); })(); </script> </body> </html> <file_sep>/materials/lecture-notes/04-datatypes.rmd # (Part) Data representation modeling, ingestion and cleaning {-} # 
Measurements and Data Types ```{r setup, echo=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` Now that we have our tools ready, let's start doing some analysis. First, let's go over some principles of R as a data analysis environment. R is a computational environment for data analysis. It is designed around a _functional_ language, as opposed to _procedural_ languages like Java or C, that has desirable properties for the type of operations and workflows that are frequently performed in the course of analyzing datasets. In this exercise we will start learning some of those desirable properties while performing an analysis of a real dataset. ## A data analysis to get us going I'm going to do an analysis of Baltimore crime to guide our discussion of R. We'll use data downloaded from Baltimore City's awesome open data site (this was downloaded a couple of years ago so if you download now, you will get different results). The repository for this particular data is here. [https://data.baltimorecity.gov/Crime/BPD-Arrests/3i3v-ibrt](https://data.baltimorecity.gov/Crime/BPD-Arrests/3i3v-ibrt) ## Getting data We've prepared the data previously into a comma-separated value file (`.csv` file): each line contains attribute values (separated by commas) describing arrests in the City of Baltimore. The `read_csv` command is part of the `readr` R package and allows you to read a dataset stored in a csv file. This function is extremely versatile, and you can read more about it by using the standard help system in R: `?read_csv`. The result of running calling this function is the data itself, so, by running the function in the console, the result of the function is printed. 
**Note:** To download this dataset to follow along you can use the following code: ```{r, eval=FALSE} if (!dir.exists("data")) dir.create("data") download.file("https://www.hcbravo.org/IntroDataSci/misc/BPD_Arrests.csv", destfile="data/BPD_Arrests.csv") ``` To make use of this dataset we want to assign the result of calling `read_csv` (i.e., the dataset) to a variable: ```{r vars1, message=FALSE} library(tidyverse) arrest_tab <- read_csv("data/BPD_Arrests.csv") arrest_tab ``` ```{r echo=FALSE, eval=FALSE} arrest_tab$race <- factor(arrest_tab$race) arrest_tab$sex <- factor(arrest_tab$sex) arrest_tab$incidentOffense <- factor(arrest_tab$incidentOffense) ``` Now we can ask what _type_ of value is stored in the `arrest_tab` variable: ```{r type} class(arrest_tab) ``` The `data.frame` is a workhorse data structure in R. It encapsulates the idea of _entities_ (in rows) and _attribute values_ (in columns). We call these _rectangular datasets_. The other types `tbl_df` and `tbl` are added by `tidyverse` for improved functionality. We can ask other features of this dataset: ```{r questions} # This is a comment in R, by the way # How many rows (entities) does this dataset contain? nrow(arrest_tab) # How many columns (attributes)? ncol(arrest_tab) # What are the names of those columns? colnames(arrest_tab) ``` Now, in Rstudio you can view the data frame using `View(arrest_tab)`. ### Names, values and functions Let's review the concepts of names values and functions again. In the console, we've now written a few instructions, e.g. `View(arrest_tab)`. Let's take a closer look at how these instructions are put together. **_expressions_**: first of all, we call these instructions _expressions_, which are just text that R can evaluate into a value. `View(arrest_tab)` is an expression. **_values_**: so, what's a value? They are numbers, strings, data frames, etc. This is the data we will be working with. The number `2` is a value. So is the string `"Hector"`. 
So, what value is produced when R evaluates the expression `View(arrest_tab)`? Nothing, which we also treat as a value. That wasn't very interesting, but it does have a side effect: it shows the `arrest_tab` dataset in the Data viewer. How about a simpler expression: `arrest_tab`, what value is produced when R evaluates the expression `arrest_tab`? The data.frame containing that data. Try it out in the console. **_names_**: so if `arrest_tab` isn't a value, what is it? It is a _name_. We use these to refer to values. So, when we write the expression `arrest_tab`, we tell R we want the _value_ referenced by the name `arrest_tab`, that is, the data itself! ![](img/names_values.png) **_functions_**: Besides numbers, strings, data frames, etc. another important type of value is the _function_. Functions are a series of instructions that take some input value and produce a different value. The name `View` refers to the function that takes a data frame as input, and displays it in the Data viewer. Functions are called using the parentheses we saw before: `View(arrest_tab)`, the parentheses say that you are passing input `arrest_tab` to the function `View`. We'll see later how we can write our own functions. ## Entities and attributes As a reminder, we are using the term _entities_ to refer to the objects to which data in a dataset refers to. For instance, in our example dataset, each arrest is an _entity_. In a rectangular dataset (a data frame) this corresponds to rows in a table. We then say that a dataset contains _attributes_ for each entity. For instance, attributes of each arrest would be the person's _age_, the type of offense, the location, etc. In a rectangular dataset, this corresponds to the columns in a table. This language of _entities_ and _attributes_ is commonly used in the database literature. In statistics you may see _experimental units_ or _samples_ for _entities_ and _covariates_ for _attributes_. 
In other instances _observations_ for _entities_ and _variables_ for _attributes_. In Machine Learning you may see _example_ for _entities_ and _features_ for _attributes_. For the most part, all of these are exchangable. This table summarizes the terminology: | Field | Entities | Attributes | |-------|----------|------------| | Databases | Entities | Attributes | | Machine Learning | Examples | Features | | Statistics | Observations/Samples | Variables/Covariates | This chapter is concerned with the types of data we may encounter as _attributes_ in data analyses. ## Categorical attributes A categorical attribute for a given entity can take only one of a finite set of examples. For example, the `sex` variable can only have value `M`, `F`, or `` (we'll talk about missing data later in the semester). ```{r} table(arrest_tab$sex) ``` The result of a coin flip is categorical: `heads` or `tails`. The outcome of rolling an 8-sided die is categorical: `one`, `two`, ..., `eight`. Can you think of other examples? Categorical data may be _unordered_ or _ordered_. In our example dataset all categorical data is _unordered_, e.g., `sex`, `race`, etc. Examples of _ordered categorical data_ are grades in a class, Likert scale categories, e.g., `strongly agree`, `agree`, `neutral`, `disagree`, `strongly disagree`, etc. ### Factors in R We said that R is designed for data analysis. My favorite example of how that manifests itself is the `factor` datatype. If you look at your dataset now, `arrest_tab$sex` is a vector of strings: ```{r} class(arrest_tab$sex) summary(arrest_tab$sex) ``` However, as a measurement, or attribute, it should only take one of two values (or three depending on how you record missing, unknown or unspecified). So, in R, that categorical data type is called a _factor_. 
Notice what the `summary` function does after turning the `sex` attribute into a _factor_: ```{r} arrest_tab$sex <- factor(arrest_tab$sex) summary(arrest_tab$sex) ``` This distinction shows up in many other places where functions have different behavior when called on different types of values. The possible values a _factor_ can take are called _levels_: ```{r} levels(arrest_tab$sex) ``` Exercise: you should transform the `race` attribute into a factor as well. How many levels does it have? ## Discrete numeric attributes These are attributes that can take specific values from elements of ordered, discrete (possibly infinite) sets. The most common set in this case would be the non-negative positive integers. This data is commonly the result of counting processes. In our example dataset, age, measured in years, is a discrete attribute. Frequently, we obtain datasets as the result of summarizing, or aggregating other underlying data. In our case, we could construct a new dataset containing the number of arrests per neighborhood (we will see how to do this later) ```{r, echo=FALSE} library(dplyr) arrest_tab %>% group_by(neighborhood) %>% summarize(number_of_arrests=n()) %>% head() ``` In this new dataset, the _entities_ are each neighborhood, the `number_of_arrests` attribute is a _discrete numeric_ attribute. Other examples: the number of students in a class is discrete, the number of friends for a specific Facebook user. Can you think of other examples? Distinctions between ordered categorical and discrete numerical data is that ordered categorical data do not have magnitude. For instance, is an 'A' in a class twice as good as a 'C'? Is a 'C' twice as good as a 'D'? Not necessarily. Grades don't have an inherent magnitude. However, if we _encode_ grades as 'F=0,D=1,C=2,B=3,A=4', etc. they do have magnitude. In that case, an 'A' _is_ twice as good as a 'C', and a 'C' _is_ twice as good as a 'D'. 
So in summary, if ordered data has magnitude, then _discrete numeric_ if not, _ordered categorical_. ## Continuous numeric data These are attributes that can take any value in a continuous set. For example, a person's height, in say inches, can take any number (within the range of human heights). Here is another dataset we can use to look at this datatype. In this case, entities are cars and we look at continuous numeric attributes `speed` and `stopping distance`: ```{r f04_cars_plot, echo=FALSE} data(cars) cars %>% ggplot(aes(x=speed, y=dist)) + geom_point(size=3) + theme_bw() + labs(x="speed (mph)", y="stopping distance (ft)") #plot(cars$speed, cars$dist, pch=19, xlab="speed (mph)", #ylab="stopping distance (ft)") ``` The distinction between continuous and discrete is a bit tricky since measurements that have finite precision are, in a sense, discrete. Remember, however, that continuity is not a property of the specific dataset you have in hand, but rather of the process you are measuring. The number of arrests in a neighborhood cannot, in principle, be fractional, regardless of the precision at which we measure this. If we had the appropriate tool, we could measure a person's height with infinite precision. This distinction is very important when we build statistical models of datasets for analysis. For now, think of discrete data as the result of counting, and continuous data the result of some physical measurement. Here's a question: is `age` in our dataset a continuous or discrete numeric value? ## Other examples Consider a dataset of images like the super-famous [MNIST dataset of handwritten digits](https://www.kaggle.com/c/digit-recognizer). This dataset contains images of handwritten digits. So each image is an _entity_. Each image has a _label_ attribute which states which of the digits 0,1,...9 is represented by the image. What type of data is this (categorical, continuous numeric, or discrete numeric)? 
![](img/mnist_example.png) Now, each image is represented by grayscale values in a 28x28 grid. That's 784 attributes, one for each square in the grid, containing a grayscale value. Now what type of data are these other 784 attributes? ## Other important datatypes The three datatypes we saw above encompass a fairly large swath of data you will come across. Our arrest dataset contains other important datatypes that we will run across frequently: - Text: Arbitrary strings that do not encode a categorical attribute. - Datetime: Date and time of some event or observation (e.g., `arrestDate`, `arrestTime`) - Geolocation: Latitude and Longitude of some event or observation (e.g., `Location.`) ## Units Something that we tend to forget but is **extremely** important for the modeling and interpretation of data is that attributes are for the most part _measurements_ and that they have _units_. For example, age of a person can be measured in different units: _years_, _months_, etc. These can be converted to one another, but nonetheless in a given dataset, that _attribute_ or measurement will be recorded in some specific units. Similar arguments go for distances and times, for example. In other cases, we may have unitless measurements (we will see later an example of this when we do _dimensionality reduction_). In these cases, it is worth thinking about _why_ your measurements are unit-less. When performing analyses that try to summarize the effect of some measurement or attribute on another, units matter a lot! We will see the importance of this in our _regression_ section. For now, make sure you make a mental note of units for each measurement you come across. This will force you to think about where and how your data was obtained, which will become very important when modeling and interpreting the results of these models. ## Quick questions 1) True or False. In a _rectangular dataset_ rows correspond to attributes describing an entity. 2) True or False. 
In a _rectangular dataset_ rows correspond to entities, the units of observation we are interested in analyzing Suppose I have collected a dataset of sales of handheld devices around the world with attributes `country`, `date`, `device model`, `total sales`. Specify the data type for each of these attributes. <file_sep>/materials/lectures/SentimentAnalysis/SentimentAnalysis.Rmd --- title: "Sentiment Analysis" author: "<NAME>" date: "June 25, 2015" output: html_document --- This document is based on [Cheng-Jun Wang's](http://chengjun.github.io/) [post on sentiment analysis in R](http://chengjun.github.io/en/2014/04/sentiment-analysis-with-machine-learning-in-R/). ##Sentiment Analysis The goal of sentiment analysis is to classify text into sentiment categories (e.g., positive, negative) based on the word content. Initial work in this area used supervised machine learning algorithms. I.e., given a text corpus, each document tagged with a sentiment label, learn a classifier that categorizes new documents based on sentiment. In this document we will do this analysis for a small corpus of Tweets. Current research on far more complex learning models (e.g., Deep Learning) is showing much promise to learn more sophisticated and accurate sentiment models. ##Tools R includes very useful packages for text mining and processing. For more information consult the [Natural Language Processing Task View](http://cran.r-project.org/web/packages/RTextTools/index.html). In particular, we will use the [tm package](http://cran.r-project.org/web/packages/tm/index.html) that includes many fundamental operations in text processing (e.g., stemming, stop word removal, document to word vector representation), and the [RTextTools package](http://cran.r-project.org/web/packages/RTextTools/index.html) that implements a number of Machine Learning algorithms that have proven to be particuarly useful in text classification tasks. 
##Datasets We will use a small corpus of labeled tweets downloaded from [this Sentiment Analysis tutorial](https://github.com/victorneo/Twitter-Sentimental-Analysis). ##Toy Example Let's start by loading the libraries we will use in this analysis and creating the toy dataset (based on [Cheng-Jun Wang's post](http://chengjun.github.io/en/2014/04/sentiment-analysis-with-machine-learning-in-R/)), ```{r setup, echo=FALSE} knitr::opts_chunk$set(cache=FALSE) ``` ```{r load, message=FALSE} library(tm) library(RTextTools) library(e1071) library(dplyr) # a toy dataset of labeled tweets # these are positive tweets, each # tweet in this list is tagged as 'positive' pos_tweets <- rbind( c('I love this car', 'positive'), c('This view is amazing', 'positive'), c('I feel great this morning', 'positive'), c('I am so excited about the concert', 'positive'), c('He is my best friend', 'positive') ) # negative tweets neg_tweets <- rbind( c('I do not like this car', 'negative'), c('This view is horrible', 'negative'), c('I feel tired this morning', 'negative'), c('I am not looking forward to the concert', 'negative'), c('He is my enemy', 'negative') ) # test tweets, we'll use this to test the accuracy of the # learned sentiment classifier test_tweets <- rbind( c('feel happy this morning', 'positive'), c('larry friend', 'positive'), c('not like that man', 'negative'), c('house not great', 'negative'), c('your song annoying', 'negative') ) # put all tweets together into a single matrix tweets <- rbind(pos_tweets, neg_tweets, test_tweets) %>% as.data.frame() colnames(tweets) <- c("tweet", "sentiment") ``` The resulting dataset looks as follows: ```{r, echo=FALSE} tweets %>% sample_n(10) %>% knitr::kable() ``` The first step in the analysis will be to convert each tweet into a word occurrence vector. This representation (known as bag-of-words) is extremely simple, but usually serves as a very good starting point. 
The conversion is done in two steps, first all words in the corpus are collected, second each document is then represented by a list, of length equal to the total number of words in the corpus, with each entry in the list containing the number of times the corresponding word appears in the document. Alternatively, it may only contain 1 or 0 to indicate if the word appears in the document or not. Let's construct the bag-of-words representation for this toy example. ```{r} # the create matrix function is # defined in the RTextTools package dtm <- create_matrix(tweets[,1], language="english", removeStopwords=FALSE, removeNumbers=TRUE, stemWords=FALSE) ``` Let's see what the resulting document-term matrix looks like: ```{r, echo=FALSE} inspect(dtm) ``` With this representation, we can use standard machine learning classification algorithms. A very simple algorithm is the "Naive Bayes Classifier" that learns a probability of (in this case) each sentiment, based on the frequency each term appears in either positive or negative tweets. Let's train the classifier using the training tweets: ```{r} nb_classifier <- e1071::naiveBayes(as.matrix(dtm[1:10,]), factor(tweets[1:10,2])) ``` Let's plot the how the learned model treats each term when classifying a tweet as positive or negative ```{r} weights <- sapply(nb_classifier$tables, function(x) x[,2]) weights[1,] <- -weights[1,] library(tidyr) library(ggplot2) weight_dat <- weights %>% as.data.frame() %>% mutate(sentiment=rownames(.)) %>% gather(term, weight,-sentiment) %>% spread(sentiment,weight) weight_dat %>% ggplot(aes(x=term, y=positive)) + geom_bar(stat="identity") + geom_bar(stat="identity", aes(y=negative)) + labs(title="Learned weights", y="Positive weight", x="Term") + coord_flip() ``` ### A closer look at NaiveBayes The Naive Bayes classifier is a very simple, but extremely useful approach to classification. It is very similar in flavor to LDA. 
Remember, what we want to estimate in classification is a **posterior class probability** $p(Y=k|X)$. In our case, $Y$ can be `positive` or `negative` and $X$ are the words observed in a given tweet. In the Naive Bayes classifier we use the _Bayes Rule_, which follows from the definition of conditional probability: $$ P(Y=k|X) = \frac{P(X|Y=k)P(Y=k)}{P(X)} $$ This says that we can estimate sentiment probability from three pieces: the a-priori probability of observing a positive or negative tweet ($P(Y=k)$), the probability of observing a specific set of words in a tweet ($P(X)$) and the _conditional_ probability of observing a specific set of words in a positive or negative tweet ($P(X|Y=k)$). Let's take these one at a time: 1) We can estimate the probability of observing a negative or positive tweet $P(Y=k)$ from the proportion of negative or positive tweets in our training set. Let's denote that quantity $p_k$. Let's write a dplyr expression to compute that. ```{r} apriori_stats <- tweets[1:10,] %>% group_by(sentiment) %>% summarize(num_tweets=n()) %>% mutate(proportion_tweets=num_tweets/sum(num_tweets), log_prop=log(proportion_tweets)) apriori_stats %>% knitr::kable(digits=4) ``` 2) We will predict sentiment based on the posterior probability. If we find that $P(Y=k|X)$ is larger for `positive` than `negative`, then we predict `positive`. Because of that we don't need to estimate $P(X)$ since it is the same value for both classes. 3) This is where things get interesting. $P(X|Y=k)$ is the probability of observing a specific tweet (set of words) in a positive or negative tweet. We will make a first simplification and assume that observing words in tweets are _independent_ events. 
So, the probability of observing a specific set of words is the product of the probabilities of observing each word in the set: $$ P(X|Y=k) = \prod_{j=1}^l P(X_j|Y=k) $$ where $X_j$ is the $j$-th word in our dictionary, and $P(X_j|Y=k)$ is the probability of observing word $X_j$ in a positive or negative tweet. This is what we showed in the plot above. Now, how should we model this probability: this sounds like a Bernoulli process: we either observe the word in a positive or negative tweet with some probability $p_{jk}$: $$ P(X_j|Y=k) = p_{jk}^{I(j)}(1-p_{jk})^{(1-I(j))} $$ with $I(j)=1$ if tweet contains word $j$ and 0 otherwise. Ok, now how do we estimate $p_{jk}$? We calculate the proportion of tweets of each class that contain the word: ```{r} library(slam) library(tidyr) # first create a dataset with columns word/document/sentiment train_word_tweet_table <- dtm[1:10,] %>% as.matrix() %>% as.data.frame() %>% mutate(doc=rownames(.), sentiment=tweets$sentiment[1:10]) %>% gather(word,count,-doc,-sentiment) %>% filter(count>0) # now count the number of tweets of each class containing each word train_word_count_table <- train_word_tweet_table %>% group_by(sentiment, word) %>% summarize(num_tweets_in_class=n()) # finally, compute the proportion of tweets containing each word for each class train_word_stats <- train_word_count_table %>% inner_join(apriori_stats) %>% select(sentiment,word,num_tweets_in_class,num_tweets) %>% mutate(class_proportion=num_tweets_in_class / num_tweets, log_proportion=log(class_proportion), log_1mproportion=log(1-class_proportion)) # let's take a look at the table train_word_stats %>% ungroup() %>% arrange(word) %>% knitr::kable(digits=4) ``` Let's take one last look at the Naive Bayes model. It's always a good idea to do these calculations in _log-probability_ space instead of _probability_ space for numerical reasons. 
It is also convenient to look at our conditional probability in log space as well: $$ \begin{align} \log P(Y=k|X) & \propto & \log P(X|Y=k)P(Y=k) \\ {} & = & \log \prod_j P(X_j|Y=k) + \log p_k \\ {} & = & \sum_j \log P(X_j|Y=k) + \log p_k \\ {} & = & \sum_j \log \left[ p_{jk}^{I(j)}(1-p_{jk})^{1-I(j)} \right] + \log p_k \\ {} & = & \sum_j \left[ I(j)\log p_{jk} + (1-I(j))\log (1-p_{jk}) \right] + \log p_k \\ \end{align} $$ Which looks like a linear model! You can think of apriori log-probability $\log p_k$ as an _intercept_, the indicators of word occurence $I(j)$ as _predictors_ and log conditional occurence probabilities $\log p_{jk}$ and $\log (1-p_{jk})$ as _parameters_. The trained Naive Bayes model is completely defined by these two tables: the apriori table and the word/class count tables. To predict the sentiment of a new tweet, we compute the log conditional class probability for each class and predict the sentiment with highest probability: ```{r} test_word_tweet_table <- dtm[-(1:10),] %>% as.matrix() %>% as.data.frame() %>% mutate(doc=rownames(.)) %>% gather(word,count,-doc) %>% filter(count>0) # let's classify one tweet (tweet no. 11) test_word_tweet_table %>% filter(doc==11) %>% right_join(train_word_stats) %>% mutate(count=ifelse(is.na(count),0,count)) %>% mutate(weight=ifelse(count==1, log_proportion, log_1mproportion)) %>% group_by(sentiment) %>% summarize(log_word_prob=sum(weight)) %>% inner_join(apriori_stats) %>% mutate(conditional_prob=log_word_prob + log_prop) %>% select(sentiment, conditional_prob) ``` ### Looking forward So, we see now that we can write all the computations needed to learn a Naive Bayes model for sentiment analysis. Having worked through this toy example we can start thinking of how to setup this type of analysis in a map-reduce architecture like spark. 
The ingredients will be: 1) read input tweets and tokenize 2) map (in the map-reduce sense) each word/tweet id/sentiment pair onto a computing node 3) reduce (in the map-reduce sense) to count the number of tweets each word appears for each class 4) store the final count tables to make predictions Next time we use `sparkR` to create this full pipeline. <file_sep>/materials/lecture-notes/01-intro.rmd # Introduction and Overview ## What is Data Science? Data science encapsulates the interdisciplinary activities required to create data-centric artifacts and applications that address specific scientific, socio-political, business, or other questions. Let's look at the constiuent parts of this statement: ### Data Measureable units of information gathered or captured from activity of people, places and things. ### Specific Questions Seeking to understand a phenomenon, natural, social or other, can we formulate specific questions for which an answer posed in terms of patterns observed, tested and or modeled in data is appropriate. ### Interdisciplinary Activities Formulating a question, assessing the appropriateness of the data and findings used to find an answer require understanding of the specific subject area. Deciding on the appropriateness of models and inferences made from models based on the data at hand requires understanding of statistical and computational methods. ### Data-Centric Artifacts and Applications Answers to questions derived from data are usually shared and published in meaningful, succinct but sufficient, reproducible artifacts (papers, books, movies, comics). Going a step further, interactive applications that let others explore data, models and inferences are great. ## Why Data Science? The granularity, size and accessibility data, comprising both physical, social, commercial and political spheres has exploded in the last decade or more. 
> "I keep saying that the sexy job in the next 10 years will be statisticians” > <NAME>, Chief Economist at Google (http://www.nytimes.com/2009/08/06/technology/06stats.html?_r=0) > “The ability to take data—to be able to understand it, to process it, to extract value from it, to visualize it, to communicate it—that’s going to be a hugely important skill in the next decades, not only at the professional level but even at the educational level for elementary school kids, for high school kids, for college kids.” > “Because now we really do have essentially free and ubiquitous data. So the complimentary scarce factor is the ability to understand that data and extract value from it.” > <NAME> (http://www.mckinsey.com/insights/innovation/hal_varian_on_how_the_web_challenges_managers) ## Data Science in Society Because of the large amount of data produced across many spheres of human social and creative activity, many societal questions may be addressed by establishing patterns in data. In the humanities, this can range from unproblematic quesitions of how to dissect a large creative corpora, say music, literature, based on raw characteristics of those works, text, sound and image. To more problematic questions, of analysis of intent, understanding, appreciation and valuation of these creative corpora. In the social sciences, issues of fairness and transparency in the current era of big data are especially problematic. Is data collected representative of population for which inferences are drawn? Are methods employed learning latent unfair factors from ostensibly fair data? These are issues that the research community is now starting to address. In all settings, issues of ethical collection of data, application of models, and deployment of data-centric artifacts are essential to grapple with. Issues of privacy are equally important. 
## Course Organization This course will cover basics of how to represent, model and communicate about data and data analyses using the R data analysis environment for Data Science. The course is roughly divided into five areas: - Area 0: tools and skills ○ Toolset (Rstudio/tidyverse/rmarkdown) ○ Best practices ○ Debugging data science - Area I: types and operations ○ Data tables and data types ○ Operations on tables ○ Basic plotting ○ Tidy data / the ER model ○ Relational Operations ○ SQL ○ Advanced: other data models, db consistency and concurrency - Area II: wrangling ○ Data acquisition (load and scrape) ○ EDA Vis / grammar of graphics ○ Data cleaning (text, dates) ○ EDA: Summary statistics ○ Data analysis with optimization (derivatives) ○ Data transformations ○ Missing data - Area III: modeling ○ Univariate probability and statistics ○ Hypothesis testing ○ Multivariate probablity and statistics (joint and conditional probability, Bayes thm) ○ Data Analysis with geometry (vectors, inner products, gradients and matrices) ○ Linear regression ○ Logistic regression ○ Gradient descent (batch and stochastic) ○ Trees and random forests ○ K-NN ○ Naïve Bayes ○ Clustering ○ PCA - Area IV: essential applications ○ Text mining ○ Graphs ○ Forecasting - Area V: communication ○ Designing data visualizations for communication not exploration ○ Interactive visualization ○ Writing about data, analysis, and inferences ## General Workflow The data science activities we will cover are roughly organized into a general workflow that will help us navigate this material (Figure from Zumel and Mount). ![](img/zumel_mount_cycle.png) ### Defining the Goal - What is the question/problem? - Who wants to answer/solve it? - What do they know/do now? - How well can we expect to answer/solve it? - How well do they want us to answer/solve it? ### Data Collection and Management - What data is available? - Is it good enough? - Is it enough? - What are sensible measurements to derive from this data? 
Units, transformations, rates, ratios, etc. ### Modeling - What kind of problem is it? E.g., classification, clustering, regression, etc. - What kind of model should I use? - Do I have enough data for it? - Does it really answer the question? ### Model Evaluation - Did it work? How well? - Can I interpret the model? - What have I learned? ### Presentation - Again, what are the measurements that tell the real story? - How can I describe and visualize them effectively? ### Deployment - Where will it be hosted? - Who will use it? - Who will maintain it? <file_sep>/materials/slides/stat_intro/stat_intro.Rmd --- title: "Statistical Principles" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Statistical Principles] .author[<NAME>] .other-info[ University of Maryland, College Park, USA CMSC320: `r Sys.Date()` ] .logo[![](img/logo.png)] --- ```{r setup, include=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) ``` # Why Stats? In this class we learn _Statistical and Machine Learning_ techniques for data analysis. By the time we are done, you should - be able to read **critically** papers or reports that use these methods. - be able to use these methods for daata analysis --- # Why Stats? In either case, you will need to ask yourself if findings are **statistically significant**. --- class: split-50 # Why Stats? .column[ - Use a classification algorithm to distinguish images - Accurate 70 out of 100 cases. - Could this happen by chance alone? ] .column[ .image-50[![](img/images.png)] ] --- # Why Stats? 
To be able to answer these questions, we need to understand some basic probabilistic and statistical principles. In this course unit we will review some of these principles. --- layout: true # Variation, randomness and stochasticity --- class: split-30 So far, we have not spoken about _randomness_ and _stochasticity_. We have, however, spoken about _variation_. .column[_spread_ in a dataset refers to the fact that in a population of entities there is naturally occurring variation in measurements] .column[ ```{r, echo=FALSE, message=FALSE, warning=FALSE, fig.height=5} library(tidyverse) theme_set(theme_bw()) library(nycflights13) flights %>% mutate(log_dep_delay = sign(dep_delay) * log(abs(dep_delay + 1))) %>% ggplot(aes(x=log_dep_delay)) + geom_histogram(bins=20) ``` ] --- class: split-50 .column[ Another example: in sets of tweets there is natural variation in the frequency of word usage. ] .column[ .image-50[![](img/trump_words.svg)] ] --- In summary, we can discuss the notion of _variation_ without referring to any randomness, stochasticity or noise. --- layout: true # Why Probability? --- Because, we **do** want to distinguish, when possible: - naturally occurring variation, vs. - randomness or stochasticity --- Example: want to learn something about education loan debt for 19-30 year olds in Maryland. - Find loan debt for **all** 19-30 year old Maryland residents, and calculate mean and standard deviation. -- - That's difficult to do for all residents. -- - Instead we sample (say by randomly sending Twitter surveys), and _estimate_ the average and standard deviation of debt in this population from the sample. --- Now, this presents an issue since we could do the same from a different random sample and get a different set of estimates. Why? -- Because there is naturally-occurring variation in this population. --- So, a simple question to ask is: > How good are our _estimates_ of debt mean and standard deviation from a sample of 19-30 year old Marylanders? 
--- Another example: suppose we build a predictive model of loan debt for 19-30 year old Marylanders based on other variables (e.g., sex, income, education, wages, etc.) from our sample. -- > How good will this model perform when predicting debt in general? --- We use probability and statistics to answer these questions. -- - Probability captures stochasticity in the sampling process, while -- - we _model_ naturally occurring variation in measurements in a population of interest. --- layout: false # One final word The term _population_ means > **the entire** collection of entities we want to model This could include people, but also images, text, chess positions, etc. --- layout: true ## Random variables --- The basic concept in our discussion of probability is the _random variable_. Task: was a given tweet generated by a bot? Action: Sample a tweet **at random** from the set of all tweets ever written and have a human expert decide if it was generated by a bot or not. Principle: Denote this as a _binary_ random variable $X \in \{0,1\}$, with value $1$ if the tweet is bot-generated and 0 otherwise. -- Why is this a random value? Because it depends on the tweet that was _randomly_ sampled. --- layout: true ## (Discrete) Probability distributions --- A _probability distribution_ is a function $P:\mathcal{D} \to [0,1]$ mapping the set $\mathcal{D}$ of all values random variable $X$ can take to the interval $[0,1]$. -- We start with a _probability mass function_ $p$: a. $p(X=x) \geq 0$ for all values $x \in \mathcal{D}$, and b. $\sum_{x\in \mathcal{D}} p(X=x) = 1$ --- How to interpret quantity $p(X=1)$? -- a. $p(X=1)$ is the _probability_ that a uniformly random sampled tweet is bot-generated, which implies -- b. the proportion of bot-generated tweets in the set of "all" tweets is $p(X=1)$. --- ### Example The oracle of TWEET Suppose we have a magical oracle and know for a _fact_ that 70% of "all" tweets are bot-generated. -- In that case $p(X=1) = .7$ and $p(X=0)=1-.7=.3$. 
--- The _cumulative probability distribution_ $P$ describes the sum of probability up to a given value: $$ P(x) = \sum_{x' \in \mathcal{D} \textrm{ s.t. } x' \leq x} p(X=x') $$ --- ## Expectation What if I randomly sampled $n=100$ tweets? How many of those do I _expect_ to be bot-generated? -- _Expectation_ is a formal concept in probability: $$ \mathbb{E} [X] = \sum_{x\in \mathcal{D}} x p(X=x) $$ --- What is the expectation of $X$ (a single sample) in our tweet example? -- $$ \mathbb{E}[X] = 0 \times p(X=0) + 1 \times p(X=1) = \\ 0 \times .3 + 1 \times .7 = .7 $$ --- What is the expected number of bot-generated tweets in a sample of $n=100$ tweets? Define $Y=X_1 + X_2 + \cdots + X_{100}$. Then we need $\mathbb{E}[Y]$. --- We have $X_i \in \{0,1\}$ for each of the $n=100$ tweets Each obtained by uniformly and _independently_ sampling from the set of all tweets. Then, random variable $Y$ is _the number of bot-generated tweets in my sample of $n=100$ tweets_. --- $$ \begin{aligned} \mathbb{E} [Y] & = \mathbb{E} [X_1 + X_2 + \cdots + X_{100}] \\ {} & = \mathbb{E} [X_1] + \mathbb{E} [X_2] + \cdots + \mathbb{E} [X_{100}] \\ {} & = .7 + .7 + \cdots + .7 \\ {} & = 100 \times .7 \\ {} & = 70 \end{aligned} $$ --- This uses some facts about expectation you can show in general. (1) For any pair of random variables $X_1$ and $X_2$, $\mathbb{E} [X_1 + X_2] = \mathbb{E} [X_1] + \mathbb{E} [X_2]$. (2) For any random variable $X$ and _constant_ a, $\mathbb{E} [aX] = a \mathbb{E} [X]$. --- layout: true ## Estimation --- So far we assume we have access to an oracle that told us $p(X=1)=.7$. In reality, we _don't_. -- For our tweet analysis task, we need to _estimate_ the proportion of "all" tweets that are bot-generated. -- This is where our probability model and the expectation we derive from it come in. --- Given _data_ $x_1, x_2, x_3, \ldots, x_{100}$, With 67 of those tweets labeled as bot-generated (i.e., $x_i=1$ for 67 of them) -- We can say $y=\sum_i x_i=67$. 
-- We _expect_ $y=np$ with $p=p(X=1)$ -- Use that observation to _estimate_ $p$! --- $$ \begin{aligned} np = 67 & \Rightarrow \\ 100p = 67 & \Rightarrow \\ \hat{p} = \frac{67}{100} & \Rightarrow \\ \hat{p} = .67 \end{aligned} $$ --- Our estimate ($\hat{p}=.67$) is wrong, but close. Can we ever get it right? Can I say how wrong I should expect my estimates to be? --- Notice that our estimate of $\hat{p}$ is the sample _mean_ of $x_1,x_2,\ldots,x_n$. Let's go back to our oracle of tweet to do a thought experiment and replicate how we derived our estimate from 100 tweets a few thousand times. --- ```{r, echo=FALSE, message=FALSE} library(tidyverse) theme_set(theme_bw()) ``` ```{r, echo=FALSE, fig.align="center", fig.width=9} # proportion of bot-tweets in the the tweet population # as given by the oracle of TWEET p <- 0.7 # let's sample 100 tweets # this function chooses between values in a vector (0 and 1) # with probability given by vector prob # we need 100 samples from this vector with replacement # since there are fewer items in the vector than the size # of the sample we are making x <- sample(c(0,1), size=100, replace=TRUE, prob=c(1-p,p)) # compute the estimated proportion that are bot-generated (using the sample mean) phat <- mean(x) # if we had an oracle that let's us do this cheaply, # we could replicate our experiment 1000 times # (you don't in real life) # first let's write a function that gets an estimate # of proportion from a random sample get_estimate <- function(n, p=0.7) mean(sample(c(0,1), size=n, replace=TRUE, prob=c(1-p,p))) # let's make a vector with 1000 _estimates_ phats_100 <- replicate(1000, get_estimate(100)) # now let's plot a histogram of the hist(phats_100, xlab=expression(hat(p)), xlim=c(0.5,1), main="Distribution of p estimates from 100 tweets") ``` --- What does this say about our estimates of the proportion of bot-generated tweets if we use 100 tweets in our sample? 
Now what if instead of sampling $n=100$ tweets we used other sample sizes? --- ```{r, echo=FALSE, fig.align="center", fig.width=9} par(mfrow=c(2,3)) # what if we sample 10 tweets phats_10 <- replicate(1000, get_estimate(10)) hist(phats_10, main="10 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what if we sample 100 tweets phats_100 <- replicate(1000, get_estimate(100)) hist(phats_100, main="100 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what if we sample 500 tweets phats_500 <- replicate(1000, get_estimate(500)) hist(phats_500, main="500 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what about 1000 tweets phats_1000 <- replicate(1000, get_estimate(1000)) hist(phats_1000, main="1000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what about 5000 tweets phats_5000 <- replicate(1000, get_estimate(5000)) hist(phats_5000, main="5000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) # what about 10000 tweets phats_10000 <- replicate(1000, get_estimate(10000)) hist(phats_10000, main="10000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) ``` --- We can make a couple of observations: 1. The distribution of estimate $\hat{p}$ is _centered_ at $p=.7$, our unknown _population_ proportion, and 2. The _spread_ of the distribution **decreases** as the number of samples $n$ **increases**. --- This was a simulation, we faked the **data generating procedure**. In reality, we can't. -- What to do we do then? (1) Math, or (2) Resample --- layout: true ## Solve with Math --- Our simulation is an illustration of two central tenets of statistics: (a) The law of large numbers (LLN) (b) The central limit theorem (CLT) --- ### Law of large numbers (LLN) Given _independently_ sampled random variables $X_1,X_2,\cdots,X_n$ with $\mathbb{E} [X_i]=\mu$ for all $i$, $$ \frac{1}{n} \sum_i X_i \to \mu, \textrm{ as } n\to\infty $$ I.E. 
$\overline{x}$ _tends_ to the expected value $\mu$ (under some assumptions beyond the scope of this class) regardless of the distribution $X_i$. --- Implication: the sample mean was a good procedure to use to estimate parameters by matching their expected value! --- ### Central Limit Theorem (CLT) The LLN says that estimates built using the sample mean will tend to the correct answer. The CLT describes how these estimates are _spread_ around the correct answer. --- Here we will use the concept of _variance_ which is expected _spread_, measured in squared distance, from the _expected value_ of a random variable: $$ \mathrm{var[X]} = \mathbb{E} [(X - \mathbb{E} [X])^2] $$ --- Example: consider the variance of our random tweet example: $$ \begin{aligned} \mathrm{var[X]} & = \sum_{\mathcal{D}} (x-\mathbb{E} [X])^2 p(X=x) \\ {} & = (0 - p)^2 \times (1-p) + (1 - p)^2 \times p \\ {} & = p^2(1-p) + (1-p)^2p \\ {} & = p(1-p) (p + (1-p)) \\ {} & = p(1-p) (p - p + 1) \\ {} & = p(1-p) \end{aligned} $$ --- CLT: Given _independently_ sampled random variables $X_1,X_2,\cdots,X_n$ **from the same probability distribution** $P(X)$, with $\mathbb{E} [X_i]=\mu$ and $\mathrm{var}[X_i]=\sigma^2$ for all $i$, $$ P\left( \frac{1}{n} \sum_{i=1}^n X_i \right) \to N \left( \mu,\frac{\sigma}{\sqrt{n}} \right), \textrm{ as } n\to \infty $$ --- This says that as sample size $n$ increases, the distribution of sample means is _well_ approximated by a **normal distribution**. This means we can approximate the _expected error_ of our estimates well. --- layout: true ## (Continuous) Random Variables --- ### The normal distribution Random variable $Y=\sum_{i=1}^n X_i$ is _continuous_. The normal distribution describes the distribution of _continuous_ random variables over the range $(-\infty,\infty)$ using two parameters: **mean** $\mu$ and **standard deviation** $\sigma$. -- We write " $Y$ is normally distributed with mean $\mu$ and standard deviation $\sigma$" as $Y\sim N(\mu,\sigma)$. 
--- Continuous random variables are described by a _probability density function_. For normally distributed random variables: $$p(Y=y) = \frac{1}{\sqrt{2\pi}\sigma} \mathrm{exp} \left\{ -\frac{1}{2} \left( \frac{y-\mu}{\sigma} \right)^2 \right\}$$ --- Three examples of normal probability density functions with mean $\mu=60,50,60$ and standard deviation $\sigma=2,2,6$: ```{r, echo=FALSE, fig.align="center", fig.height=5.5} # 100 equally spaced values between 40 and 80 yrange <- seq(40, 80, len=100) # values of the normal density function density_values_1 <- dnorm(yrange, mean=60, sd=2) density_values_2 <- dnorm(yrange, mean=50, sd=2) density_values_3 <- dnorm(yrange, mean=60, sd=6) # now plot the function plot(yrange, density_values_1, type="l", col="red", lwd=2, xlab="y", ylab="density") lines(yrange, density_values_2, col="blue", lwd=2) lines(yrange, density_values_3, col="orange", lwd=2) legend("topright", legend=c("mean 60, sd 2", "mean 50, sd 2", "mean 60, sd 6"), col=c("red","blue","orange"), lwd=2) ``` --- Like the discrete case, probability density functions for continuous random variables need to satisfy certain conditions: a. $p(Y=y) \geq 0$ for all values $Y \in (-\infty,\infty)$, and b. $\int_{-\infty}^{\infty} p(Y=y) dy = 1$ --- One way of interpreting the density function of the normal distribution is that probability decays exponentially with rate $\sigma$ based on squared distance to the mean $\mu$. (Here is squared distance again!) $$ p(Y=y) \propto \exp \left\{ -{\frac{1}{2\sigma^2} (y-\mu)^2} \right \} $$ --- Also, notice the term inside the square? $$ z = \left( \frac{y - \mu}{\sigma} \right) $$ this is the _standardization_ transformation we saw before. 
--- The name _standardization_ comes from the _standard normal distribution_ $N(0,1)$ (mean 0 and standard deviation 1), which is very convenient to work with because its density function is much simpler: $$p(Z=z) = \frac{1}{\sqrt{2\pi}} \mathrm{exp} \left\{ -\frac{1}{2} z^2 \right\}$$ -- In fact, if random variable $Y \sim N(\mu,\sigma)$ then random variable $Z=\frac{Y-\mu}{\sigma} \sim N(0,1)$. --- One more technicality: The cumulative probability function for continuous random variables is given by $$ P(Y\leq y) = \int_{-\infty}^{y} p(Y=t) dt $$ where the density is zero outside $\mathcal{D}$, the range of values random variable $Y$ can take (e.g., for the normal distribution $\mathcal{D}=(-\infty,\infty)$) --- layout: true ## Solve with Math --- ### CLT continued We need one last bit of terminology to finish the statement of the CLT. Consider data $X_1,X_2,\cdots,X_n$ with $\mathbb{E}[X_i]= \mu$ for all $i$, **and** $\mathrm{var}(X_i)=\sigma^2$ for all $i$, and sample mean $Y=\frac{1}{n} \sum_i X_i$. The standard deviation of $Y$ is called the _standard error_: $$ \mathrm{se}(Y) = \frac{\sigma}{\sqrt{n}} $$ --- Now we can restate the CLT statement precisely: the distribution of $Y$ tends _towards_ $N\left( \mu,\frac{\sigma}{\sqrt{n}} \right)$ as $n \rightarrow \infty$. This says that, as sample size increases, the distribution of sample means is well approximated by a normal distribution, and that the spread of the distribution goes to zero at the rate $\sqrt{n}$. --- _Disclaimer_ There are a few mathematical subtleties. Two important ones are that a. $X_1,\ldots,X_n$ are iid (independent, identically distributed) random variables, and b. 
$\mathrm{var}[X] < \infty$ --- Let's redo our simulated replications of our tweet samples to illustrate the CLT at work: ```{r, echo=FALSE, fig.align="center", fig.height=5.5, fig.width=9} # we can calculate standard error for each of the # settings we saw previously and compare these replications # to the normal distribution given by the CLT # let's write a function that adds a normal density # plot for a given sample size draw_normal_density <- function(n,p=.7) { se <- sqrt(p*(1-p))/sqrt(n) f <- dnorm(seq(0.5,1,len=1000), mean=p, sd=se) lines(seq(0.5,1,len=1000), f, col="red", lwd=1.6) } par(mfrow=c(2,3)) # what if we sample 10 tweets phats_10 <- replicate(1000, get_estimate(10)) hist(phats_10, main="10 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(10) # what if we sample 100 tweets phats_100 <- replicate(1000, get_estimate(100)) hist(phats_100, main="100 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(100) # what if we sample 500 tweets phats_500 <- replicate(1000, get_estimate(500)) hist(phats_500, main="500 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(500) # what about 1000 tweets phats_1000 <- replicate(1000, get_estimate(1000)) hist(phats_1000, main="1000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(1000) # what about 5000 tweets phats_5000 <- replicate(1000, get_estimate(5000)) hist(phats_5000, main="5000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(5000) # what about 10000 tweets phats_10000 <- replicate(1000, get_estimate(10000)) hist(phats_10000, main="10000 tweets", xlab="p hat", xlim=c(.5,1), probability=TRUE) draw_normal_density(10000) ``` --- Here we see the three main points of the LLN and CLT: (1) the normal density is centered around $\mu=.7$, (2) the normal approximation gets better as $n$ increases, and (3) the standard error goes to 0 as $n$ increases. 
--- layout: true ## Solve with computation ### The Bootstrap Procedure --- What if the conditions that we used for the CLT don't hold? For instance, samples $X_i$ may not be independent. What can we do then, how can we say something about the precision of sample mean estimate $Y$? --- A useful procedure to use in this case is the **bootstrap**. It is based on using _randomization_ to simulate the stochasticity resulting from the population sampling procedure we are trying to capture in our analysis. --- The main idea is the following: given observations $x_1,\ldots,x_n$ and the estimate $y=\frac{1}{n}\sum_{i=1}^n x_i$, what can we say about the standard error of $y$? --- There are two challenges here: 1) our estimation procedure is deterministic, that is, if I compute the sample mean of a specific dataset, I will always get the same answer; and 2) we should retain whatever properties of estimate $y$ result from obtaining it from $n$ samples. --- The bootstrap is a randomization procedure that measures the variance of estimate $y$, using randomization to address challenge (1), but doing so with randomized samples of size $n$, addressing challenge (2). --- The procedure goes as follows: 1. Generate $B$ random datasets by sampling _with replacement_ from dataset $x_1,\ldots,x_n$. Denote randomized dataset $b$ as $x_{1b},\ldots,x_{nb}$. 2. Construct estimates from _each_ dataset, $y_b = \frac{1}{n}\sum_i x_{ib}$ 3. 
Compute center (mean) and spread (variance) of estimates $y_b$ --- Let's see how this works on the tweet oracle example ```{r, echo=FALSE, fig.height=5.5, fig.width=8, fig.align="center"} # remember our dataset is in variable x # this is how we get one bootstrap replicate # sample n observations from dataset x _with replacement_ xb <- sample(x, length(x), replace=TRUE) # let's do B=200 bootstrap randomizations using the # replicate function (it just replicates the given expression # however many times it is directed to do so) B <- 200 xb <- replicate(B, sample(x,length(x), replace=TRUE)) # xb is a matrix with 100 rows (the original length of dataset) and # 200 columns (the number of replicates) # now let's compute the bootstrap estimates y yb <- colMeans(xb) # and make a histogram of the bootstrap estimates hist(yb, probability=TRUE, main="Histogram of bootstrap estimates", xlab="Bootstrap Estimates",xlim=c(0.5,1)) abline(v=p, col="blue") draw_normal_density(100) ``` --- Not great, math works better when conditions are met. --- Let's look at a case where we don't expect the normal approximation to work so well by making samples not identically distributed. 
Let's make a new ORACLE of tweets where the probability of a tweet being bot-generated depends on the previous tweet --- ```{r, echo=FALSE} create_chain_dataset <- function(n=100, first_p=.7, change_p=.6) { res <- vector("numeric", n) res[1] <- sample(c(0,1), 1, prob=c(1-first_p, first_p)) for (i in seq(2,n)) { res[i] <- ifelse(runif(1) <= change_p, 1-res[i-1], res[i-1]) } res } chain_x <- create_chain_dataset(100) ``` ```{r, echo=FALSE, cache=FALSE, fig.align="center", fig.width=8, fig.height=5.5} # Now let's do the same bootstrap procedure in this case xb <- replicate(B, sample(chain_x, length(chain_x), replace=TRUE)) # xb is a matrix with 100 rows (the original length of dataset) and # 200 columns (the number of replicates) # now let's compute the bootstrap estimates y yb <- colMeans(xb) # and make a histogram of the bootstrap estimates hist(yb, probability=TRUE, main="Histogram of bootstrap estimates", xlab="Bootstrap Estimates", xlim=c(0,1)) draw_normal_density <- function(n,p=.7) { se <- sqrt(p*(1-p))/sqrt(n) f <- dnorm(seq(0,1,len=1000), mean=p, sd=se) lines(seq(0,1,len=1000), f, col="red", lwd=1.6) } draw_normal_density(100, mean(chain_x)) ``` --- Here, an analysis based on the classical CLT is not appropriate ( $X_i$ s are not independent). But the bootstrap analysis gives some information about the variability of our estimates. --- layout: true ## Summary How do we learn information about a **population**? Estimation. What can we say about estimates? Say something about sampling error. How? 
Math (LLN and CLT), or Computation (Bootstrap) <file_sep>/materials/slides/networks/network-data.Rmd --- title: "Network Data" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: chakra: libs/remark-0.14.0.min.js lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[Introduction to Data Science: Network Data] .author[<NAME>] .other-info[ University of Maryland, College Park, USA `r Sys.Date()` ] .logo[![](img/logo.png)] --- layout: true ## Network Data --- ```{r setup1, include=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) ``` In many applications we have data about entities, but also have data about interactions between entities. Dataset of financial transactions available from Kaggle at https://www.kaggle.com/ntnu-testimon/paysim1. Some of these transactions are marked as fraudulent. 
```{r libsetup, echo=FALSE, message=FALSE, warning=FALSE} library(tidyverse) library(ggraph) library(tidygraph) extrafont::loadfonts() ``` ```{r read_data, cache=TRUE, echo=FALSE, message=FALSE} node_df <- read_csv("data/transactions_nodes.csv") edge_df <- read_csv("data/transactions_edges.csv") ``` ```{r setup_data, cache=TRUE, echo=FALSE, message=FALSE} transaction_graph <- edge_df %>% inner_join(select(node_df, name, index), by=c("from"="index")) %>% mutate(from=name) %>% select(-name) %>% inner_join(select(node_df, name, index), by=c("to"="index")) %>% mutate(to=name) %>% select(-name) %>% as_tbl_graph(directed=TRUE) %>% activate(nodes) %>% mutate(node_type = str_sub(name, 1, 1)) %>% activate(edges) %>% mutate(isFraud=factor(isFraud)) ``` --- ```{r plotgraph, echo=FALSE, warning=FALSE} transaction_graph %>% ggraph(layout='graphopt') + geom_edge_fan(aes(color=isFraud), width=1) + geom_node_point(size=.1) + theme_graph(foreground=NA) + ggtitle("Network of financial transactions") ``` --- layout: true ## Preliminaries --- Think about ways to represent data about entities and their interactions. Mathematically, we use a **Network** as an abstraction of _entities_ and their interactions. We can use a **Graph** as a mathematical representation of this data. In this case, _vertices_ represent nodes (entities), and _edges_ represent links (interactions). --- Here is another graph as an example. In this case edges (or links) do not have directionality. 
```{r toygraph, echo=FALSE, message=FALSE, warning=FALSE} library(tidygraph) library(ggraph) graph <- as_tbl_graph(highschool) graph_1958 <- graph %>% activate(edges) %>% filter(year == 1958) undirected_graph_1958 <- graph_1958 %>% convert(to_undirected) %>% convert(to_simple) ``` --- We can also represent directional interactions with directed edges. ```{r toygraph2, echo=FALSE, message=FALSE} graph_1958 %>% ggraph(layout="kk") + geom_edge_fan(arrow=arrow(length=unit(0.15,"inches"))) + geom_node_point(size=3) + theme_graph(foreground=NA) + ggtitle("Directed graph") ``` --- In terms of our previous discussion on tidy, rectangular datasets, this is a case where we need to have two distinct tables to represent this data. - One table represents entities and their attributes: ```{r graph_nodes, echo=FALSE} transaction_graph %>% activate(nodes) %>% as_tibble() ``` --- - Second table to represent edges and their attributes: ```{r graph_edges, echo=FALSE} transaction_graph %>% activate(edges) %>% as_tibble() ``` --- layout: true ## Network-derived attributes --- Besides attributes measured for each node, in our example the type of party (Merchant or not for example), we can derive node and edge attributes based on the structure of the network. For instance, we can compute the _degree_ of a node, that is, the number of edges incident to the node. --- ```{r node_add_degree, echo=FALSE} transaction_graph <- transaction_graph %>% activate(nodes) %>% mutate(in_degree = centrality_degree(mode="in"), out_degree = centrality_degree(mode="out")) transaction_graph %>% activate(nodes) %>% as_tibble ``` --- The distribution of these newly created attributes, e.g., degree, is a fundamental analytical tool to characterize networks. 
```{r plot_deg, echo=FALSE, eval=TRUE,fig.align='center',fig.height=5} transaction_graph %>% activate(nodes) %>% as_tibble() %>% group_by(in_degree) %>% summarize(n=n()) %>% ungroup() %>% mutate(num_nodes = sum(n)) %>% mutate(deg_prop = n / num_nodes) %>% ggplot(aes(x=log(in_degree), y=log(deg_prop))) + geom_point() + labs(title="Degree distribution of transaction network", x="In-degree (log)", y="Proportion of nodes (log)") ``` --- High-degree nodes are _important_ to the network since they interact with many other nodes in the network. It would be useful to know if the nodes they interact with are also _important_ nodes. This is referred to as _centrality_. --- ```{r centrality, echo=FALSE,eval=TRUE,warning=FALSE,fig.align='center'} undirected_graph_1958 %>% activate(nodes) %>% mutate(centrality = centrality_eigen()) %>% ggraph(layout="kk") + geom_edge_fan() + geom_node_point(aes(size=centrality)) + theme_graph(foreground=NA) + ggtitle("Eigen-centrality") ``` --- We can similarly think of _important_ edges in the network. What are edges that may connect clusters of nodes in the network? One measure of edge importance is _betweenness_. --- ```{r betweenness, echo=FALSE,eval=TRUE,warning=FALSE,fig.align='center'} undirected_graph_1958 %>% activate(edges) %>% mutate(betweenness = centrality_edge_betweenness()) %>% ggraph(layout="kk") + geom_edge_fan(aes(width=betweenness)) + scale_edge_width_continuous(range=c(0,2)) + geom_node_point(size=3) + theme_graph(foreground=NA) ``` --- These types of network-derived attributes can in turn be used to understand topological properties of networks. For instance, we can use betweenness to find _communities_ or clusters of nodes in the graph. 
The Girvan-Newman Algorithm is a hierarchical method to partition nodes into communities using edge betweenness --- ```{r gn, echo=FALSE, eval=TRUE,warning=FALSE,fig.align='center'} undirected_graph_1958 %>% activate(edges) %>% mutate(betweenness=centrality_edge_betweenness()) %>% activate(nodes) %>% mutate(group = factor(group_edge_betweenness())) %>% ggraph(layout="kk") + geom_edge_fan(aes(width=betweenness)) + scale_edge_width_continuous(range=c(0,2)) + geom_node_point(aes(color=group), size=3) + theme_graph(foreground=NA) ``` --- ### Calculating Betweenness Formally, $\mathrm{betweenness}(e)$: fraction of node pairs $(x,y)$ where shortest path crosses edge $e$ For each node $x$, use breadth-first-search to count number of shortest paths through each edge in graph Sum result across nodes, and divide by two --- --- layout: true ## Resources --- There are a number of very useful R and python software tools to represent and manipulate network data. ### Cross-language igraph: http://igraph.org/ Extremely powerful tool for the representation, manipulation and visualization of network data. It underlies many of the R and python network libraries. 
--- ### R In R, the most commonly used packages are: - [`igraph`](https://cran.r-project.org/web/packages/igraph/index.html) - [`Rgraphviz`](https://www.bioconductor.org/packages/release/bioc/html/Rgraphviz.html) Newer pacakges use the tidy data paradigm to represent and manipulate networks: - [`tidygraph`](https://cran.r-project.org/web/packages/tidygraph/index.html) - [`ggraph`](https://cran.r-project.org/web/packages/ggraph/index.html) --- ### Python In python, most common tools are: - [`igraph`](http://igraph.org/python/doc/tutorial/tutorial.html) - [`networkx`](https://networkx.github.io/) <file_sep>/materials/lectures/Shiny/Shiny.md Interactive Web-based Data Visualization (1) ======================================================== author: CMSC320 date: Spring 2016 ======================================================== _Politics_: [http://www.nytimes.com/interactive/2012/11/02/us/politics/paths-to-the-white-house.html?_r=0](http://www.nytimes.com/interactive/2012/11/02/us/politics/paths-to-the-white-house.html?_r=0) _Science_: [http://epiviz.cbcb.umd.edu/?ws=YOsu0RmUc9l](http://epiviz.cbcb.umd.edu/?ws=YOsu0RmUc9l) _Movies_: [http://www.nytimes.com/interactive/2013/02/20/movies/among-the-oscar-contenders-a-host-of-connections.html](http://www.nytimes.com/interactive/2013/02/20/movies/among-the-oscar-contenders-a-host-of-connections.html) _Sports_: [http://fivethirtyeight.com/interactives/march-madness-predictions-2015/#mens](http://fivethirtyeight.com/interactives/march-madness-predictions-2015/#mens) Why Interactivity? 
======================================================== incremental: true _Reduce data dimension_: allow user to explore large datasets by quickly switching between dimensions _Overview first, zoom and filter, details on demand_: Provide big picture, let the user explore details as they desire _Linked views for high dimensions_: There is a limit to the number of aesthetic mappings in a single graphic, make multiple graphics but link data objects between them Web-based interactive visualization ====================================== Take advantage of HTML document description and the [Document Object Model](http://www.w3.org/DOM/) interface to _bind_ data to page elements. - Shiny: bind data to controls - Data-driven Documents (d3.js): bind data to svg elements directly HTML and DOM ======================================================== Web pages are structured using Hypertext Markup Language ```html <!DOCTYPE html> <html> <head> <title>Page Title</title> </head> <body> <h1>Page Title</h1> <p>This is a really interesting paragraph.</p> </body> </html> ``` Basic idea is to only specify _content_ and _structure_ but not specify directly _how_ to render pages. HTML and DOM ============================== Web pages are structured using Hypertext Markup Language ```html <!DOCTYPE html> <html> <head> <title>Page Title</title> </head> <body> <h1>Page Title</h1> <p>This is a really interesting paragraph.</p> </body> </html> ``` Structure is provided by page _elements._ An important element we'll see later is the arbitrary grouping/containment element `div`. HTML and DOM ============================== Web pages are structured using Hypertext Markup Language ```html <!DOCTYPE html> <html> <head> <title>Page Title</title> </head> <body> <h1>Page Title</h1> <p>This is a really interesting paragraph.</p> </body> </html> ``` The hierarchical structure of elements in a document are defined by the _Document Object Model_ (DOM). 
CSS =========================== Cascading Style Sheets are used to style elements in the DOM. ``` body { background-color: white; color: black; } ``` In general: ``` selectorA, selectorB, selectorC { property1: value; property2: value; property3: value; } ``` CSS ============================== Selectors: - Type selectors: match DOM elements by name ``` h1 /* select all level 1 headings */ p /* select all paragraphs */ div /* select all divs */ ``` CSS ============================== Selectors: - Class selectors: match DOM elements assigned to a specified class. ``` <p class="alert">You are about to become interactive</p> <p>But you are not</p> ``` ``` .alert { background-color: red; color: white; } ``` SVG =========================== Scalable Vector Graphics (SVG) is a special element used to create graphics with text. ``` <svg width="50" height="50"> <circle cx="25" cy="25" r="22" fill="blue" stroke="gray" stroke-width="2"/> </svg> ``` SVG ================================ Elements have _geometric_ attributes and _style_ attributes. ``` <circle cx="250" cy="25" r="25"/> ``` `cx`: x-coordinate of circle center `cy`: y-coordinate of circle center `r`: radius of circle SVG ============================= Elements have _geometric_ attributes and _style_ attributes. ``` <rect x="0" y="0" width="500" height="50"/> ``` `x`: x-coordinate of left-top corner `y`: y-coordinate of left-top corner `width`, `height`: width and height of rectangle SVG ================================== _style_ attributes ``` <circle cx="25" cy="25" r="22" fill="yellow" stroke="orange" stroke-width="5"/> ``` can be styled by class as well ``` svg .pumpkin { fill: yellow; stroke: orange; stroke-width: 5; } ``` ``` <circle cx="25" cy="25" r="22" class="pumpkin"/> ``` Shiny and D3 =========================== Shiny: construct DOM and bind data (variables for example) to elements (a slide control for example) D3: bind data to SVG element attributes (position, size, color, transparency, etc.) 
Reactivity ================= Interactivity and binding in Shiny achieved using _reactive programming_. Where objects _react_ to changes in other objects. ![](reactive1.png) Reactivity ============== Example: ![](reactive2.png) Reactivity ================= With intermediate objects: ![](reactive3.png) Reactivity ==================== A standard paradigm for interactive (event-driven) application development A nice review paper: [http://dl.acm.org/citation.cfm?id=2501666](http://dl.acm.org/citation.cfm?id=2501666) Binding data to graphical elements ================ So far, we have bound data objects to document elements. More examples: [http://shiny.rstudio.com/gallery/](http://shiny.rstudio.com/gallery/) Next time we bind data directly to _graphical_ elements - since using SVG these are also document elements (D3). <file_sep>/materials/lectures/Evaluation/Evaluation.Rmd --- title: "Evaluation using resampling methods" author: "CMSC320" date: "`r Sys.Date()`" output: html_document --- ```{r, echo=FALSE} knitr::opts_chunk$set(cache=TRUE) ``` Our discussion on regression and classification has been centered on fitting models by minizing error or maximizing likelihood given a dataset (also referred to as training data). This is usually fine when we want to use our model for _explanatory_ or _inferential_ tasks. Or when we use relatively inflexible models, like linear regression or logistic regression. However, as our interests shift to _prediction_ and more complex models, like non-linear regression, Tree-based methods or Support Vector Machines, this is usually not sufficient. In these cases, our goal is to avoid building models that are too _specific_ for the dataset we have on hand. Complex models can easily **overfit** our training data, in which case we don't learn much about the _population_ from which we obtain the training data and instead learn only about the training data itself. 
We say that we want to learn, or train models that **generalize** beyond the training data to other, unseen, data from the same population. This leads to a bit of an issue. How do we measure our models ability to predict unseen data, when we only have access to training data? ### Cross Validation The most common method to evaluate model **generalization** performance is _cross-validation_. It is used in two essential data analysis phases: _Model Selection_ and _Model Assessment_. In _Model Selection_, we decide how complex of a model we should fit. Consider a linear regression example: I will fit a linear regression model, what predictors should be included?, interactions?, data transformations? Another example is what classification tree depth to use. In _Model Assessment_, we determine how well does our selected model performs as a **general** model. Example: I've built a linear regression models, with specific predictors. How well will it perform on unseen data? The same question can be asked of a classification tree (of specific depth). Cross-validation is a _resampling_ method to obtain estimates of **test error rate** (or any other performance measure on unseen data). In some instances, you will have a large predefined test dataset **that you should never use when training**. In the absence of access to this kind of dataset, cross validation can be used. ### Validation Set The simplest option to use cross-validation is to create a _validation_ set, where our dataset is **randomly** divided into _training_ and _validation_ sets. Then the _validation_ is set aside, and not used at until until we are ready to compute **test error rate** (once, don't go back and check if you can improve it). ![](validation.png) Let's look at our running example using automobile data, where we want to build a regression model capable of predicting miles per gallon given other auto attributes. 
We saw in previous lectures that a linear regression model was not appropriate for this dataset. So instead we will use _polynomial_ regression. ```{r, echo=TRUE} library(ggplot2) library(ISLR) data(Auto) ggplot(Auto, aes(x=horsepower, y=mpg)) + geom_point() + geom_smooth() ``` In this case our regression model (for a single predictor $x$ is given by) as a $d$ degree polynomial. $$ \mathbb{E}y|X=x = \beta_0 + \beta_1 x + \beta_2 + x^2 + \cdots + \beta_d x^d $$ In the _Model Selection_ case, we want to decide what degree $d$ we should use to model this data. Using the _validation set_ method, we split our data into a training set, fit the regression model with different polynomial degrees $d$ on the training set, and measure test error on the validation set. ```{r, echo=TRUE} set.seed(1234) in_validation <- sample(nrow(Auto), nrow(Auto)/2) validation_set <- Auto[in_validation,] training_set <- Auto[-in_validation,] library(broom) library(dplyr) degrees <- seq(1, 10) error_rates <- sapply(degrees, function(deg) { fit <- lm(mpg~poly(horsepower, degree=deg), data=training_set) predicted <- predict(fit, newdata=validation_set) mean((validation_set$mpg - predicted)^2) }) plot(degrees, error_rates, type="b", xlab="Polynomial Degree", ylab="Mean Squared Error", pch=19, lwd=1.4, cex=1.4) ``` ### Resampled validation set This approach can be prone to sampling issues. It can be highly variable as error rate is a random quantity and depends on observations in training and validation sets. We can improve our estimate of _test error_ by averaging multiple measurements of it (remember the law of large numbers). We can do so by replicating our validation resampling 10 times (with different validation and training sets) and averaging the resulting test errors. 
```{r, echo=TRUE} set.seed(1234) library(RColorBrewer) palette(brewer.pal(10, "Dark2")) degrees <- seq(1, 10) error_rates <- replicate(10, { in_validation <- sample(nrow(Auto), nrow(Auto)/2) validation_set <- Auto[in_validation,] training_set <- Auto[-in_validation,] sapply(degrees, function(deg) { fit <- lm(mpg~poly(horsepower, degree=deg), data=training_set) predicted <- predict(fit, newdata=validation_set) mean((validation_set$mpg - predicted)^2) }) }) matplot(degrees, error_rates, type="b", pch=19, xlab="Polynomial Degree", ylab="Mean Squared Error", lwd=1.4, cex=1.4) ``` ### Leave-one-out Cross-Validation This approach still has some issues. Each of the training sets in our validation approach only uses 50% of data to train, which leads to models that may not perform as well as models trained with the full dataset and thus we can overestimate error. To alleviate this situation, we can extend our approach to the extreme. Make each single training point it's own validation set. Procedure: For each observation $i$ in data set: a. Train model on all but $i$-th observation b. Predict response for $i$-th observation c. Calculate prediction error This gives us the following _cross-validation_ estimate of error. $$ CV_{(n)} = \frac{1}{n} \sum_i (y_i - \hat{y}_i)^2 $$ ![](loocv.png) The advantages of this approach is that now we use $n-1$ observations to train each model and there is no randomness introduced since error is estimated on each sample. However, it has disadvantages as well. Depending on the models we are trying to fit, it can be very costly to train $n-1$ models. Also, the error estimate for each model is highly variable (since it comes from a single datapoint). 
```{r, echo=TRUE} error_rates <- sapply(degrees, function(deg) { mean(sapply(seq(len=nrow(Auto)), function(i) { fit <- lm(mpg~poly(horsepower, degree=deg), data=Auto[-i,]) (Auto$mpg[i] - predict(fit, newdata=Auto[i,,drop=FALSE]))^2 })) }) ``` ```{r, echo=TRUE} plot(degrees, error_rates, pch=19, cex=1.4, lwd=1.4, xlab="Polynomial Degree", ylab="Cross Validation Error", type="b") ``` For linear models (and some non-linear models) there is a nice trick that allows one to compute (exactly or approximately) LOOCV from the full data model fit which we will not get into here. ### k-fold Cross-Validation This discussion leads us to the most commonly used cross-validation approach _k-fold Cross-Validation_. Procedure: Partition observations randomly into $k$ groups (folds). For each of the $k$ groups of observations: - Train model on observations in the other $k-1$ folds - Estimate test-set error (e.g., Mean Squared Error) Compute average error across $k$ folds ![](kfoldcv.png) $$ CV_{(k)} = \frac{1}{k} \sum_i MSE_i $$ where $MSE_i$ is mean squared error estimated on the $i$-th fold In this case, we have fewer models to fit (only $k$ of them), and there is less variance in each of the computed test error estimates in each fold. It can be shown that there is a slight bias (over estimating usually) in error estimate obtained from this procedure. 
```{r, echo=TRUE} set.seed(1234) k <- 10 n <- nrow(Auto) fold_size <- ceiling(n/k) permuted_indices <- rep(NA, k * fold_size) permuted_indices[1:n] <- sample(n) fold_indices <- matrix(permuted_indices, nc=k) cv10_error_rates <- sapply(seq(1,k), function(fold_index) { test_indices <- na.omit(fold_indices[,fold_index]) train_set <- Auto[-test_indices,] test_set <- Auto[test_indices,] res <- sapply(degrees, function(deg) { fit <- lm(mpg~poly(horsepower, degree=deg), data=train_set) mean((Auto$mpg[test_indices] - predict(fit, newdata=test_set))^2) }) res }) ``` ```{r, echo=TRUE} matplot(degrees, cv10_error_rates, pch=19, type="b", lwd=1.4, cex=1.4, xlab="Polynomial Degrees", ylab="10-fold CV Error Rate") ``` ### Cross-Validation in Classification Each of these procedures can be used for classification as well. In this case we would substitute MSE with performance metric of choice. E.g., error rate, accuracy, TPR, FPR, AUROC. Note however that not all of these work with LOOCV (e.g. AUROC) ### Model assessment using cross-validation Suppose you want to compare two classification models (logistic regression vs. a decision tree) on the `Default` dataset. We can use Cross-Validation to determine if one model is better than the other, using the hypothesis testing approach we studied previously. 
```{r, echo=TRUE} library(ISLR) library(cvTools) library(tree) data(Default) fold_indices <- cvFolds(n=nrow(Default), K=10) error_rates <- sapply(1:10, function(fold_index) { test_indices <- which(fold_indices$which == fold_index) test_set <- Default[test_indices,] train_set <- Default[-test_indices,] logis_fit <- glm(default~., data=train_set, family="binomial") logis_pred <- ifelse(predict(logis_fit, newdata=test_set, type="response") > 0.5, "Yes", "No") logis_error <- mean(test_set$default != logis_pred) tree_fit <- tree(default~., data=train_set) pruned_tree <- prune.tree(tree_fit, best=3) tree_pred <- predict(pruned_tree, newdata=test_set, type="class") tree_error <- mean(test_set$default != tree_pred) c(logis_error, tree_error) }) rownames(error_rates) <- c("logis", "tree") error_rates <- as.data.frame(t(error_rates)) library(tidyr) library(dplyr) error_rates <- error_rates %>% mutate(fold=1:n()) %>% gather(method,error,-fold) ``` ```{r, echo=TRUE} boxplot(error~method, data=error_rates) ``` ```{r, echo=TRUE} lm(error~method, data=error_rates) %>% tidy() %>% knitr::kable() ``` In this case, we do not observe any significant difference between these methods. ### Summary Model selection and assessment are critical steps of data analysis. Resampling methods are general tools used for this purpose. Many data analysis frameworks have a lot of supporting libraries for this: `boot`, `cvTools`, many more. <file_sep>/content/resources/index.md --- date: 2016-08-29T08:54:37-04:00 title: CMSC320 Resources --- ## R * [R](http://www.r-project.org) is an open-source environment for data analysis. * The [RStudio](http://www.rstudio.com/ide) IDE is highly recommended. The [Revolution IDE](http://www.revolutionanalytics.com/academic-and-public-service-programs) is also very good, only Linux and Windows. * [tidyverse](http://tidyverse.org) is a collection of data science packages designed for consistency and interoperability. 
* [swirl](http://swirlstats.com/) is an interactive R (and general data analysis) tutorial * [Data Camp](https://www.datacamp.com/courses/free-introduction-to-r) has a nice short online course introducing R * R Task Views: The [Machine Learning](http://cran.r-project.org/web/views/MachineLearning.html) and [Optimization](http://cran.r-project.org/web/views/Optimization.html) Task Views list useful packages in R we may use. * R/Matlab references: [A short R guide for Matlab users.](http://mathesaurus.sourceforge.net/octave-r.html) [A longer one.](http://cran.r-project.org/doc/contrib/Hiebeler-matlabR.pdf) * R/Python references: [A short R guide for Python users.](http://mathesaurus.sourceforge.net/r-numpy.html) ## Python * [Python Tutorial](http://docs.python.org/tutorial/) * [Python Docs](http://docs.python.org/index.html) * [DataCamp Intro to Python](https://www.datacamp.com/courses/intro-to-python-for-data-science) * [Jupyter notebooks](http://jupyter.org/) * [Google Colab notebooks](http://colab.research.google.com) * [Short python tutorial by <NAME>](http://www.umiacs.umd.edu/~hal/courses/2011F_ML/p0/) * [scikit-learn](https://scikit-learn.org/stable/index.html) * [Numpy Tutorial (tentative)](http://scipy.org/Tentative_NumPy_Tutorial) * [Numpy User's Guide](http://docs.scipy.org/doc/numpy/user/) * [Scipy docs](http://docs.scipy.org/doc/) * [Keras documentation](https://keras.io/) * [Tensorflow documentation](https://www.tensorflow.org/tutorials/) ## Other Resources [Rstudio](http://rstudio.org) has made some very nice cheatsheets for a number of workflows and tools we'll look at this semester. You can find them here: [https://www.rstudio.com/resources/cheatsheets/](https://www.rstudio.com/resources/cheatsheets/) You can find a nice list of free data science books here: [http://www.wzchen.com/data-science-books/](http://www.wzchen.com/data-science-books/) ## Data Repositories * [Kaggle](http://www.kaggle.com/): is a site hosting data competitions. 
It's a great source of datasets, questions and tutorials. * [Kaggle Datasets](https://www.kaggle.com/datasets) a new repository in Kaggle specifically for datasets, including code and scripts by users to get analyses on these datasets started. * [data.world](https://data.world/) another new repository of public datasets. * [data.gov](http://www.data.gov/): The U.S. goverment's open data portal * [Gapminder](http://www.gapminder.org/data/) * [Global Health Observatory](http://www.who.int/gho/database/en/): World Health Organization's data repository. * [UCI Machine Learning Repository](http://archive.ics.uci.edu/ml/): contains many datasets useful for testing and benchmarking learning algorithms. * [StatLib](http://lib.stat.cmu.edu/datasets/): Statistical software and dataset portal maintained by CMU. * Yet another list of public datasets: [https://github.com/caesar0301/awesome-public-datasets](https://github.com/caesar0301/awesome-public-datasets) * And yet another list of public datasets: [http://blog.bigml.com/list-of-public-data-sources-fit-for-machine-learning/#national_governments](http://blog.bigml.com/list-of-public-data-sources-fit-for-machine-learning/#national_governments) * [Airbnb data](http://insideairbnb.com/get-the-data.html) * [NYC Taxi ride data](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page) * [Resources for data journalism with R](http://rddj.info) * [Algorithms and datasets for computational social science and digital humanities](http://ropengov.github.io/) * [Open population datasets](https://code.facebook.com/posts/596471193873876/open-population-datasets-and-open-challenges/) * [Google BigQuery Public Datasets](https://cloud.google.com/bigquery/public-data/) * [AWS Public Datasets](https://aws.amazon.com/public-datasets/) * Yelp provides a dataset for use if you give them your email address [here](https://www.yelp.com/dataset/download) <file_sep>/materials/classroom-scripts/cmsc320_class-script_20190205.R library(tidyverse) 
arrest_tab <- read_csv("data/BPD_Arrests.csv") arrest_tab # index attributes by name select(arrest_tab, sex, age, district) # index attributes by position select(arrest_tab, 1, 3, 4) # index attributes by position (range) select(arrest_tab, 1:4) # index entities by position (range) slice(arrest_tab, 1:4) # filter by attribute values filter(arrest_tab, age < 17) filter(arrest_tab, age >= 18 & age <= 25) # filter to age >= 18 and age <= 25 # select sex, age, district select( filter(arrest_tab, age >= 18 & age <= 25), sex, age, district) # same pipeline arrest_tab %>% filter(age >= 18 & age <= 25) %>% select(sex, age, district) # exercise arrest_tab %>% filter(age >= 18 & age <= 25) %>% select(sex, district, arrestDate) %>% sample_frac(.5) # filters dataset to arrests from the “SOUTHERN” district occurring before “12:00” (arrestTime) # selects attributes, sex, age # samples 10 entities at random (sample_n) arrest_tab %>% filter(district == "SOUTHERN" & arrestTime < "12:00") %>% select(sex, age) %>% sample_n(10) # age in months mutate(arrest_tab, age_in_months = 12 * age) %>% select(-age) summarize(arrest_tab, num_arrests=n(), mean_age=mean(age, na.rm=TRUE)) arrest_tab %>% filter(!is.na(sex)) %>% group_by(sex) %>% summarize(num_arrests=n(), mean_age=mean(age, na.rm=TRUE)) library(lubridate) arrest_tab %>% mutate(early_arrest=arrestTime <= hms("12:00:00")) %>% select(age, arrestTime, early_arrest) %>% group_by(early_arrest) %>% summarize(num_arrests=n(), mean_age=mean(age, na.rm=TRUE)) ## filters records to # the southern district and # ages between 18 and 25 # computes mean arrest age for each sex arrest_tab %>% filter(district == "SOUTHERN", age >= 18, age <= 25) %>% group_by(sex) %>% summarize(mean_age=mean(age)) <file_sep>/materials/lecture-notes/33-dimensionality_reduction.Rmd # Unsupervised Learning: Dimensionality Reduction Recall that in unsupervised data we are interested in characterizing patterns in predictor space where observation measurements are 
represented. Mathematically, we stated as an interest in characterizing $p(X)$ over $p$-dimensional predictor space. Clustering methods assume that this space $p(X)$ can be partitioned into subspaces containing "similar" observations. In dimensionality reduction, we assume that observations can be represented in a space with dimension much lower than $p$. We will see two general strategies for dimensionality reduction: data transformations into spaces of smaller dimension that capture global properties of a data set $X$, and data embeddings into lower dimensional spaces that retain local properties of a data set $X$. ## Principal Component Analysis Principal Component Analysis (PCA) is a dimensionality reduction method. The goal is to _embed data in high dimensional space (e.g., observations with a large number of variables), onto a small number of dimensions_. Note that its most frequent use is in EDA and visualization, but it can also be helpful in regression (linear or logistic) where we can transform input variables into a smaller number of predictors for modeling. Mathematically, the PCA problem is: Given: - Data set $\{\mathbf{x}_1, \mathbf{x}_2, \ldots, \mathbf{x}_n\}$, where $\mathbf{x}_i$ is the vector of $p$ variable values for the $i$-th observation. Return: - Matrix $\left[ \phi_1, \phi_2, \ldots, \phi_p \right]$ of _linear transformations_ that retain _maximal variance_. You can think of the first vector $\phi_1$ as a linear transformation that embeds observations into 1 dimension: $$ Z_1 = \phi_{11}X_1 + \phi_{21} X_2 + \cdots + \phi_{p1} X_p $$ where $\phi_1$ is selected so that the resulting dataset $\{ z_1, \ldots, z_n\}$ has _maximum variance_. In order for this to make sense mathematically data has to be centered, i.e., each $X_j$ has mean equal to zero and transformation vector $\phi_1$ has to be normalized, i.e., $\sum_{j=1}^p \phi_{j1}^2=1$. 
We can find $\phi_1$ by solving an optimization problem: $$ \max_{\phi_{11},\phi_{21},\ldots,\phi_{p1}} \frac{1}{n} \sum_{i=1}^n \left( \sum_{j=1}^p \phi_{j1} x_{ij} \right)^2 \\ \mathrm{s.t.} \sum_{j=1}^p \phi_{j1}^2 = 1 $$ Conceptually this optimization problem says _maximize variance_ but _subject to normalization constraint_. The second transformation $\phi_2$ is obtained next by solving a similar problem with the added constraint that $\phi_2$ **is orthogonal** to $\phi_1$. Taken together $\left[ \phi_1, \phi_2 \right]$ define a pair of linear transformations of the data into 2 dimensional space. $$ Z_{n\times 2} = X_{n \times p} \left[ \phi_1, \phi_2 \right]_{p \times 2} $$ Each of the columns of the $Z$ matrix is called a _Principal Component_. The units of the PCs are _meaningless_. In particular, comparing numbers _across_ PCs doesn't make mathematical sense. In practice, one may also use a scaling transformation on the variables $X_j$ to have unit variance. In general, if variables $X_j$ are measured in different units (e.g., miles vs. liters vs. dollars), variables should be scaled to have unit variance. Conversely, if they are all measured in the same units, they should not be scaled. 
```{r setup_pca, echo=TRUE, message=FALSE} library(tidyverse) library(readr) library(lubridate) datadir <- "data" url <- "http://files.zillowstatic.com/research/public/Affordability_Wide_2017Q4_Public.csv" filename <- basename(url) datafile <- file.path(datadir, filename) if (!file.exists(datafile)) { download.file(url, file.path(datadir, filename)) } afford_data <- read_csv(datafile) ``` ```{r tidy_pca, echo=TRUE, cache=FALSE, message=FALSE} tidy_afford <- afford_data %>% filter(Index == "Mortgage Affordability") %>% drop_na() %>% filter(RegionID != 0) %>% dplyr::select(RegionID, matches("^[1|2]")) %>% gather(time, affordability, matches("^[1|2]")) %>% type_convert(col_types=cols(time=col_date(format="%Y-%m"))) wide_afford_df <- tidy_afford %>% dplyr::select(RegionID, time, affordability) %>% spread(time, affordability) value_mat <- wide_afford_df %>% dplyr::select(-RegionID) %>% as.matrix() ``` ```{r zillow_pca, cache=TRUE, echo=TRUE} pca_res <- prcomp(value_mat, scale=FALSE) pca_au <- broom::augment(pca_res, wide_afford_df) pca_d <- broom::tidy(pca_res, matrix="d") pc_loading <- broom::tidy(pca_res, matrix="variables") %>% type_convert(col_types=cols(column=col_date("%Y-%m-%d"))) %>% mutate(PC=as.character(PC)) pc_mean <- pca_res$center pc_mean <- data_frame(column=names(pc_mean), PC="mean", value=pc_mean) %>% type_convert(col_types=cols(column=col_date("%Y-%m-%d"))) pc_loading <- pc_mean %>% bind_rows(pc_loading) ``` Here we plot the mortgage affordability data embedded into the first two principal components. There are some time in these two components that may be treated as outliers. Also, a clustering analysis in this reduced space seems like a reasonable next step. ```{r zillow_pcplot, echo=TRUE} ggplot(pca_au, aes(.fittedPC1, .fittedPC2)) + geom_point(size=2) + labs(x="PC1", y="PC2") ``` A natural question that arises: How many PCs should we consider in post-hoc analysis? 
One result of PCA is a measure of the variance corresponding to each PC relative to the total variance of the dataset. From that we can calculate the _percentage of variance explained_ for the $m$-th PC: $$ PVE_m=\frac{\sum_{i=1}^n z_{im}^2}{\sum_{j=1}^p \sum_{i=1}^n x_{ij}^2} $$ We can use this measure to choose the number of PCs in an ad-hoc manner. In our case, using more than 10 or so PCs does not add information. ```{r pca_scree, echo=TRUE} pca_d <- broom::tidy(pca_res, matrix="d") pca_d %>% filter(PC <= 30) %>% ggplot(aes(PC, 100 * cumulative)) + geom_line(size=1.32) + labs(x="PC", y="Pct. Variance Explained") ``` A useful _rule of thumb_: - If no apparent patterns in first couple of PCs, stop! - Otherwise, look at other PCs using PVE as a guide. There are bootstrap based methods to perform a statistically guided selection of the number of PCs. However, there is no commonly agreed upon method for choosing number of PCs used in practice, and methods are somewhat ad-hoc. ### Solving the PCA Algorithmically, the Principal Components solutions $\phi$ are obtained from the _singular value decomposition_ of observation matrix $X_{n\times p}=UDV^T$, where matrices $U$ and $V$ are orthogonal matrices ($U^TU=I$ and $V^TV=I$) called the left and right _singular vectors_ respectively. $D$ is a diagonal matrix with $d_1 \geq d_2 \geq \ldots \geq d_p \geq 0$. These are referred to as the _singular values_. Using our previous notation $V$ is the transformation matrix $V=\left[\phi_1,\phi_2,\cdots,\phi_p \right]$. Principal components $Z$ are given by the columns of $UD$. Since $U$ is orthogonal, $d_j^2$ equals the variance of the $j$th PC. From this observation we also see that we can write original observations $x_i$ in terms of PCs $z$ and transformations $\phi$. Specifically $x_i = z_{i1}\phi_1 + z_{i2}\phi_2 + \cdots + z_{ip} \phi_p$. We can think of the $\phi_j$ vectors as a basis over which we can represent original observations $i$. 
For this reason, another useful post-hoc analysis is to plot the transformation vectors $\phi_1, \phi_2, \ldots$. Here we plot the mean time series (since we center observations $X$ before performing the embedding) along with the first three $\phi_j$ vectors. ```{r pca_loadings, echo=TRUE} pc_loading %>% mutate(PC=forcats::fct_shift(factor(PC),-1)) %>% filter(PC %in% c("mean",1:3)) %>% ggplot(aes(column, value)) + geom_line() + facet_wrap(~PC) ``` ## Multidimensional Scaling Multidimensional scaling is a similar approach to PCA but looks at the task in a little different manner. Given observations $x_1,\ldots,x_N$ in $p$ dimensions, let $d_{ij}$ be the distance between observations $i$ and $j$. We may also use this algorithm given distances initially instead of $p$ dimensional observations. Multidimensional Scaling (MDS) seeks to find embeddings $z_1, \ldots, z_N$ of $k$ dimensions for which Euclidean distance (in $k$ dimensional space) is close to the input distances $d_{ij}$. In _least squares_ MDS, we can do this by minimizing $$ S_M(z_1,\ldots,z_N) = \sum_{i\neq j} (d_{ij}- \|z_i - z_j\|)^2 $$ A gradient descent algorithm is used to minimize this function. A related method that tends to better capture small distances is given by the _Sammon_ mapping: $$ S_{S_m}(z_1,\ldots,z_N) = \sum_{i\neq j} \frac{(d_{ij}- \|z_i - z_j\|)^2}{d_{ij}} $$ Since MDS can use distances as input, it is suitable to use when distances between observations are all we have. ## Summary Principal Component Analysis is a conceptually simple but powerful EDA tool. It is very useful at many stages of analyses. PCA interpretation can be very ad-hoc, however. It is part of large set of unsupervised methods based on _matrix decompositions_, including Kernel PCA, Non-negative Matrix Factorization and others. Embedding methods seek to capture local properties of observations. A popular recent method is the t-SNE method. 
<file_sep>/materials/lecture-notes/deploy_static #!/bin/bash set -x HUGO_HOME="${PWD}/../.." STATIC_DIR="bookdown-notes" BOOKDOWN_DIR="materials/lecture-notes" TIMESTAMP=$(date +"%T") cd ${HUGO_HOME}/static git rm -rf ${STATIC_DIR}/* mkdir ${STATIC_DIR} cp -r ${HUGO_HOME}/${BOOKDOWN_DIR}/_book/* ${STATIC_DIR}/ git add --all ${STATIC_DIR}/ git commit -m "Update notes: ${TIMESTAMP}" <file_sep>/materials/homeworks/ab_testing.md --- title: A/B Testing author: CMSC320 geometry: margin=1in fontfamily: utopia --- In this exercise you will experiment with the application of statistical inference in A/B testing. You are a Data Scientist at jsFrameworksRUs and you are tasked with conducting an experiment to measure the effect of a webpage redesign on click rate for a link of interest. You decide to use hypothesis testing to analyze the data you gather from the experiment. ## Part 1: Compare to known click rate ($p_A=0.5$) In the first case, you assume the click rate for the original version of the page (version A) is $p_A=.5$. The experiment you carry out is pretty simple: show the webpage to $n=50$ subjects and record whether they click on the link of interest or not. You will use this experiment to estimate your parameter of interest: $p_B$, the click rate for the new page design (version B). When you carry out your experiment, you record that $s=30$ subjects clicked on the link of interest. Based on our discussion in class, you treat this as $n=50$ draws from a $\mathrm{Bernoulli}(.5)$ random variable, and use the sample mean $\overline{x}=\frac{1}{n} \sum_{i=1}^{n} x_i=\frac{30}{50}=0.6$ as your estimate $\hat{p}_B$. You remember that the hypothesis testing framework is setup in a way where you use your experiment to _reject_ the hypothesis that the new design _does not_ increase click rate. Therefore, you want to test the (null) hypothesis $p_B \leq p_A = 0.5$ and _reject_ it if $P(\overline{X} > \hat{p}_B) \leq \alpha$ under this hypothesis. 
Remember, $\alpha$ is the rejection level, and we will use $\alpha=0.05$ here. To compute $P(\overline{X} > \hat{p}_B)$ under the null hypothesis you will use the normal approximation given by the Central Limit Theorem (CLT). (a) Derive expressions for $\mathbb{E} \overline{X}$ and $\mathrm{var}[\overline {X}]$ under the null hypothesis in terms of $p_A$. You will need to use the properties of expectations and variances described below. Here, I give you the derivation for $\mathbb{E} [\overline{X}]$, you need to do the same for $\mathrm{var}[\overline{X}]$. \begin{eqnarray} \mathbb{E} [\overline{X}] & = & \mathbb{E} \left[ \frac{1}{n} \sum_{i=1}^n X_i \right] \\ {} & = & \frac{1}{n} \sum_{i=1}^n \mathbb{E} [X_i] \\ {} & = & \frac{1}{n} (np_A) \\ {} & = & p_A \end{eqnarray} (b) Based on your derivation, compute values for $\mathbb{E} [\overline{X}]$ and $\mathrm{var} [\overline{X}]$ based on $p_A=0.5$ and $n=50$. Use R or python to do this. (c) Using the result above, you can now use the CLT by approximating the distribution of $\overline{X}$ as $N(\mathbb{E} [\overline{X}], \sqrt{\mathrm{var}(\overline{X})})$. Based on this approximation, compute $P(\overline{X} > \hat{p}_B)$. Use the R function `pnorm`, or `norm.cdf` in `scipy.stats` to compute this. (d) Should you reject the null hypothesis $p_B \leq p_A$? Why? (e) What if you had observed the same $\hat{p}_B=0.6$ but with $n=100$ samples. Should you reject the null hypothesis in this case? Why? (f) What is the _smallest_ value $\hat{p}_B$ you would reject the null hypothesis with $n=100$. Use the `qnorm` function in R or `norm.ppf` in `scipy.stats` for this. Denote this _smallest_ value as $q_B$. (g) Based on (f), the smallest detectable improvement for $p_A=0.5$ with $n=100$ is then $q_B - p_A$. What is the smallest detectable improvement in your experiment (that is, with $n=50$)? 
## Part 2: Compare to known click rate ($p_A=0.75$) In this second case, you also assume the click rate for the original version is known, but is $p_A=0.75$. The data recorded for the experiment is the same. You showed the new design to $n=50$ subjects and recorded that $s=30$ clicked on the link of interest. You want to test the hypothesis $p_B \leq 0.75$ and reject it if $P(\overline{X} > \hat{p}_B) < 0.05$ under this hypothesis. Note the probability in this case is different since $p_A = 0.75$. (a) What are the values of $\mathbb{E} [\overline{X}]$ and $\mathrm{var}(\overline{X})$ under the null hypothesis in this case. (b) Based on the CLT approximation, compute $P(\overline{X} > \hat{p}_B)$ under the null hypothesis. (c) Should you reject the null hypothesis $p_B \leq 0.75$? Why? (d) What if you had observed the same $\hat{p}_B=0.6$ but with $n=100$ samples. Should you reject the null hypothesis in this case? Why? (e) What is the _smallest_ value $\hat{p}_B$ you should reject the null hypothesis with $n=100$. Denote this _smallest_ value as $q_B$. (f) Based on (e), the smallest detectable improvement for $p_A=0.75$ with $n=100$ is then $q_B - p_A$. What is the smallest detectable improvement in your experiment ($n=50$)? ## Part 3 Consider your answers for parts (1g) and (2f). Is the smallest _detectable_ improvement in Question (1g) larger or smaller than in Question (2f)? Explain why this makes sense mathematically. ## Part 4: Comparing to estimated click rate $p_A$. In this more realistic case you estimate click rates for both page designs in your experiment. The experiment you carry out is as follows: when a customer visits the site, they are randomly (and independently from other customers) shown design A or B, and you record if they click on the link of interest or not. 
You did this for $n=100$ customers and recorded the following data: | design | number shown | number clicked | |--------|--------------|----------------| | A | $n_A=55$ | $s_A=35$ | | B | $n_B=45$ | $s_B=35$ | The null hypothesis we want to test in this case is that $p_B - p_A \leq 0$. That is, that the new design _does not_ improve the click rate. How can we use what we know about the CLT in this case? What we will do is treat estimates using sample means $\hat{p}_A=\overline{X}_A$ and $\hat{p}_B=\overline{X}_B$ as random variables and define a new random variable $Y=\overline{X}_B - \overline{X}_A$ corresponding to the _difference in click rates_ $p_B - p_A$. With that, we derive $\mathbb{E} [Y]$ and $\mathrm{var}(Y)$ under the null hypothesis that $p_B - p_A = 0$. (a) Derive expressions for $\mathbb{E} [Y]$ and $\mathrm{var}(Y)$ under the null hypothesis in terms of $p_A=p_B=p$. You will need to use the properties of expectations and variances described below. Here, I give you the derivation for $\mathbb{E} [Y]$, you need to do the same for $\mathrm{var}(Y)$. \begin{eqnarray} \mathbb{E} [Y] & = & \mathbb{E} \left[ \overline{X}_B - \overline{X}_A \right] \\ {} & = & \mathbb{E} [\overline{X}_B] - \mathbb{E} [\overline{X}_A] \\ {} & = & p_B - p_A \\ {} & = & 0 \end{eqnarray} (b) It looks like we will need an estimate of $p_A = p_B = p$ for our CLT approximation. Luckily, under the null hypothesis all $n=100$ observations from this experiment can be treated as independent identically distributed (iid) draws from a $\mathrm{Bernoulli}(p)$ distribution. Based on this observation, what would be your estimate of $p_A=p_B=p$? (c) Now that you have an estimate of $p$, compute a value for $\mathrm{var}(Y)$. (d) What is your estimate $\hat{y}$ of $p_B - p_A$ based on the data your recorded for this experiment? Now, we can reject the null hypothesis of no improvement if $p(Y > \hat{y}) \leq \alpha$ under the null hypothesis. 
(e) Using the CLT approximation, what is $P(Y > \hat{y})$ (f) Can you reject the null hypothesis of no improvement in this case? Why? Remember, we are using $\alpha=0.05$. ## Bonus: Smallest detectable improvement for estimated click rates We could compute smallest detectable improvements in parts 1 and 2 above because we assumed $p_A$ was known. For part 4, we don't know $p_A$ and instead estimate it, so we cannot compute a smallest detectable improvement before the experiment is run because we don't know $p_B = p_A = p$. We can however, compute what the smallest detectable difference _would be_ for different values of $p$. (a) Make a line plot, with $p$ in the x-axis and the smallest detectable difference as a function of $p$ in the y-axis. You should assume $n_A=55$ and $n_B=45$ as above. ## Expectation and variance properties ### Properties of expectation (i) $\mathbb{E}[aX] = a \mathbb{E}[X]$ for constant $a$ and random variable $X$ (ii) $\mathbb{E}[X + Y] = \mathbb{E}[X] + \mathbb{E}[Y]$ for random variables $X$ and $Y$ ### Properties of variance (i) $\mathrm{var}[aX] = a^2 \mathrm{var}[X]$ for constant $a$ and random variable $X$ (ii) $\mathrm{var}[X+Y]=\mathrm{var}[X] + \mathrm{var}[Y]$ for _independent_ random variables $X$ and $Y$ ## Submission Prepare an Rmarkdown file or Jupyter notebook with your derivations and answers, including code you used to get your answers. Knit to PDF (or save HTML to PDF) and submit to ELMS. <file_sep>/materials/lectures/Geometry/geometry.Rmd --- title: "Mathematical Background and the Perceptron" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" output: html_document --- # Machine Learning Preliminaries A common situation in data analysis is that one has an outcome attribute (variable) $Y$ and one or more independent covariate or predictor attributes $X_1,\ldots,X_p$. One usually observes these variables for multiple "instances" (or entities). 
(Note) As before, we use upper case to denote a random variable. To denote actual numbers we use lower case. One way to think about it: $Y$ has not happened yet, and when it does, we see $Y=y$. One may be interested in various things: - What effects do the covariates have on the outcome? - How well can we describe these effects? - Can we predict the outcome using the covariates?, etc... ## Motivating Example: Credit Analysis ```{r, echo=FALSE, message=FALSE} library(ISLR) library(tidyverse) data(Default) Default %>% head() %>% knitr::kable(format="html") ``` Task: predict account default What is the outcome $Y$? What are the predictors $X_j$? ## Terminology and notation We will be mixing the terminology of statistics and computer science. For example, we will sometimes call attributes $Y$ and $X$ the outcome/predictors, sometimes observed/covariates, and even input/output. We may call each entity an observation or example. We will denote predictors with $X$ and outcomes with $Y$ (quantitative) and $G$ (qualitative). Notice $G$ are not numbers, so we cannot add or multiply them. We will use $G$ to denote the set of possible values. For gender it would be $G=\{Male,Female\}$. ## From data to feature vectors The vast majority of ML algorithms we see in class treat instances as "feature vectors". We can represent each instance as a _vector_ in Euclidean space $\langle x_1,\ldots,x_p,y \rangle$. 
This means: - every measurement is represented as a continuous value - in particular, categorical variables become numeric (e.g., one-hot encoding) Here is the same credit data represented as a matrix of feature vectors ```{r, cache=TRUE, echo=FALSE} default_mat <- Default %>% mutate(default=case_when( default == "Yes" ~ +1, TRUE ~ -1 )) %>% mutate(student=case_when( student == "Yes" ~ 1, TRUE ~ 0 )) %>% as.matrix() default_mat %>% as_data_frame() %>% group_by(default) %>% sample_n(3) %>% ungroup() %>% sample_frac(1.0) %>% knitr::kable(format="html") ``` # Technical notation - Observed values will be denoted in lower case. So $x_i$ means the $i$th observation of the random variable $X$. - Matrices are represented with bold face upper case. For example $\mathbf{X}$ will represent all observed predictors. ## Technical notation - $N$ (or $n$) will usually mean the number of observations, or length of $Y$. $i$ will be used to denote which observation and $j$ to denote which covariate or predictor. - Vectors will not be bold, for example $x_i$ may mean all predictors for subject $i$, unless it is the vector of a particular predictor $\mathbf{x}_j$. - All vectors are assumed to be column vectors, so the $i$-th row of $\mathbf{X}$ will be $x_i'$, i.e., the transpose of $x_i$. ## Geometry and Distances Now that we think of instances as vectors we can do some interesting operations. Let's try a first one: define a distance between two instances using Euclidean distance $$d(x_1,x_2) = \sqrt{\sum_{j=1}^p(x_{1j}-x_{2j})^2}$$ ## K-nearest neighbor classification Now that we have a distance between instances we can create a classifier. Suppose we want to predict the class for an instance $x$. K-nearest neighbors uses the closest points in predictor space predict $Y$. $$ \hat{Y} = \frac{1}{k} \sum_{x_k \in N_k(x)} y_k. $$ $N_k(x)$ represents the $k$-nearest points to $x$. How would you use $\hat{Y}$ to make a prediction? 
![](img/knnalgo.png) An important notion in ML and prediction is _inductive bias_: the assumptions we make about our data that allow us to make predictions. In KNN, our _inductive bias_ is that points that are **nearby** will be of the same class. Parameter $K$ is a _hyper-parameter_, its value may affect prediction accuracy significantly. Question: which situation may lead to _overfitting_, high or low values of $K$? Why? ### The importance of transformations Feature scaling is an important issue in distance-based methods. In the example below, which of these two features will affect distance the most? ```{r, echo=FALSE, message=FALSE, fig.width=4, fig.align="center"} library(cowplot) default_mat %>% as_data_frame() %>% ggplot(aes(x=student,y=balance,color=factor(default))) + geom_point() + coord_equal(ratio=1/1000) ``` ## Quick vector algebra review - A (real-valued) vector is just an array of real values, for instance $x = \langle 1, 2.5, −6 \rangle$ is a three-dimensional vector. - Vector sums are computed pointwise, and are only defined when dimensions match, so $$\langle 1, 2.5, −6 \rangle + \langle 2, −2.5, 3 \rangle = \langle 3, 0, −3 \rangle$$. In general, if $c = a + b$ then $c'd = a'd + b'd$ for all vectors $d$. Vector addition can be viewed geometrically as taking a vector $a$, then tacking on $b$ to the end of it; the new end point is exactly $c$. ![](img/vector_sum.png) _Scalar Multiplication_: vectors can be scaled by real values; $$2\langle 1, 2.5, −6 \rangle = \langle 2, 5, −12\rangle$$ In general, $ax = \langle ax_1, ax_2, \ldots, ax_p\rangle$ The norm of a vector $x$, written $\|x\|$, is its length. 
Unless otherwise specified, this is its Euclidean length, namely: $$\|x\| = \sqrt{\sum_{j=1}^p x_j^2}$$ ### Quiz Write Euclidean distance of vectors $u$ and $v$ as a vector norm. The _dot product_, or _inner product_ of two vectors $u$ and $v$ is defined as $$u'v = \sum_{j=1}^p u_j v_j$$ A useful geometric interpretation of the inner product $v'u$ is that it gives the projection of $v$ onto $u$ (when $\|u\|=1$). ![](img/innerprod.png) ## The curse of dimensionality Distance-based methods like KNN can be problematic in high-dimensional problems. Consider the case where we have many covariates. We want to use $k$-nearest neighbor methods. Basically, we need to define distance and look for small multi-dimensional "balls" around the target points. With many covariates this becomes difficult. Imagine we have equally spaced data and that each covariate is in $[0,1]$. We want to do something like kNN with a local focus that uses 10% of the data in the local fitting. If we have $p$ covariates and we are forming $p$-dimensional cubes, then each side of the cube must have size $l$ determined by $l \times l \times \dots \times l = l^p = .10$. If the number of covariates is p=10, then $l = .1^{1/10} = .8$. So it really isn't local! If we reduce the percent of data we consider to 1%, $l=0.63$. Still not very local. If we keep reducing the size of the neighborhoods we will end up with a very small number of data points in each average and thus predictions with very large variance. This is known as *the curse of dimensionality*. Because of this so-called curse, it is not always possible to use KNN. But other methods, like Decision Trees, thrive on multidimensional data. 
## Summary - We will represent many ML algorithms geometrically as vectors - Vector math review - K-nearest neighbors - The curse of dimensionality <file_sep>/materials/projects/Project5.Rmd --- title: "Project 5: Interactive Data Maps" author: "CMSC320" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` **Last Updated:** `r format(Sys.Date(), "%b %d, %Y")` Use `leaflet` and our previously used Baltimore crime dataset to make an interactive data map of Baltimore Crime. 1. Use this piece of code to download and prepare data for use in project ```{r get_and_prepare_data, message=FALSE, warning=FALSE} library(tidyverse) library(stringr) arrest_tab <- read_csv("http://www.hcbravo.org/IntroDataSci/misc/BPD_Arrests.csv") dat <- arrest_tab %>% filter(!is.na(`Location 1`)) %>% separate(`Location 1`, c("lat","lng"), sep=",") %>% mutate(lat=as.numeric(str_replace(lat, "\\(", ""))) %>% mutate(lng=as.numeric(str_replace(lng, "\\)", ""))) %>% sample_n(2000) dat ``` Note the attributes `lat` and `lng` which indicate geographical location as latitude (`lat`) and longitude (`lng`). 2. Use the `leaflet` package to create an interactive map of Baltimore ```{r start_map, warning=FALSE} library(leaflet) balto_map <- leaflet(dat) %>% addTiles() %>% setView(lat=39.29, lng=-76.61, zoom=11) balto_map ``` You can find more information about leaflet here: https://rstudio.github.io/leaflet/ 3. Add graphical elements to display the data. For instance, add circles, with colors indicating sex. Or circles with colors indicating race. Or anything else that strikes your fancy. These will be useful: - https://rstudio.github.io/leaflet/markers.html - https://www.rdocumentation.org/packages/leaflet/versions/1.1.0/topics/addControl 4. Embed your map in your Rmarkdown file, knit **to HTML** this time (not PDF) and submit the HTML file to ELMS. 
## Submission Prepare and knit an Rmarkdown file that includes: (a) code to carry out each of the steps above, (b) output showing the result of your code (in this case the interactive map), and (c) a short prose description of your interactive map (i.e., what are you showing with this data and map). Remember, the writeup you are preparing is intended to communicate your data analysis effectively. Thoughtlessly showing large amounts of output in your writeup defeats that purpose. ## Python You can use the ipyleaflet widget https://github.com/jupyter-widgets/ipyleaflet to implement this in a Jupyter notebook. To submit, export your notebook **as HTML** and submit the resulting html to ELMS. Code to prepare data: ```{r setup_py, echo=FALSE} library(reticulate) use_condaenv("cmsc320") ``` ```{python get_and_prepare_data_py, message=FALSE, warning=FALSE} import pandas as pd arrest_tab = pd.read_csv("http://www.hcbravo.org/IntroDataSci/misc/BPD_Arrests.csv") dat = arrest_tab[arrest_tab['Location 1'].notna()].copy() dat['lat'] = dat['Location 1'].str.extract("\(([0-9\.\-]*).*\)", expand=False) dat['lng'] = dat['Location 1'].str.extract("\(.*,(.*)\)", expand=False) dat = dat.sample(n=2000) dat ``` <file_sep>/materials/lecture-notes/20-eda_summary_stats.Rmd # Exploratory Data Analysis: Summary Statistics ```{r eda_stats_setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(tidyverse) theme_set(theme_bw()) ``` Let's continue our discussion of Exploratory Data Analysis. In the previous section we saw ways of visualizing attributes (variables) using plots to start understanding properties of how data is distributed, an essential and preliminary step in data analysis. In this section, we start discussing statistical, or numerical, summaries of data to quantify properties that we observed using visual summaries and representations. 
Remember that one purpose of EDA is to spot problems in data (as part of data wrangling) and understand variable properties like: - central trends (mean) - spread (variance) - skew - suggest possible modeling strategies (e.g., probability distributions) We also want to use EDA to understand relationship between pairs of variables, e.g. their correlation or covariance. One last note on EDA. <NAME> was an exceptional scientist/mathematician, who had profound impact on statistics and Computer Science. A lot of what we cover in EDA is based on his groundbreaking work. I highly recommend you read more about him: [https://www.stat.berkeley.edu/~brill/Papers/life.pdf](https://www.stat.berkeley.edu/~brill/Papers/life.pdf). ## Range Part of our goal is to understand how variables are distributed in a given dataset. Note, again, that we are not using _distributed_ in a formal mathematical (or probabilistic) sense. All statements we are making here are based on data at hand, so we could refer to this as the _empirical distribution_ of data. Here, _empirical_ is used in the sense that this is data resulting from an experiment. Let's use a dataset on diamond characteristics as an example. ```{r} data(diamonds) diamonds %>% ggplot(aes(x=depth)) + geom_histogram(bins=100) ``` (Here's some help interpreting these variables: [https://en.wikipedia.org/wiki/Diamond_(gemstone)#Gemological_characteristics](https://en.wikipedia.org/wiki/Diamond_(gemstone)#Gemological_characteristics)). Let's start using some notation to make talking about this a bit more efficient. We assume that we have data across $n$ entitites (or observational units) for $p$ attributes. In this dataset $n=`r nrow(diamonds)`$ and $p=`r ncol(diamonds)`$. However, let's consider a single attribute, and denote the data for that attribute (or variable) as $x_1, x_2, \ldots, x_n$. Ok, so what's the first question we want to ask about how data is distributed? 
Since we want to understand how data is distributed across a _range_, we should first define the range. ```{r} diamonds %>% summarize(min_depth = min(depth), max_depth = max(depth)) ``` We use notation $x_{(1)}$ and $x_{(n)}$ to denote the minimum and maximum statistics. In general, we use notation $x_{(q)}$ for the rank statistics, e.g., the $q$th largest value in the data. ## Central Tendency Now that we know the range over which data is distributed, we can figure out a first summary of how data is distributed across this range. Let's start with the _center_ of the data: the _median_ is a statistic defined such that half of the data has a smaller value. We can use notation $x_{(n/2)}$ (a rank statistic) to represent the median. Note that we can use an algorithm based on the quicksort partition scheme to compute the median in linear time (on average). ```{r} diamonds %>% ggplot(aes(x=depth)) + geom_histogram(bins=100) + geom_vline(aes(xintercept=median(depth)), color="red") ``` ### Derivation of the mean as central tendency statistic Of course, the best known statistic for central tendency is the _mean_, or average of the data: $\overline{x} = \frac{1}{n} \sum_{i=1}^n x_i$. It turns out that in this case, we can be a bit more formal about what "center" means. Let's say that the _center_ of a dataset is a point in the range of the data that is _close_ to the data. To say that something is _close_ we need a measure of _distance_. So for two points $x_1$ and $x_2$ what should we use for distance? We could base it on $(x_1 - x_2)$ but that's not enough since its sign depends on the order in which we write it. Using the absolute value solves that problem $|x_1 - x_2|$ since now the sign doesn't matter, but this has some issues that we will see later. So, the next best thing we can do is use the square of the difference. So, in this case, the distance between data points $x_1$ and $x_2$ is $(x_1 - x_2)^2$. 
Here is a fun question: what's the largest distance between two points in our dataset? So, to define the _center_, let's build a criterion based on this distance by adding this distance across all points in our dataset: $$ RSS(\mu) = \frac{1}{2} \sum_{i=1}^n (x_i - \mu)^2 $$ Here RSS means _residual sum of squares_, and we use $\mu$ to stand for candidate values of _center_. We can plot RSS for different values of $\mu$: ```{r, echo=FALSE} rss <- function(mu) { 0.5 * sum((diamonds$depth - mu)^2)} mu_candidates <- seq(min(diamonds$depth), max(diamonds$depth), len=1000) plot(mu_candidates, sapply(mu_candidates, rss), xlab="Depth", ylab="RSS", type="l", lwd=2, main="Residual Sum of Squares") ``` Now, what should our "center" estimate be? We want a value that is _close_ to the data based on RSS! So we need to find the value in the range that minimizes RSS. From calculus, we know that a necessary condition for the minimizer $\hat{\mu}$ of RSS is that the derivative of RSS is zero at that point. So, the strategy to minimize RSS is to compute its derivative, and find the value of $\mu$ where it equals zero. 
So, let's find the derivative of RSS: $$ \begin{eqnarray} \frac{\partial}{\partial \mu} \frac{1}{2} \sum_{i=1}^n (x_i - \mu)^2 & = & \frac{1}{2} \sum_{i=1}^n \frac{\partial}{\partial \mu} (x_i - \mu)^2 \; \textrm{(sum rule)}\\ {} & = & \frac{1}{2} \sum_{i=1}^n 2(x_i - \mu) \times \frac{\partial}{\partial \mu} (x_i - \mu) \; \textrm{(power rule and chain rule)}\\ {} & = & \frac{1}{2} \sum_{i=1}^n 2(x_i - \mu) \times (-1) \; \textrm{(sum rule and power rule)}\\ {} & = & \frac{1}{2} 2 \sum_{i=1}^n (\mu - x_i) \textrm{(rearranging)}\\ {} & = & \sum_{i=1}^n \mu - \sum_{i=1}^n x_i \\ {} & = & n\mu - \sum_{i=1}^n x_i \end{eqnarray} $$ ```{r, echo=FALSE} rss_deriv <- function(mu) { nrow(diamonds)*mu - sum(diamonds$depth)} plot(mu_candidates, sapply(mu_candidates, rss_deriv), xlab="Depth", ylab="RSS Derivative", type="l", lwd=2, main="Derivative of RSS") abline(h=0,lty=2,lwd=1.6) ``` Next, we set that equal to zero and find the value of $\mu$ that solves that equation: $$ \begin{eqnarray} \frac{\partial}{\partial \mu} & = & 0 & \Rightarrow \\ n\mu - \sum_{i=1}^n x_i & = & 0 & \Rightarrow \\ n\mu & = & \sum_{i=1}^n x_i & \Rightarrow \\ \mu & = & \frac{1}{n} \sum_{i=1}^n x_i & {} \end{eqnarray} $$ That's the average we know and love! 
So the fact you should remember: **The mean is the value that minimizes RSS for a vector of attribute values**. It equals the value where the derivative of RSS is 0: ```{r, echo=FALSE} our_mean <- sum(diamonds$depth) / nrow(diamonds) plot(mu_candidates, sapply(mu_candidates, rss_deriv), xlab="Depth", ylab="RSS Derivative", type="l", lwd=1.3) abline(v=our_mean, lwd=2.3, col="blue") abline(h=0, lty=2, lwd=1.6) ``` It is the value that minimizes RSS: ```{r, echo=FALSE} plot(mu_candidates, sapply(mu_candidates, rss), xlab="Depth", ylab="RSS", type="l", lwd=1.3) abline(v=our_mean, lwd=2.3, col="blue") ``` And it serves as an estimate of central tendency of the dataset: ```{r, echo=FALSE} diamonds %>% ggplot(aes(x=depth)) + geom_histogram(bins=100) + geom_vline(aes(xintercept=median(depth)), color="red", size=1.3) + geom_vline(aes(xintercept=mean(depth)), color="blue", size=1.3) ``` Note that in this dataset the mean and median are not exactly equal, but are very close: ```{r} diamonds %>% summarize(mean_depth = mean(depth), median_depth = median(depth)) ``` One last note, there is a similar argument to define the median as a measure of _center_. In this case, instead of using RSS we use a different criterion: the sum of absolute deviations $$ SAD(m) = \sum_{i=1}^n |x_i - m|. $$ The median is the minimizer of this criterion. ```{r, echo=FALSE} sad <- function(m) sum(abs(diamonds$depth - m)) plot(mu_candidates, sapply(mu_candidates, sad), xlab="Depth", ylab="Sum of Absolute Deviations", type="l", lwd=1.3) abline(v=median(diamonds$depth), lwd=2.3, col="red") ``` ## Spread Now that we have a measure of center, we can discuss how data is _spread_ around that center. ### Variance For the mean, we have a convenient way of describing this: the average distance (using squared difference) from the mean. 
We call this the _variance_ of the data: $$ \mathrm{var}(x) = \frac{1}{n} \sum_{i=1}^n (x_i - \overline{x})^2 $$ You will also see it with a slightly different constant in the front for technical reasons that we may discuss later on: $$ \mathrm{var}(x) = \frac{1}{n-1} \sum_{i=1}^n (x_i - \overline{x})^2 $$ Variance is a commonly used statistic for spread but it has the disadvantage that its units are not easy to conceptualize (e.g., squared diamond depth). A spread statistic that is in the same units as the data is the _standard deviation_, which is just the squared root of variance: $$ \mathrm{sd}(x) = \sqrt{\frac{1}{n}\sum_{i=1}^n (x_i - \overline{x})^2} $$ We can also use _standard deviations_ as an interpretable unit of how far a given data point is from the mean: ```{r} # create a df with standard deviation values to plot sds_to_plot <- seq(-6,6) sd_df <- diamonds %>% summarize(mean_depth = mean(depth), sd_depth = sd(depth)) %>% slice(rep_along(sds_to_plot, 1)) %>% mutate(sd_to_plot=sds_to_plot) %>% mutate(sd_val = mean_depth + sd_to_plot * sd_depth) diamonds %>% ggplot(aes(x=depth)) + geom_histogram(bins=100) + geom_vline(aes(xintercept=mean(depth)), col="blue", size=1.5) + geom_vline(aes(xintercept = sd_val), data=sd_df, linetype=2, size=1.2 - abs(seq(-1,1, len=13))) ``` As a rough guide, we can use "standard deviations away from the mean" as a measure of spread as follows: | SDs | proportion | Interpretation | |-----|------------|----------------| | 1 | `r round(1-2*pnorm(-1),2)` | `r 100*round(1-2*pnorm(-1),2)`% of the data is within $\pm$ 1 sds | | 2 | `r round(1-2*pnorm(-2),2)` | `r 100*round(1-2*pnorm(-2),2)`% of the data is within $\pm$ 2 sds | | 3 | `r round(1-2*pnorm(-3),4)` | `r 100*round(1-2*pnorm(-3),4)`% of the data is within $\pm$ 3 sds | | 4 | `r round(1-2*pnorm(-4),6)` | `r 100*round(1-2*pnorm(-4),6)`% of the data is within $\pm$ 4 sds | | 5 | `r round(1-2*pnorm(-5),8)` | `r 100*round(1-2*pnorm(-5),8)`% of the data is within $\pm$ 5 sds | | 6 
| `r round(1-2*pnorm(-6),10)` | `r 100*round(1-2*pnorm(-6),10)`% of the data is within $\pm$ 6 sds | We will see later how these rough approximations are derived from a mathematical assumption about how data is distributed _beyond_ the data we have at hand. ### Spread estimates using rank statistics Just like we saw how the median is a rank statistic used to describe central tendency, we can also use rank statistics to describe spread. For this we use two more rank statistics: the first and third _quartiles_, $x_{(n/4)}$ and $x_{(3n/4)}$ respectively: ```{r, warning=FALSE} quartile_df <- diamonds %>% summarize(first=quantile(diamonds$depth, p=1/4), third=quantile(diamonds$depth, p=3/4)) %>% tidyr::gather(quartile, value) diamonds %>% ggplot(aes(x=depth)) + geom_histogram(bins=100) + geom_vline(aes(xintercept=median(depth)), size=1.3, color="red") + geom_vline(aes(xintercept=value), data=quartile_df, size=1,color="red", linetype=2) ``` Note, the five order statistics we have seen so far: minimum, maximum, median and first and third quartiles are so frequently used that this is exactly what `R` uses by default as a `summary` of a numeric vector of data (along with the mean): ```{r} summary(diamonds$depth) ``` This five-number summary comprises all of the statistics used to construct a boxplot to summarize data distribution. In particular, the _inter-quartile range_, which is defined as the difference between the third and first quartile: $\mathrm{IQR}(x) = x_{(3n/4)} - x_{(n/4)}$ gives a measure of spread. The interpretation here is that half the data is within the IQR around the median. ```{r} diamonds %>% summarize(sd_depth = sd(depth), iqr_depth = IQR(depth)) ``` ## Outliers We can use estimates of spread to identify outlier values in a dataset. Given an estimate of spread based on the techniques we've just seen, we can identify values that are _unusually_ far away from the center of the distribution. 
One often cited rule of thumb is based on using standard deviation estimates. We can identify outliers as the set $$ \mathrm{outliers_{sd}}(x) = \{x_j \, | \, |x_j - \overline{x}| > k \times \mathrm{sd}(x) \} $$ where $\overline{x}$ is the sample mean of the data and $\mathrm{sd}(x)$ its standard deviation. Multiplier $k$ determines if we are identifying (in Tukey's nomenclature) _outliers_ or points that are _far out_. Here is an example usage: ```{r, warnings=FALSE} outlier_df <- diamonds %>% summarize(mean_depth=mean(depth), sd_depth=sd(depth)) %>% slice(rep(1, 4)) %>% mutate(multiplier = c(-3, -1.5, 1.5, 3)) %>% mutate(outlier_limit = mean_depth + multiplier * sd_depth) diamonds %>% ggplot(aes(x=depth)) + geom_histogram(bins=100) + geom_vline(aes(xintercept=outlier_limit), data=outlier_df, color="blue") ``` While this method works relatively well in practice, it presents a fundamental problem. Severe outliers can significantly affect spread estimates based on standard deviation. Specifically, spread estimates will be inflated in the presence of severe outliers. To circumvent this problem, we use rank-based estimates of spread to identify outliers as: $$ \mathrm{outliers_{IQR}}(x) = \{x_j \, | \, x_j < x_{(n/4)} - k \times \mathrm{IQR}(x) \; \mathrm{ or } \; x_j > x_{(3n/4)} + k \times \mathrm{IQR}(x)\} $$ This is usually referred to as the _Tukey outlier rule_, with multiplier $k$ serving the same role as before. We use the IQR here because it is less susceptible to being inflated by severe outliers in the dataset. It also works better for skewed data than the method based on standard deviation. 
Here we demonstrate its use again: ```{r, warnings=FALSE} outlier_df <- diamonds %>% summarize(q1=quantile(depth, 1/4), q3=quantile(depth, 3/4), iqr=IQR(depth)) %>% slice(rep(1, 2)) %>% mutate(multiplier = c(1.5, 3)) %>% mutate(lower_outlier_limit = q1 - multiplier * iqr) %>% mutate(upper_outlier_limit = q3 + multiplier * iqr) diamonds %>% ggplot(aes(x=depth)) + geom_histogram(bins=100) + geom_vline(aes(xintercept=lower_outlier_limit), data=outlier_df, color="red") + geom_vline(aes(xintercept=upper_outlier_limit), data=outlier_df, color="red") ``` ## Skew One last thought. Although there are formal ways of defining this precisely, the five-number summary can be used to understand if data is skewed. How? Consider the differences between the first and third quartiles to the median: ```{r} diamonds %>% summarize(med_depth = median(depth), q1_depth = quantile(depth, 1/4), q3_depth = quantile(depth, 3/4)) %>% mutate(d1_depth = med_depth - q1_depth, d2_depth = q3_depth - med_depth) %>% select(d1_depth, d2_depth) ``` If one of these differences is larger than the other, then that indicates that this dataset might be skewed, that is, that the range of data on one side of the median is longer (or shorter) than the range of data on the other side of the median. Do you think our diamond depth dataset is skewed? ## Covariance and correlation The scatter plot is a visual way of observing relationships between pairs of variables. Like descriptions of distributions of single variables, we would like to construct statistics that summarize the relationship between two variables quantitatively. To do this we will extend our notion of _spread_ (or variation of data around the mean) to the notion of _co-variation_: do pairs of variables vary around the mean in the same way. Consider now data for two variables over the same $n$ entities: $(x_1,y_1), (x_2,y_2), \ldots, (x_n,y_n)$. 
For example, for each diamond, we have `carat` and `price` as two variables: ```{r} diamonds %>% ggplot(aes(x=carat, y=price)) + geom_point() + geom_hline(aes(yintercept = mean(price)), color="blue", lty=2) + geom_vline(aes(xintercept = mean(carat)), color="blue", lty=2) ``` We want to capture the relationship: does $x_i$ vary in the same direction and scale away from its mean as $y_i$? This leads to _covariance_ $$ cov(x,y) = \frac{1}{n} \sum_{i=1}^n (x_i - \overline{x})(y_i - \overline{y}) $$ Think of what the covariance for $x$ and $y$ would be if $x_i$ varies in the _opposite_ direction as $y_i$. Just like variance, we have an issue with units and interpretation for covariance, so we introduce _correlation_ (formally, Pearson's correlation coefficient) to summarize this relationship in a _unit-less_ way: $$ cor(x,y) = \frac{cov(x,y)}{sd(x) sd(y)} $$ As before, we can also use rank statistics to define a measure of how two variables are associated. One of these, _Spearman correlation_ is commonly used. It is defined as the Pearson correlation coefficient of the ranks (rather than actual values) of pairs of variables. ## Postscript: Finding Maxima/Minima using Derivatives The values at which a function attains its maximum value are called _maxima_ ( _maximum_ if unique) of the function. Similarly, the values at which a function attains its minimum value are called _minima_ ( _minimum_ if unique) of the function. In a smoothly changing function maxima or minima are found where the function flattens (slope becomes $0$). The first derivative of the function tells us where the slope is $0$. This is the _first derivative test_. The derivative of the slope (the second derivative of the original function) can be useful to know if the value we found from the first derivative test is a maximum or minimum. 
When a function's slope is zero at $x$, and the second derivative at $x$ is: \begin{itemize} \item less than 0, it is a local maximum \item greater than 0, it is a local minimum \item equal to 0, then the test fails (there may be other ways of finding out though) \end{itemize} This is called the _second derivative test_. ### Steps to find Maxima/Minima of function $f(x)$ 1. Find the value(s) at which $f'(x)=0$ (First derivative test). 2. Find the value of the second derivative for each of the x's found in step 1 (Second derivative test). 3. If the value of the second derivative at $x$ is: - less than 0, it is a local maximum - greater than 0, it is a local minimum - equal to 0, then the test fails (no minima or maxima) ### Notes on Finding Derivatives #### Sum Rule {-} The derivative of the sum of two functions is the sum of the derivatives of the two functions: \begin{eqnarray*} \frac{d}{dx}(f(x)+g(x)) = \frac{d}{dx}(f(x)) + \frac{d}{dx}(g(x)) \end{eqnarray*} Similarly, the derivative of the difference of two functions is the difference of the derivatives of the two functions. 
#### Power Rule {-} If we have a function f(x) of the form $f(x)=x^{n}$ for any integer n, \begin{eqnarray*} \frac{d}{dx}(f(x)) = \frac{d}{dx}(x^{n}) = nx^{n-1} \end{eqnarray*} #### Chain Rule {-} If we have two functions of the form $f(x)$ and $g(x)$, the chain rule can be stated as follows: \begin{eqnarray*} \frac{d}{dx}(f(g(x)) = f^{'}(g(x)) g^{'}(x) \end{eqnarray*} \noindent \textit{Eg.} Differentiate $y=(3x+1)^{2}$ with respect to x.\\ \textit{Solution.} Applying the above equation, we have the following: \begin{eqnarray*} \frac{d}{dx}((3x+1)^{2}) = 2(3x+1)^{2-1} \frac{d}{dx}((3x+1)) = 2(3x+1)(3) = 6(3x+1) \end{eqnarray*} #### Product Rule {-} If we have two functions f(x) and g(x), \begin{eqnarray*} \frac{d}{dx}(f(x)g(x)) = f(x)\frac{d}{dx}(g(x)) + g(x)\frac{d}{dx}(f(x)) = f(x)g'(x) + g(x)f'(x) \end{eqnarray*} #### Quotient Rule {-} If we have two functions f(x) and g(x) ($g(x)\neq 0$), \begin{eqnarray*} \frac{d}{dx}\frac{f(x)}{g(x)} = \left(\frac{g(x)\frac{d}{dx} (f(x)) - f(x)\frac{d}{dx} (g(x)) }{g(x)^{2}}\right) \end{eqnarray*} ### Resources: - A useful calculus cheat sheet: http://tutorial.math.lamar.edu/pdf/Calculus_Cheat_Sheet_Derivatives.pdf - Discussion on finding maxima/minima: [https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=3&ved=0ahUKEwi32ZGPvbbPAhUCdj4KHcdyDZAQFggnMAI&url=http%3A%2F%2Fwww.math.psu.edu%2Ftseng%2Fclass%2FMath140A%2FNotes-First_and_Second_Derivative_Tests.doc&usg=AFQjCNEUih6RsfXq933pFwmoPk0yOvc1Mg&sig2=zyxh1-zWe7TY7zYwnhpH8g&cad=rja ](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=3&ved=0ahUKEwi32ZGPvbbPAhUCdj4KHcdyDZAQFggnMAI&url=http%3A%2F%2Fwww.math.psu.edu%2Ftseng%2Fclass%2FMath140A%2FNotes-First_and_Second_Derivative_Tests.doc&usg=AFQjCNEUih6RsfXq933pFwmoPk0yOvc1Mg&sig2=zyxh1-zWe7TY7zYwnhpH8g&cad=rja) <file_sep>/materials/notebooks/sql_pr.Rmd --- title: "Building a complex query with SQL" author: "<NAME>" date: "2/15/2018" output: html_notebook editor_options: chunk_output_type: inline --- ```{r} 
dbfile <- "/Users/hcorrada/Teaching/CMSC320_Spring2018/materials/lecture-notes/data/lahman2016.sqlite" db <- DBI::dbConnect(RSQLite::SQLite(), dbfile) ``` We want to find "the ten players from PR _not_ in HOF with the most career hits". We build the query step by step. - Players from PR ```{sql, connection=db} select playerID, nameFirst, nameLast from Master where birthCountry = "P.R." ``` - Players in the HOF ```{sql, connection=db} select distinct playerID from HallOfFame where inducted = "Y" ``` - Players from PR _not_ in HOF ```{sql, connection=db} select m.playerID, nameFirst, nameLast from Master as m left join (select distinct playerID from HallOfFame where inducted = "Y") as hof on m.playerID = hof.playerID where birthCountry = "P.R." and hof.playerID is NULL ``` - Add hits per season to table of players from PR _not_in HOF ```{sql, connection=db} with pr_not_in_hof(playerID, nameFirst, nameLast) as ( select m.playerID, nameFirst, nameLast from Master as m left join ( select distinct playerID from HallOfFame where inducted = "Y" ) as hof on m.playerID = hof.playerID where birthCountry = "P.R." and hof.playerID is NULL ) select pr.playerID, b.H, pr.nameFirst, pr.nameLast from pr_not_in_hof as pr join Batting as b on pr.playerID = b.playerID ``` - Total number of career hits for players from PR _not_ in HOF ```{sql, connection=db} with pr_not_in_hof(playerID, nameFirst, nameLast) as ( select m.playerID, nameFirst, nameLast from Master as m left join ( select distinct playerID from HallOfFame where inducted = "Y" ) as hof on m.playerID = hof.playerID where birthCountry = "P.R." and hof.playerID is NULL ) select pr.playerID, sum(b.H) as total_hits, pr.nameFirst, pr.nameLast from pr_not_in_hof as pr join Batting as b on pr.playerID = b.playerID group by pr.playerID ``` - Final query: Ten players from PR _not_ in HOF with the most career hits. 
```{sql, connection=db} with pr_not_in_hof(playerID, nameFirst, nameLast) as ( select m.playerID, nameFirst, nameLast from Master as m left join ( select distinct playerID from HallOfFame where inducted = "Y" ) as hof on m.playerID = hof.playerID where birthCountry = "P.R." and hof.playerID is NULL ) select pr.playerID, sum(b.H) as total_hits, pr.nameFirst, pr.nameLast from pr_not_in_hof as pr join Batting as b on pr.playerID = b.playerID group by pr.playerID order by total_hits desc limit 10 ``` ```{r} DBI::dbDisconnect(db) ```<file_sep>/content/campus-closure/index.md +++ title = "Campus Closure Procedures Spring 2020" +++ Per campus guidelines, https://umd.edu/virusinfo#community, course logistics will be adjusted as follows: - For the week of March 23 - classes are cancelled - recordings of lectures for the material covered those days will be made available, students should review that material in lecture notes and slides, and recordings individually. - office hours will be held online (see below) - For March 30 - April 10, - classroom instruction moves online - recordings of lectures for the material covered those days will be available by lecture time. HCB will hold virtual Q&A session (zoom through ELMS) 5:45-6:15 to discuss that material. - office hours will be held online (see below) ### Office Hours The office hours schedule will continue to be observed. However, office hours will be held online using zoom. 1. Office hours will be managed through the https://officehours.cs.umd.edu app. A code to join the csmc320 space will be posted on piazza. 2. To join an office hour, join the queue on the office hours app for a TA on duty and join their zoom meeting. Zoom meeting URLs will be posted on piazza. 3. Each TA will go through the queue using breakout rooms on zoom with each student. 
<file_sep>/materials/homeworks/midterm_review.md --- title: Midterm material author: CMSC 320 geometry: margin=1in fontfamily: utopia --- This document describes material that will be fair game in the midterm exam. Each section is divided into two levels (level 1 and 2). Mastery of level 1 material is essential to do well in the midterm, level 2 is needed to do great in the midterm. ## Preliminaries ### Level 1 - Data Analysis Cycle: acquisition -> preparation -> modeling -> communication ### Level 2 - Data Analysis Cycle: as presented in slides/Zumen & Mount ## R ### Level 1 - Variables vs. values - All the many ways to index vectors/data.frames - Functions, conditionals, loops - Lists vs. vectors - Matrices ### Level 2 - vectorization - the `apply` family ## Measurement types ## Level 1 - categorical - ordered categorical (ordinal) - discrete numerical - continuous numerical ## Level 2 - factors/levels in R - the importance of units ## Best practices ## Level 1 - the importance of reproducibility - tools to improve reproducibility - data science ethics and responsible conduct of research ## Level 2 - the importance of thinking like an experimentalist ## Data Wrangling ## Level 1 - `dplyr` single table verbs - the Select-From-Where SQL query - different join semantics - why are database systems helpful and useful? 
## Level 2 - Keys/Foreign Keys in the Entity-Relationship data model - How an ER diagram is converted into a set of Relations (data tables) ## Tidy Data and Data Models ## Level 1 - Components of a Data Model - Basics of the Entity-Relationship and Relational Data Models - The components of an ER diagram - The relationship between tidy data, the ER and the Relational models ## Level 2 - JSON - Other data models ## Exploratory Data Analysis ## Level 1 ### Summary Statistics - Distributional characteristics: range, central tendency, spread - Statistical summaries: sample mean, sample median, sample standard deviation ### Visualization for EDA - Plots to show data distribution for one variable/two variables - The data/aesthetic mapping/geometric representation scheme for data visualization (ggplot) ### Data transformations - difference between data missing systematically vs. missing at random - Centering and scaling data transformation (standardization) - Imputing continuous numeric missing data - Standard units - Ways of discretizing continuous numeric data ## Level 2 - The derivation of the mean as an _optimal_ central tendency statistic - Rank summary statistics - Distributional characteristic: skew - The five-number summary of data and relationship to boxplot - Statistical summaries of pairwise relationship between variables: sample covariance and correlation - The logarithmic transformation for skewed data ## Introduction to Statistical Learning ## Level 1 - Sources of randomness and stochasticity in data - The "inverse problem" way of thinking about data analysis - Properties of discrete probability distributions - Expectation for discrete probability distributions - How the sample mean is an _estimate_ of expected value - The law of large numbers and the central limit theorem - The statement of the central limit theorem - The Bernoulli, Binomial and Normal distributions - Joint and conditional distribution for discrete probability distributions - Conditional 
expectation for discrete probability distributions ## Level 2 - Using the CLT to get a confidence interval for the mean - Using the CLT to test a simple hypothesis about the mean # Midterm Structure The midterm will consist of three sections: ~10-15 multiple choice questions, ~5-8 short questions, and 1 or 2 longer questions. Multiple choice will test concept definitions along with problems similar to written exercises in class. Short questions will be similar to written problems done in class or homework, along with concept questions where longer written answers are required. Longer questions are for problem solving (e.g., design a data pipeline to carry out a specific task, prove a property of a summary statistic, etc.) <file_sep>/materials/homeworks/final_review.md --- title: Final Material author: CMSC 320 geometry: margin=1in fontfamily: utopia --- This document describes what will be fair game in the final exam. Each section is divided into two levels (level 1 and 2). Mastery of level 1 material is essential to do well in the final, level 2 is needed to do great in the final. The final covers material from the entire course, but is weighted roughly 2/3 towards material in the second part of the semester (starting with linear models below). ## Preliminaries ### Level 1 - Data Analysis Cycle: preparation -> modeling -> communication ### Level 2 - Data Analysis Cycle: as presented in slides/Zumen & Mount ## R ### Level 1 - Variables vs. values - All the many ways to index vectors/data.frames - Functions, conditionals, loops - Lists vs. 
vectors - Matrices ### Level 2 - vectorization - the `apply` family ## Measurement types ### Level 1 - categorical - ordered categorical (ordinal) - discrete numerical - continuous numerical ### Level 2 - factors/levels in R - the importance of units ## Best practices ### Level 1 - the importance of reproducibility - tools to improve reproducibility - data science ethics and responsible conduct of research ### Level 2 - the importance of thinking like an experimentalist ## Data Wrangling ## Level 1 - `dplyr` single table verbs - the Select-From-Where SQL query - different join semantics - why are database systems helpful and useful? ## Level 2 - Keys/Foreign Keys in the Entity-Relationship data model - How an ER diagram is converted into a set of Relations (data tables) ## Tidy Data and Data Models ## Level 1 - Components of a Data Model - Basics of the Entity-Relationship and Relational Data Models - The components of an ER diagram - The relationship between tidy data, the ER and the Relational models ## Level 2 - JSON - Other data models ## Exploratory Data Analysis ## Level 1 ### Summary Statistics - Distributional characteristics: range, central tendency, spread - Statistical summaries: sample mean, sample median, sample standard deviation ### Visualization for EDA - Plots to show data distribution for one variable/two variables - The data/aesthetic mapping/geometric representation scheme for data visualization (ggplot) ### Data transformations - difference between data missing systematically vs. 
missing at random - Centering and scaling data transformation (standardization) - Imputing continuous numeric missing data - Standard units - Ways of discretizing continuous numeric data ## Level 2 - The derivation of the mean as an _optimal_ central tendency statistic - Rank summary statistics - Distributional characteristic: skew - The five-number summary of data and relationship to boxplot - Statistical summaries of pairwise relationship between variables: sample covariance and correlation - The logarithmic transformation for skewed data ## Introduction to Statistical Learning ### Level 1 - Sources of randomness and stochasticity in data - The "inverse problem" way of thinking about data analysis - Properties of discrete probability distributions - Expectation for discrete probability distributions - How the sample mean is an _estimate_ of expected value - The law of large numbers and the central limit theorem - The statement of the central limit theorem - The Bernoulli, Binomial and Normal distributions - Joint and conditional distribution for discrete probability distributions - Conditional expectation for discrete probability distributions ### Level 2 - Using the CLT to get a confidence interval for the mean - Using the CLT to test a simple hypothesis about the mean ## Linear models for regression ### Level 1 - The linear regression model - Estimating linear regression parameters by minimizing residual sum of squares (RSS) - Fitting a linear regression model in R using the `lm` function - How the t-statistic and t-test is used in linear regression. - Diagnostic plots for linear regression - How to encode categorical predictors in a linear regression model, and how to interpret their coefficient estimates - How to incorporate and interpret predictor interactions in a linear regression model ### Level 2 - The closed form solution for the simple linear regression model. - Constructing a confidence interval for a parameter estimate in the linear regression model. 
- The R^2 measure to assess global fit in a regression model - How the F-test is used to test relationship between outcome and sets of predictors - What is co-linearity ## Linear models for classification ### Level 1 - What is a classification problem? - Why shouldn't you use linear regression (for continuous outcomes) to predict outcome for a binary categorical variable - What is log-odds? How do we transform log-odds to probabilities? - How is the logistic regression problem defined. - Fitting a logistic regression problem using the `glm` function. - How do we calculate error rate for a classification problem? - What are False positive and false negative errors? - What is the False positive rate? True positive rate? ### Level 2 - Understanding classification as a probability estimation problem. - The LDA (linear discriminant analysis) classification model. How to fit it using group-by/summarize queries. - The Naive Bayes classification model. How to fit it using group-by/summarize queries. - What are precision and recall? - How do you construct a Receiver Operator Curve (ROC) using True Positive and False positive rates? ## Tree-based methods ### Level 1 - What is a regression tree? - What is a classification (decision) tree? - Do tree-based methods learn linear or non-linear functions between predictors and outputs? - How to use recursive partitioning to build a regression tree ### Level 2 - What does it mean to "prune" a decision tree, why is that a good idea? - What is the random forest method? What is its relationship to regression and decision trees. - How can we measure "variable importance" using the random forest algorithm. ## The support vector machine ### Level 1 - How should we encode (0/1 or -1/+1) categorical outcome data to fit a support vector machine. - Why is it called a support vector machine. - How to fit an svm using the `svm` function in the `e1071` R package. ### Level 2 - What is the purpose of the "cost" parameter in an SVM.
- What is a kernel function, why do we use them in SVMs? - Why is looking at the number of support vectors in a fitted SVM useful? ## Model evaluation using resampling ### Level 1 - What is the difference between _model assessment_ and _model selection_ - Describe how $k$-fold cross validation is used for model assessment. Describe how $k$-fold cross validation is used for _model selection_. - How to compare models using cross-validation estimates of error. ### Level 2 - Why is $k$-fold cross validation preferable over other resampling methods (e.g., single validation set, or resampled validation sets). ## Unsupervised methods ### Level 1 - What is the distinction between unsupervised and supervised methods? - Why is PCA a "dimensionality reduction" method? - What is the objective function of the PCA problem? - The role of scaling and centering transformations in the PCA problem? ### Level 2 - What is the relevance of the 'percent variance explained' metric for PCA? - How can we determine predictor correlation from the result of PCA? ## Gradient Descent - What is the update rule for multivariate linear regression - What is the update rule for logistic regression - What is the general form of the gradient descent algorithm - What is the difference between the stochastic and batch versions of gradient descent ## Communication - What are some of the advantages provided by interactivity in the graphical presentation of data. <file_sep>/materials/projects/Project1.Rmd --- title: "Project 1: Data scraping and cleaning" author: "CMSC320, Spring 2020" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` **Posted:** Feb 17, 2020 **Last Updated:** `r format(Sys.Date(), "%b %d, %Y")` **Due:** Mar 9, 2020 You've been hired by a new space weather startup looking to disrupt the space weather reporting business. 
Your first project is to provide better data about the top 50 solar flares recorded so far than the data shown by your startup 's competitor [SpaceWeatherLive.com](https://www.spaceweatherlive.com/en/solar-activity/top-50-solar-flares). To do this, your boss pointed you to [this messy HTML page](http://cdaw.gsfc.nasa.gov/CME_list/radio/waves_type2.html) from NASA ([available here also](/misc/waves_type2.html)) where you can get extra data that your startup can post in your new spiffy site. Of course, you don't have access to the raw data for either of these two tables, so as an enterprising data scientist you will scrape this information directly from each HTML page using all the great tools we have seen in class. By the way, you should read up a bit on [Solar Flares](https://en.wikipedia.org/wiki/Solar_flare), [coronal mass ejections](https://www.spaceweatherlive.com/en/help/what-is-a-coronal-mass-ejection-cme), [the solar flare alphabet soup](http://spaceweather.com/glossary/flareclasses.html), [the scary storms of Halloween 2003](http://www.nasa.gov/topics/solarsystem/features/halloween_storms.html), and [sickening solar flares](https://science.nasa.gov/science-news/science-at-nasa/2005/27jan_solarflares). For this project you will do the following: - Scrape and prepare each of the two datasets - Integrate the two datasets (including some Entity Resolution) - Exploratory Analysis ## Part 1: Data scraping and preparation ### Step 1: Scrape your competitor's data (10 pts) Scrape data for the top 50 solar flares shown in [SpaceWeatherLive.com](https://www.spaceweatherlive.com/en/solar-activity/top-50-solar-flares). Steps to do this are: 1. Use `rvest` (if using R), or `beautiful soup` (if using Python) to parse the table into a data frame. 2. Rename attributes to some reasonable names for, e.g., `rank`, `flare_classification`, `date`, `flare_region`, `start_time`, `maximum_time`, `end_time`, `movie`. 
The result should be a data frame with the first few rows as: ``` # A tibble: 50 x 8 rank flare_classification date flare_region <int> <chr> <chr> <int> 1 1 X28.0 2003/11/04 486 2 2 X20.0 2001/04/02 9393 3 3 X17.2 2003/10/28 486 4 4 X17.0 2005/09/07 808 5 5 X14.4 2001/04/15 9415 6 6 X10.0 2003/10/29 486 7 7 X9.4 1997/11/06 8100 8 8 X9.3 2017/09/06 2673 9 9 X9.0 2006/12/05 930 10 10 X8.3 2003/11/02 486 # ... with 40 more rows, and 4 more variables: # start_time <chr>, max_time <chr>, end_time <chr>, # movie <chr> ``` ### Step 2: Tidy the top 50 solar flare data (10 pts) Your next step is to make sure this table is usable: 1. Drop the last column of the table, since we are not going to use it moving forward. 2. Combine the `date` attribute and each of the three time attributes into three datetime columns. You will see why this is useful later on. 3. Convert columns containing datetimes into actual datetime objects. The result of this step should be a data_frame with the first few rows as: ``` # A tibble: 50 x 6 rank flare_classification start_datetime * <int> <chr> <dttm> 1 1 X28.0 2003-11-04 19:29:00 2 2 X20.0 2001-04-02 21:32:00 3 3 X17.2 2003-10-28 09:51:00 4 4 X17.0 2005-09-07 17:17:00 5 5 X14.4 2001-04-15 13:19:00 6 6 X10.0 2003-10-29 20:37:00 7 7 X9.4 1997-11-06 11:49:00 8 8 X9.3 2017-09-06 11:53:00 9 9 X9.0 2006-12-05 10:18:00 10 10 X8.3 2003-11-02 17:03:00 # ... with 40 more rows, and 3 more variables: # max_datetime <dttm>, end_datetime <dttm>, # flare_region <int> ``` ### Step 3. Scrape the NASA data (15 pts) Next you need to scrape the data in http://cdaw.gsfc.nasa.gov/CME_list/radio/waves_type2.html ([also available here](/misc/waves_type2.html)) to get additional data about these solar flares.
This table format is described here: https://cdaw.gsfc.nasa.gov/CME_list/radio/waves_type2_description.htm, and here: #### NASA data description <div class="Section1"> <p class="MsoNormal" style="tab-stops:125.25pt">The Wind/WAVES type II burst catalog: A brief description</p> <p class="MsoNormal" style="tab-stops:125.25pt"><o:p>&nbsp;</o:p></p> <p class="MsoNormal" style="tab-stops:125.25pt">URL: <a href="http://cdaw.gsfc.nasa.gov/CME_list/radio/waves_type2.html">http://cdaw.gsfc.nasa.gov/CME_list/radio/waves_type2.html</a>.<span style="mso-spacerun:yes">&nbsp; </span></p> <p class="MsoNormal" style="tab-stops:125.25pt">This is a catalog of type II bursts observed by the<span style="mso-spacerun:yes">&nbsp; </span>Radio and Plasma Wave (WAVES) experiment on board the Wind spacecraft and the associated coronal mass ejections (CMEs) observed by the Solar and Heliospheric Observatory (SOHO) mission.<span style="mso-spacerun:yes">&nbsp; </span>The type II burst catalog is derived from the Wind/WAVES catalog available at <a href="http://lep694.gsfc.nasa.gov/waves/waves.html">http://lep694.gsfc.nasa.gov/waves/waves.html</a> by adding a few missing events. </p> <p class="MsoNormal" style="tab-stops:125.25pt">The CMEs in this catalog are called radio-loud CMEs because of their ability to produce type II radio bursts. 
The CME sources are also listed, as derived from the Solar Geophysical Data listing or from inner coronal images such as Yohkoh/SXT and SOHO/EIT.<span style="mso-spacerun:yes">&nbsp; </span>Some solar sources have also been obtained from Solarsoft Latest Events Archive after October 1, 2002: <a href="http://www.lmsal.com/solarsoft/latest_events_archive.html">http://www.lmsal.com/solarsoft/latest_events_archive.html</a></p> <p class="MsoNormal" style="tab-stops:125.25pt"><o:p>&nbsp;</o:p></p> <p class="MsoNormal" style="tab-stops:125.25pt">Explanation of catalog entries:</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 1: <span style="mso-spacerun:yes">&nbsp;</span>Starting date of the type II burst (yyyy/mm/dd format)</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 2: <span style="mso-spacerun:yes">&nbsp;</span>Starting time (UT) of the type II burst (hh:mm format)</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 3: <span style="mso-spacerun:yes">&nbsp;</span>Ending date of the type II burst (mm/dd format; year in Column 1 applies)</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 4: <span style="mso-spacerun:yes">&nbsp;</span>Ending time of the Type II burst (<span class="GramE">hh:mm<span style="mso-spacerun:yes">&nbsp; </span>format</span>) </p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 5: <span style="mso-spacerun:yes">&nbsp;</span>Starting frequency of type II burst (kHz) [1]</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 6: <span style="mso-spacerun:yes">&nbsp;</span>Ending frequency of type II burst (kHz) [1]</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 7: <span style="mso-spacerun:yes">&nbsp;</span>Solar source location (Loc) of the associated eruption in heliographic coordinates [2]</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 8:<span style="mso-spacerun:yes">&nbsp; </span>NOAA active region number (NOAA) [3]</p> <p class="MsoNormal" 
style="tab-stops:125.25pt">Column 9:<span style="mso-spacerun:yes">&nbsp; </span>Soft X-ray flare importance (Imp<span class="GramE">) <span style="mso-spacerun:yes">&nbsp;</span>[</span>4]</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 10: Date of the associated CME (mm/dd format, Year in Column 1 applies) [5]</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 11: Time of the associated CME (hh:mm format)</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 12: Central position angle (CPA, degrees) for non-halo CMEs [6]</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 13: CME width in the sky plane (degrees) [7]</p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 14: CME speed in the sky plane (km/s) </p> <p class="MsoNormal" style="tab-stops:125.25pt">Column 15: Link to the daily proton, height-time, X-ray (PHTX) plots [8]</p> <p class="MsoNormal" style="tab-stops:125.25pt"><o:p>&nbsp;</o:p></p> <p class="MsoNormal" style="tab-stops:125.25pt">Notes</p> <p class="MsoNormal" style="tab-stops:125.25pt">[1<span class="GramE">]<span style="mso-spacerun:yes">&nbsp; </span>????</span> <span class="GramE">indicate</span> that the starting and ending frequencies are not determined.</p> <p class="MsoNormal" style="tab-stops:125.25pt">[2] Heliographic coordinates.<span style="mso-spacerun:yes">&nbsp; </span>S25E16 means the latitude is 25 deg south and 16 deg east (source located in the southeast quadrant of the Sun. N denotes northern latitudes and W denotes western longitudes. Entries like SW90 indicate that the source information is not complete, but we can say that the eruption occurs on the west limb but at southern latitudes; if such entries have a subscript b (e.g., NE90b) it means that the source is behind the particular limb. This information is usually gathered from SOHO/EIT difference images, which show dimming above the limb in question. 
Completely backside events with no information on the source location are marked as “back”. </p> <p class="MsoNormal" style="tab-stops:125.25pt">[3] If the active region number is not available or if the source region is not an active region, the entry is “----”. Filament regions are denoted by “FILA” or “DSF” for disappearing solar filament.</p> <p class="MsoNormal" style="tab-stops:125.25pt">[4] Soft X-ray <span class="GramE">flare</span> size (peak flux in the 1-8 A channel) from GOES. “----” means the soft X-ray flux is not available.</p> <p class="MsoNormal" style="tab-stops:125.25pt">[5] Lack of SOHO observations are noted as “LASCO DATA GAP”.<span style="mso-spacerun:yes">&nbsp; </span>Other reasons are also noted if there is no CME parameters measured.</p> <p class="MsoNormal" style="tab-stops:125.25pt">[6] The central position angle (CPA) is meaningful only for non-halo CMEs. For halo CMEs, the entry is “Halo”. For halo CMEs, the height-time measurements are made at a position angle where the halo appears to move the fastest. This is known as the measurement position angle (MPA) and can be found in the main catalog (<a href="http://cdaw.gsfc.nasa.gov/CME_List">http://cdaw.gsfc.nasa.gov/CME_List</a>).</p> <p class="MsoNormal" style="tab-stops:125.25pt">[7] Width = 360 means the CME is a fill halo (see [6]). 
For some entries, there is a <span class="GramE">prefix<span style="mso-spacerun:yes">&nbsp; </span>“</span>&gt;”, which means the reported width is a lower limit.</p> <p class="MsoNormal" style="tab-stops:125.25pt">[8<span class="GramE">] <span style="mso-spacerun:yes">&nbsp;</span>‘PHTX’</span> (proton, height-time, X-ray) link to three-day overview plots of solar energetic particle events (protons in the &gt;10, &gt;50 and &gt;100 MeV GOES channels).</p> <p class="MsoNormal" style="tab-stops:125.25pt"><o:p>&nbsp;</o:p></p> <p class="MsoNormal" style="tab-stops:125.25pt">Links:</p> <p class="MsoNormal" style="tab-stops:125.25pt">The CMEs and the type II bursts can be viewed together using the c2rdif_waves.html movies linked to the starting frequency (Column 5). The c3rdif_waves.html movies are linked to the ending frequencies (Column 6). The CMEs and the GOES flare light curves for a given type II burst can be viewed from the Javascript movies linked to the CME date (Column 10).<span style="mso-spacerun:yes">&nbsp; </span>The height-time plots (linear and quadratic) of the CMEs are linked to the CME speed (Column 14). </p> <p class="MsoNormal" style="tab-stops:125.25pt">PHTX plots are linked to Column 15.</p> <p class="MsoNormal" style="tab-stops:125.25pt"><o:p>&nbsp;</o:p></p> <p class="MsoNormal" style="tab-stops:125.25pt">If you have questions, contact: <NAME> (<a href="mailto:<EMAIL>"><EMAIL></a>)</p> <p class="MsoNormal" style="tab-stops:125.25pt">This work is supported by NASA’s Virtual Observatories Program</p> <p class="MsoNormal" style="tab-stops:125.25pt"><o:p>&nbsp;</o:p></p> <p class="MsoNormal"><o:p>&nbsp;</o:p></p> </div> #### Tasks 1. Use `rvest` (if using R) or `beautifulsoup4` (if using python) to obtain each row of data as a long string. Create a `data_frame` (or Pandas `DataFrame`) at this point so it's easier to operate on. 2. Separate each line of text into a data row. Choose appropriate names for columns. 
The result of this step should be similar to: ``` # A tibble: 482 x 14 start_date start_time end_date * <chr> <chr> <chr> 1 1997/04/01 14:00 04/01 2 1997/04/07 14:30 04/07 3 1997/05/12 05:15 05/14 4 1997/05/21 20:20 05/21 5 1997/09/23 21:53 09/23 6 1997/11/03 05:15 11/03 7 1997/11/03 10:30 11/03 8 1997/11/04 06:00 11/05 9 1997/11/06 12:20 11/07 10 1997/11/27 13:30 11/27 # ... with 472 more rows, and 11 more # variables: end_time <chr>, # start_frequency <chr>, end_frequency # <chr>, flare_location <chr>, # flare_region <chr>, # flare_classification <chr>, cme_date # <chr>, cme_time <chr>, cme_angle # <chr>, cme_width <chr>, cme_speed # <chr> ``` ### Step 4: Tidy the NASA table (15 pts) Now, we tidy up the NASA table. Here we will code missing observations properly, recode columns that correspond to more than one piece of information, treat dates and times appropriately, and finally convert each column to the appropriate data type. 1. Recode any missing entries as `NA`. Refer to the data description in http://cdaw.gsfc.nasa.gov/CME_list/radio/waves_type2_description.html (and above) to see how missing entries are encoded. 2. The CPA column (`cme_angle`) contains angles in degrees for most rows, except for halo flares, which are coded as `Halo`. Create a new (logical) column that indicates if a row corresponds to a halo flare or not, and then replace `Halo` entries in the `cme_angle` column as NA. 3. The `width` column indicates if the given value is a lower bound. Create a new (logical) column that indicates if width is given as a lower bound, and remove any non-numeric part of the width column. 4. Combine date and time columns for `start`, `end` and `cme` so they can be encoded as `datetime` objects. 5. Convert columns to appropriate data types.
The output of this step should be similar to this (note the column types) ``` # A tibble: 482 x 13 start_datetime end_datetime <dttm> <dttm> 1 1997-04-01 14:00:00 1997-04-01 14:15:00 2 1997-04-07 14:30:00 1997-04-07 17:30:00 3 1997-05-12 05:15:00 1997-05-14 16:00:00 4 1997-05-21 20:20:00 1997-05-21 22:00:00 5 1997-09-23 21:53:00 1997-09-23 22:16:00 6 1997-11-03 05:15:00 1997-11-03 12:00:00 7 1997-11-03 10:30:00 1997-11-03 11:30:00 8 1997-11-04 06:00:00 1997-11-05 04:30:00 9 1997-11-06 12:20:00 1997-11-07 08:30:00 10 1997-11-27 13:30:00 1997-11-27 14:00:00 # ... with 472 more rows, and 11 more variables: # cme_datetime <dttm>, start_frequency <int>, # end_frequency <int>, flare_location <chr>, # flare_region <chr>, flare_classification <chr>, # cme_angle <int>, cme_speed <int>, halo <lgl>, # cme_width <int>, cme_width_limit <lgl> ``` ## Part 2: Analysis Now that you have data from both sites, let's start some analysis. ### Question 1: Replication (10 pts) Can you replicate the top 50 solar flare table in [SpaceWeatherLive.com](https://www.spaceweatherlive.com/en/solar-activity/top-50-solar-flares) exactly using the data obtained from NASA? That is, if you get the top 50 solar flares from the NASA table based on their classification (e.g., `X28` is the highest), do you get data for the same 50 solar flare events in the SpaceWeatherLive page? If not, why not? Include code used to get the top 50 solar flares from the NASA table (be careful when ordering by classification, separating classification is useful). Write a sentence or two discussing how well you can replicate the SpaceWeatherLive data from the NASA data. ### Question 2: Entity Resolution (15 pts) Let's see if we can improve how well the two datasets match each other by doing some _Entity Resolution_. 
Let's denote entities (flares) from the SpaceWeatherLive page as $E_1$, and entities (flares) from the NASA data as $E_2$ 1) Write a function `flare_similarity` which computes a _similarity_ $s(e_1, e_2)$ between flares $e_1 \in E_1$ and $e_2 \in E_2$. 2) Write a second function `flare_match` that computes for each flare $e_1 \in E_1$ which flare $e_2 \in E_2$ is the most similar. Your function can return `NA` if there is no entity $e_2$ that is sufficiently similar. Here, you determine what is the best matching entry in the NASA data for each of the top 50 solar flares in the SpaceWeatherLive page, if there is such an entry. 3) Add the result of `flare_match` to the top 50 table as the _index_ of the best matching row in the NASA table, or `NA`. In your submission, include a text explanation of how you define the similarity function $s(e_1, e_2)$ and how you use it to determine the best matching entity. ### Question 3: Analysis (10 pts) Prepare one plot that shows the top 50 solar flares in context with all data available in the NASA dataset. Here are some possibilities (you can do something else) 1. Plot attributes in the NASA dataset (e.g., starting or ending frequencies, flare height or width) over time. Use graphical elements (e.g., text or points) to indicate flares in the top 50 classification. 2. Do flares in the top 50 tend to have Halo CMEs? You can make a barplot that compares the number (or proportion) of Halo CMEs in the top 50 flares vs. the dataset as a whole. 3. Do strong flares cluster in space? Are there solar regions that have strong flares (in the top 50) more commonly than expected (considering the full flare dataset)? ## Submission Prepare an Rmarkdown file (if using R) or Jupyter notebook (if using python) that includes for each step in Part 1: (a) code to carry out the step discussed, (b) partial output showing the output of your code, similar to the examples above, and (c) a short prose description of how your code works.
For questions 1 and 2 of Part 2, follow the instructions there. For Question 3 of part 2 provide: (a) a short description (2 sentences) of what the intent of your plot is (think in terms of our discussion on how we show variation, co-variation in terms of central trend, spread, skew etc.), (b) code to produce your plot, (c) a short text description of your plot, and (d) a sentence or two of interpretation of your plot (again think of variation, co-variation, etc.). Export to PDF and submit that to ELMS. <file_sep>/materials/lectures/SVM/SVM.Rmd --- title: Support Vector Machine author: CMSC320 date: "`r Sys.Date()`" output: html_document --- Support Vector Machines are state-of-the-art classification methods. They provide a flexible and efficient framework to learn classifiers. They build upon linear methods we have discussed previously and have a nice geometric interpretation of how they are trained (based on maximum margin arguments). Their flexibility comes from the fact that they can be trained over _similarities_ between observations (more on this later) rather than standard data in tabular form. This is useful in applications where string similarities, or network similarities are readily available. SVMs also follow the "classification as a space partition" framework that we have seen for logistic regression and decision trees.
```{r, echo=FALSE} library(MASS) library(RColorBrewer) mycols <- brewer.pal(8, "Dark2")[c(3,2)] s <- sqrt(1/5) set.seed(30) makeX <- function(M, n=100, sigma=diag(2)*s) { z <- sample(1:nrow(M), n, replace=TRUE) m <- M[z,] return(t(apply(m,1,function(mu) mvrnorm(1,mu,sigma)))) } M0 <- mvrnorm(10, c(1,0), diag(2)) # generate 10 means x0 <- makeX(M0) ## the final values for y0=blue M1 <- mvrnorm(10, c(0,1), diag(2)) x1 <- makeX(M1) x <- rbind(x0, x1) y <- c(rep(0,100), rep(1,100)) cols <- mycols[y+1] GS <- 75 # put data in a Gs x Gs grid XLIM <- range(x[,1]) tmpx <- seq(XLIM[1], XLIM[2], len=GS) YLIM <- range(x[,2]) tmpy <- seq(YLIM[1], YLIM[2], len=GS) newx <- expand.grid(tmpx, tmpy) colnames(newx) <- c("X1","X2") ``` ```{r, echo=FALSE, fig.height=10, fig.width=10} layout(matrix(1:4, nr=2, byrow=TRUE)) plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n", main="Training Set") points(x, col=cols) # linear SVM library(e1071) dat <- data.frame(X1=x[,1], X2=x[,2]) fit <- svm(y~X1+X2, data=dat, cost=1, kernel="linear", type="C-classification") yhat <- attr(predict(fit, newdata=newx, decision.values=TRUE), "decision.values")[,1] yhat <- ifelse(yhat > 0, 2, 1) colshat <- mycols[yhat] plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n",main="linear svm") points(x, col=cols) points(newx, col=colshat, pch=".") contour(tmpx, tmpy, matrix(as.numeric(yhat),GS,GS), levels=c(1,2), add=TRUE, drawlabels=FALSE) fit <- svm(y~X1+X2, data=dat, cost=1, kernel="radial", type="C-classification", gamma=1) yhat <- attr(predict(fit, newdata=newx, decision.values=TRUE), "decision.values")[,1] yhat <- ifelse(yhat > 0, 2, 1) colshat <- mycols[yhat] plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n",main="non-linear svm RBF gamma=1") points(x, col=cols) points(newx, col=colshat, pch=".") contour(tmpx, tmpy, matrix(as.numeric(yhat),GS,GS), levels=c(1,2), add=TRUE, drawlabels=FALSE) fit <- svm(y~X1+X2, data=dat, cost=1, kernel="radial", 
type="C-classification", gamma=5) yhat <- attr(predict(fit, newdata=newx, decision.values=TRUE), "decision.values")[,1] yhat <- ifelse(yhat > 0, 2, 1) colshat <- mycols[yhat] plot(x, col=cols, xlab="X1", ylab="X2", xlim=XLIM, ylim=YLIM, type="n",main="non-linear svm RBF gamma=5") points(x, col=cols) points(newx, col=colshat, pch=".") contour(tmpx, tmpy, matrix(as.numeric(yhat),GS,GS), levels=c(1,2), add=TRUE, drawlabels=FALSE) ``` ### The two-class linear Support Vector Machine Given training data: $\{(\mathbf{x}_1,y_1), (\mathbf{x}_2,y_2),\ldots,(\mathbf{x}_n,y_n)\}$, where $\mathbf{x}_i$ is a vector of $p$ predictor values for the $i$th observation, and $y_i$ is the class label (we're going to use +1 and -1), SVMs define a _discriminative_ function such that $$ \beta_0 + \beta_1 x_{i1} + \beta_2 x_{i2} + \cdots + \beta_p x_{ip} > 0 \, \mathrm{ if } y_i = 1 $$ and $$ \beta_0 + \beta_1 x_{i1} + \beta_2 x_{i2} + \cdots + \beta_p x_{ip} < 0 \, \mathrm{ if } y_i = -1 $$ Note that points where the _discriminative_ function equals 0 form a _hyper-plane_ (i.e., a line in 2D) ![](9_2.png) A central concept in SVMs that we did not see in logistic regression is **the margin**: the distance between the separating plane and its nearest datapoints. ```{r, echo=FALSE} library(png) library(grid) img <- readPNG("9_3.png") grid.raster(img) ``` The SVM is built from three _key insights_: 1. **Look for the maximum margin hyper-plane** 2. Only depends on a subset of observations (support vectors) 3. Only depends on pair-wise "similarity" functions of observations Let's see these in turn: **Look for the maximum margin hyper-plane** The goal is to find the plane (think line in 2D) that separates training data with largest margin. This will tend to _generalize_ better since new observations have room to fall within the margin and still be classified correctly.
This can be cast as an _optimization_ problem (see _Numerical Methods_ or _Machine Learning_ class for details): $$ \mathrm{max}_{\beta_0,\beta_1,\ldots,\beta_p} M \\ \mathrm{s.t} \sum_{j=1}^p \beta_j^2 = 1 \\ y_i(\beta_0 + \beta_1 x_{i1} + \ldots + \beta_p x_{ip}) \geq M \, \forall i $$ This runs however into a complication: _What if there is no separating hyper-plane?_ The solution is to penalize observations on the **wrong side of the margin**. ![](9_6.png) $$ \mathrm{max}_{\beta_0,\beta_1,\ldots,\beta_p} M \\ \mathrm{s.t} \sum_{j=1}^p \beta_j^2 = 1 \\ y_i(\beta_0 + \beta_1 x_{i1} + \ldots + \beta_p x_{ip}) \geq M(1-\epsilon_i) \, \forall i \\ \epsilon_i \geq 0 \, \forall i \\ \sum_{i=1}^n \epsilon_i \leq C $$ $C$ is a parameter that trades off the width of the margin vs. the penalty on observations on the _wrong_ side of the margin. This parameter $C$ has to be selected by the user or via cross-validation model selection methods we saw before. ```{r, echo=FALSE, fig.width=10, fig.height=10} img <- readPNG("9_7.png") grid.raster(img) ``` _Key insight no. 2_: **SVMs only depend on a subset of observations (support vectors)** As a result of the maximum-margin formulation, we only need observations that are on the "wrong" side of the margin to get $\beta$ values. These are called _support vectors_. In general: the smaller the parameter $C$ is, the fewer SVs the learned SVM will have. You can also think of the number of SVs as a rough measure of the _complexity_ of the SVM obtained. _Key insight no.
3_: **SVMs only depend on pairwise "similarity" functions of observations** We can solve the optimization problem above only using inner products between observations (as opposed to the observations themselves) _Inner product_: $\langle x_i, x_{i'} \rangle = \sum_{j=1}^p x_{ij}x_{i'j}$ As a result, we can write the _discriminant_ function in equivalent form $$ f(x) = \beta_0 + \sum_{i=1}^n \alpha_i \langle x, x_i \rangle $$ which, by definition, has $\alpha_i > 0$ **only** for SVs ### Non-linear Support Vector Machine This last insight is how we learn non-linear discriminative functions in SVMs. We can generalize the inner product using "kernel" functions that provide something like an inner product: $$ f(x) = \beta_0 + \sum_{i=1}^n \alpha_i k(x, x_i) $$ ![](9_8.png) But, what is $k$? Let's consider two examples. - _Polynomial kernel_: $k(x,x_i) = (1+\langle x, x_i \rangle)^d$ - _RBF (radial) kernel_: $k(x,x_i) = \exp\{-\gamma \sum_{j=1}^p (x_{j}-x_{ij})^2\}$ ![](9_9.png) ```{r, echo=FALSE} library(RColorBrewer) palette(brewer.pal(8, "Dark2")) k <- function(x, x0=0, gamma=1) { exp(-gamma*(x-x0)^2) } x <- seq(-3, 3, len=100) plot(x, k(x), type="l", lwd=2, col=1, main="RBF kernel") lines(x, k(x,gamma=10), lwd=2, col=2) lines(x, k(x,gamma=.1), lwd=2, col=3) legend("topright", legend=paste("gamma=",c(1,10,.1)), lty=1, lwd=2, col=1:3) ``` ## Fitting Support Vector Machines in R Again, the familiar _formula_ interface is used to train SVMs. In this case we indicate that we are learning a _linear_ SVM using the `kernel` function argument. The tradeoff parameter $C$ is indicated in the `cost` function argument. Here we are fitting three different SVMs resulting from using three different values of $C$.
```{r} library(e1071) library(ISLR) data(Default) n <- nrow(Default) train_indices <- sample(n, n/2) costs <- c(.01, 1, 100) svm_fits <- lapply(costs, function(cost) { svm(default~., data=Default, cost=cost, kernel="linear",subset=train_indices) }) ``` Let's take a look at how these SVMs behave: ```{r} number_svs <- sapply(svm_fits, function(fit) fit$tot.nSV) error_rate <- sapply(svm_fits, function(fit) { yhat <- predict(fit, newdata=Default[train_indices,]) train <- mean(yhat != Default$default[train_indices]) yhat <- predict(fit, newdata=Default[-train_indices,]) test <- mean(yhat != Default$default[-train_indices]) c(train=train, test=test) }) tab <- data.frame(cost=costs, number_svs=number_svs, train_error=error_rate["train",]*100,test_error=error_rate["test",]*100) knitr::kable(tab) ``` Let's try now a _non-linear_ SVM by using a radial kernel and indicating that using the `kernel` function argument. Notice now that we have two parameters to provide to the fitting function: tradeoff parameter $C$ and parameter $\gamma$ of the radial kernel function. 
```{r} costs <- c(.01, 1, 10) gamma <- c(.01, 1, 10) parameters <- expand.grid(costs, gamma) svm_fits <- lapply(seq(nrow(parameters)), function(i) { svm(default~., data=Default, cost=parameters[i,1], kernel="radial", gamma=parameters[i,2], subset=train_indices) }) ``` Let's take at the result in this case: ```{r} number_svs <- sapply(svm_fits, function(fit) fit$tot.nSV) error_rate <- sapply(svm_fits, function(fit) { yhat <- predict(fit, newdata=Default[train_indices,]) train <- mean(yhat != Default$default[train_indices]) yhat <- predict(fit, newdata=Default[-train_indices,]) test <- mean(yhat != Default$default[-train_indices]) c(train=train, test=test) }) tab <- data.frame(cost=parameters[,1], gamma=parameters[,2], number_svs=number_svs, train_error=error_rate["train",]*100,test_error=error_rate["test",]*100) knitr::kable(tab) ``` # K-nearest neighbors K-nn is a related approach to obtain non-linear classification boundaries. It is a "memory" method as it requires the entire training data is used to make predictions on new data. Conceptually, the classifier is very simple: to make a prediction on entity $x$: - Find $k$ nearest observations in training set (note this requires a distance function, e.g., Euclidean distance) - Predict the majority class within the $k$ nearest neighbors The `class::knn` function can be used in R to use this classifier. Note that the number of neighbors $k$ is a hyper-parameter that must be selected before making predictions. Selecting $k$ falls under the _model selection_ problem we will discuss later. <file_sep>/materials/quizzes/eda_activity.Rmd --- title: "EDA Activity" author: "CMSC320" date: "October 3, 2016" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` Let's practice some EDA work. We're using the `Wage` dataset provided by the `ISLR` package. 
```{r} library(tibble) library(dplyr) library(ggplot2) library(ISLR) data(Wage) wage <- as_tibble(Wage) wage ``` Let's warmup with one question: **Q0**: How are wages distributed overall across years? ```{r, fig.width=3, fig.height=3, fig.align="center"} wage %>% ggplot(aes(x=factor(year), y=wage, color=education)) + geom_boxplot() ``` Now, on your own: **Q1**: How are wages distributed across years as a function of education? (Write the code to make this plot) ```{r, echo=FALSE, fig.width=6, fig.height=3, fig.align="center"} wage %>% ggplot(aes(x=factor(year), y=wage, color=education)) + geom_boxplot() ``` **Q2**: How is the central tendency (e.g., median) of wage changing across years? ```{r} # transform data wage %>% group_by(year, education) %>% summarize(med_wage=median(wage)) %>% # make the plot ggplot(aes(x=year,y=med_wage, color=education)) + geom_line() ``` **Q3**: How is median wage changing across years as a function of education? ```{r} # transform data wage %>% group_by(year) %>% summarize(med_wage=median(wage)) %>% # make the plot ggplot(aes(x=,y=)) + geom_() ``` **Q4**: Is the wage gap between those with advanced degrees and those with less than a HS education changing over time? _Part 1_: How are you going to define the wage gap? _Part 2_: Make a data frame with columns `year` and `wage_gap`. 
```{r} wage %>% group_by(year, education) %>% summarize(med_wage = median(wage)) %>% filter(str_detect(education, "^1") | str_detect(education, "^5")) %>% ungroup() %>% spread(education, med_wage) %>% select(year, hs=matches("^1"), ad=matches("^5")) %>% mutate(gap = (1 - (hs / ad)) * 100) %>% ggplot(aes(x=year, y=gap)) + geom_line() ``` _Part 3_: Plot wage gap as a function of year.<file_sep>/materials/slides/course_info/intro.Rmd --- title: "Course Information" author: "<NAME>" company: "University of Maryland" date: "`r Sys.Date()`" css: ["custom.css"] output: xaringan::moon_reader: lib_dir: libs seal: false includes: after_body: "custom.html" nature: highlightStyle: github highlightLines: true countIncrementalSlides: false ratio: "16:9" --- class: title-slide, center, middle count: false .banner[![](img/epiviz.png)] .title[CMSC320 Introduction to Data Science] .author[<NAME>] .other-info[ University of Maryland, College Park, USA CMSC320: `r Sys.Date()` ] .logo[![](img/logo.png)] --- ```{r setup, include=FALSE} options(htmltools.dir.version = FALSE) knitr::opts_chunk$set(cache=TRUE) ``` ## TL;DR Course Webpage: http://bit.ly/hcb-ids --- ## Why Data Science? 
> “The ability to take data—to be able to understand it, to process it, to extract value from it, to visualize it, to communicate it—that’s going to be a hugely important skill in the next decades, not only at the professional level but even at the educational level for elementary school kids, for high school kids, for college kids.” > <NAME>, Chief Economist at Google (http://www.mckinsey.com/insights/innovation/hal_varian_on_how_the_web_challenges_managers) --- class: middle, center ## Data Science .image-50[![](img/conway.png)] --- layout: false ## Course organization This course will cover basics of how to represent, model and communicate about data and data analyses using the R environment for Data Science - Area 0: tools and skills - Area 1: Data types and operations - Area 2: Data wrangling - Area 3: Modeling - Area 4: Applications - Area 5: Communication --- layout: false ## Evaluation Four projects with _real_ data - Astronomy (Data Cleaning) - Business (Baseball) (Exploratory Data Analysis) - Macroenomics (Regression) - Real Estate (Prediction) - Crime Statistics (Interactive Visualization) Two midterms 5-6 short assignments Final project (no final exam) <file_sep>/static/misc/hw2_er-sql.Rmd --- title: "HW2: ER Diagram and SQL" author: "Your name here" date: "`r Sys.Date()`" output: html_document: df_print: paged html_notebook: df_print: paged editor_options: chunk_output_type: inline --- ## ER Diagram _DELETE THESE INSTRUCTIONS FROM YOUR SUBMISSION: Make a `PNG` or `JPEG` file with your diagram and insert here:_ ![](/Users/hcorrada/tmp/er.png) ## SQL ```{r setupdb, include=FALSE} # make sure you write the path to your sqlite path here db <- DBI::dbConnect(RSQLite::SQLite(), "data/lahman2016.sqlite") ``` _DELETE THESE INSTRUCTIONS FROM YOUR SUBMISSION: write SQL in each of these chunks, set `eval=TRUE` to evaluate_ 1) How many franchises are listed in the database (see [`count`](https://sqlite.org/lang_aggfunc.html#count))? 
```{sql q1, connection=db, eval=FALSE} ``` 2) How many franchises are currently active? ```{sql q2, connection=db, eval=FALSE} ``` 3) Which teams won more than 100 games in one season between 2000 and 2015? Order result by descending number of wins. (attribute `W` of the Teams table contains the number of wins) ```{sql q3, connection=db, eval=FALSE} ``` 4) What is the franchise name of the team with the most total wins in the database? ```{sql q4, connection=db, eval=FALSE} ``` 5) What is the franchise name of the team with the highest winning percentage in a season in the database? (Win percentage is `W/G`) ```{sql q5, connection=db, eval=FALSE} ``` 6) What is the franchise name of the team with the highest single-year payroll between 2000 and 2015? ```{sql q6, connection=db, eval=FALSE} ``` 7) (BONUS from [MDSR book](https://mdsr-book.github.io/)): Identify players (by first and last name) that have attained through their career either a) 500 or more HRs or b) 3000 or more hits (H) _and_ have not been inducted to the Hall of Fame (see `HallOfFame` table). ```{sql q7, connection=db, eval=FALSE} ``` ```{r disconnectdb, include=FALSE} DBI::dbDisconnect(db) ``` <file_sep>/content/lecture-note/svms/index.md --- title: Support Vector Machines and KNN --- Linear and non-linear regression and classification with support vector machines and KNN [Lecture notes](SVM/) ## Resources ISLR Ch. 9 <file_sep>/content/lecture-note/datamodels/index.md --- date: 2016-09-26T11:42:11-04:00 title: Modeling and tidying data --- How to get data into shape before analysis - [Lecture notes: Data models](DataModels/) - [Lecture notes: Tidy data](tidy_data/) ### Readings and resources - [<NAME> (2014). Tidy data. _Journal of Statistical Software_](https://www.jstatsoft.org/index.php/jss/article/view/v059i10/v59i10.pdf) - [<NAME> (2016). R for Data Science](http://r4ds.had.co.nz/)
d012269b0e14a555442409045306e872bdfe6e15
[ "HTML", "Markdown", "TOML", "R", "RMarkdown", "Shell" ]
137
RMarkdown
osundiranay/IntroDataSci-5000
9224ef88e2daba204e371b604b6838858e58d08d
4102ffc7b681ce3b69a30050e1e16db664728f52
refs/heads/master
<repo_name>hallo33/ArchieBerichte<file_sep>/ch.fhnw.globiglobi/src/ch/fhnw/globiglobi/reports/SalePerMandator.java
/*******************************************************************************
 * Copyright (c) 2014 <NAME>, <NAME>.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 * <NAME>
 * <NAME>
 *******************************************************************************/
package ch.fhnw.globiglobi.reports;

import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Status;

import ch.elexis.core.data.activator.CoreHub;
import ch.elexis.data.Konsultation;
import ch.elexis.data.Kontakt;
import ch.elexis.data.Query;
import ch.elexis.data.Verrechnet;
import ch.fhnw.globiglobi.reports.i18n.Messages;
import ch.fhnw.globiglobi.widgets.SelectMandator;
import ch.unibe.iam.scg.archie.annotations.GetProperty;
import ch.unibe.iam.scg.archie.annotations.SetProperty;
import ch.unibe.iam.scg.archie.model.AbstractTimeSeries;
import ch.unibe.iam.scg.archie.ui.widgets.WidgetTypes;

/**
 * <p>
 * Archie statistics provider that lists, per mandator, the services attached to each
 * consultation in the selected date range, whether each consultation was billed and paid,
 * and a per-consultation plus running total of the paid turnover.
 * </p>
 *
 * <p>
 * NOTE(review): the running total is kept in an instance field ({@code summe}) that is
 * reset at the start of {@link #createContent}; the class is therefore not safe for
 * concurrent report runs — presumably Archie runs one job at a time, confirm.
 * </p>
 *
 * @author <NAME>
 * @author <NAME>
 */
public class SalePerMandator extends AbstractTimeSeries {

	// When true, restrict the statistic to the mandator currently active in Elexis
	// (only consulted when no specific mandator is selected via the vendor widget).
	private boolean currentMandatorOnly;

	// Running total (in CHF) of paid services, accumulated across all rows of one run.
	private double summe = 0;

	// Identifier of the mandator chosen in the SelectMandator widget;
	// the sentinel value "All" (SelectMandator.DEFAULT_SELECTED) means no restriction.
	private String selectedMandatorID;

	// Date pattern used by the Elexis database for the Konsultation "Datum" column.
	private static final String DATE_DB_FORMAT = "yyyyMMdd";

	/**
	 * Creates the provider with its localized title and the default mandator
	 * selection ("All").
	 */
	public SalePerMandator(){
		super(Messages.SALEPERMANDATOR_TITLE);
		this.selectedMandatorID = SelectMandator.DEFAULT_SELECTED;
	}

	/**
	 * @return Localized description of this statistic, shown in the Archie UI.
	 */
	public String getDescription(){
		return Messages.SALEPERMANDATOR_DESCRIPTION;
	}

	/**
	 * Defines the seven column headings of the result dataset. The order here must
	 * match the order in which {@link #createContent} fills each row.
	 */
	@Override
	protected List<String> createHeadings(){
		final ArrayList<String> headings = new ArrayList<String>(7);
		headings.add(Messages.SALEPERMANDATOR_HEADING_MANDATOR);
		headings.add(Messages.SALEPERMANDATOR_HEADING_CONSID);
		headings.add(Messages.SALEPERMANDATOR_HEADING_DETECTEDSERVICE);
		headings.add(Messages.SALEPERMANDATOR_HEADING_CHARGEDSERVICE);
		headings.add(Messages.SALEPERMANDATOR_HEADING_PAYEDSERVICE);
		headings.add(Messages.SALEPERMANDATOR_HEADING_SALE);
		headings.add(Messages.SALEPERMANDATOR_HEADING_TOTALSALE);
		return headings;
	}

	/**
	 * Builds the dataset: queries all consultations in the selected date range
	 * (optionally restricted to one mandator), and emits one row per consultation
	 * with its services, billing state, payment state, sale and running total.
	 *
	 * @param monitor progress monitor; cancellation is honored per consultation
	 * @return {@link Status#OK_STATUS} on success, {@link Status#CANCEL_STATUS} if canceled
	 */
	@Override
	protected IStatus createContent(IProgressMonitor monitor){
		monitor.beginTask("Umsätze pro Mandant", IProgressMonitor.UNKNOWN);
		Query consQuery = new Query(Konsultation.class);
		final Query<Kontakt> mandQuery = new Query<Kontakt>(Kontakt.class);

		// Formatter matching the database's yyyyMMdd date representation.
		final SimpleDateFormat databaseFormat = new SimpleDateFormat(DATE_DB_FORMAT);

		// Restrict the consultation query to the user-selected date range.
		consQuery.add("Datum", ">=", databaseFormat.format(this.getStartDate().getTime()));
		consQuery.add("Datum", "<=", databaseFormat.format(this.getEndDate().getTime()));

		// A specific mandator selected in the widget takes precedence; otherwise the
		// "active mandator only" checkbox may restrict to the current Elexis mandator.
		// NOTE(review): mandQuery.execute().get(0) assumes the selected label matches
		// exactly one Kontakt — an empty result would throw; confirm widget guarantees this.
		if (!this.selectedMandatorID.equals("All")) {
			mandQuery.add("Bezeichnung3", "=", this.selectedMandatorID);
			// List<Kontakt> mandatorIDselect = "";
			consQuery.add("MandantID", "=", mandQuery.execute().get(0).getId());
		} else {
			if (this.currentMandatorOnly) {
				consQuery.add("MandantID", "=", CoreHub.actMandant.getId());
			}
		}

		monitor.subTask("Lade Konsultationen");

		// Execute the consultation query.
		List<Konsultation> consultations = consQuery.execute();

		// Result rows; capacity 7 is only an initial hint, not the column count.
		final ArrayList<Comparable<?>[]> content = new ArrayList<Comparable<?>[]>(7);
		summe = 0;

		// Build one dataset row per consultation.
		for (Konsultation cons : consultations) {
			// Per-row cell values; multi-service cells are newline-separated strings.
			String erfasst = "";
			String verrechnet = "";
			String bezahlt = "";
			String umsatz = "";
			String totalerUmsatz = "";
			String consID = "";

			// NOTE(review): delete() looks like it would actually delete the
			// Konsultation rather than test its deletion state — confirm against the
			// Elexis PersistentObject API (isDeleted() may have been intended).
			if (!cons.delete()) {
				// Skip consultations without a mandator; otherwise record name and ID.
				if (cons.getMandant() != null) {
					String mandant = cons.getMandant().getName();
					consID = cons.getId();

					// Iterate all billed/recorded services of this consultation.
					List<Verrechnet> Verrechenbar = cons.getLeistungen();
					Iterator<Verrechnet> itr = Verrechenbar.iterator();
					while (itr.hasNext()) {
						Verrechnet v = itr.next();
						// Collect the service description (one line per service).
						if (erfasst.equals("")) {
							erfasst = v.getText();
						} else {
							erfasst += "\n" + v.getText();
						}

						// A consultation with a bill counts as charged.
						if (cons.getRechnung() != null) {
							verrechnet = "verrechnet";

							// Open amount "0.00" means the bill is fully paid.
							if (cons.getRechnung().getOffenerBetrag().getAmountAsString()
								.equals("0.00")) {
								bezahlt = "bezahlt";

								// Net price * quantity, rounded to two decimals,
								// appended per service and accumulated into the total.
								double geld = (v.getNettoPreis().getAmount() * v.getZahl());
								double geldRund = Math.round(100.0 * geld) / 100.0;
								if (umsatz.equals("")) {
									umsatz = String.valueOf(geldRund);
								} else {
									umsatz += "\n" + String.valueOf(geldRund);
								}
								summe += geldRund;
								totalerUmsatz = String.valueOf(Math.round(100.0 * summe) / 100.0);
							}
							// Bill exists but is still (partially) unpaid.
							else if (!cons.getRechnung().getOffenerBetrag().getAmountAsString()
								.equals("0.00")) {
								bezahlt = "offen";
								umsatz = "0";
								totalerUmsatz = "0";
							}
						}
						// No bill at all for this consultation.
						else {
							verrechnet = "nicht verrechnet";
							bezahlt = "offen";
							umsatz = "0";
							totalerUmsatz = "0";
						}
					}

					// Assemble the row in the same order as createHeadings().
					Comparable<?>[] row = new Comparable<?>[this.dataSet.getHeadings().size()];
					int index = 0;
					row[index++] = mandant;
					row[index++] = consID;
					row[index++] = erfasst;
					row[index++] = verrechnet;
					row[index++] = bezahlt;
					row[index++] = umsatz;
					row[index++] = totalerUmsatz;
					content.add(row);

					// Honor user cancellation between consultations.
					if (monitor.isCanceled()) {
						return Status.CANCEL_STATUS;
					}
				}
			}
		}

		// Publish the assembled rows into the dataset.
		this.dataSet.setContent(content);

		// Job finished successfully.
		monitor.done();
		return Status.OK_STATUS;
	}

	/**
	 * @return True if the statistic should be computed for the current mandator only,
	 *         false for all mandators.
	 */
	@GetProperty(name = "Active Mandator Only", index = 1, widgetType = WidgetTypes.BUTTON_CHECKBOX, description = "Compute statistics only for the current mandator. If unchecked, the statistic will be computed for all mandators.")
	public boolean getCurrentMandatorOnly(){
		return this.currentMandatorOnly;
	}

	/**
	 * @param currentMandatorOnly whether to restrict the statistic to the active mandator
	 */
	@SetProperty(name = "Active Mandator Only")
	public void setCurrentMandatorOnly(final boolean currentMandatorOnly){
		this.currentMandatorOnly = currentMandatorOnly;
	}

	/**
	 * @return The identifier (Kuerzel) of the mandator selected in the widget,
	 *         or "All" when no restriction applies.
	 */
	@GetProperty(name = "Select Mandator", index = 10, widgetType = WidgetTypes.VENDOR, description = "Select a Mandator", vendorClass = SelectMandator.class)
	public String getSelectedMandator(){
		return this.selectedMandatorID;
	}

	/**
	 * @param mandID identifier of the mandator to restrict the statistic to
	 */
	@SetProperty(name = "Select Mandator")
	public void setSelectedMandator(final String mandID){
		this.selectedMandatorID = mandID;
	}
}
ea08dfac7f6ea453dc83d8aa326d5c53cf8feebc
[ "Java" ]
1
Java
hallo33/ArchieBerichte
69ab52a97d8896a24815915b19bbd42f332ec737
1e583989e85ffb6f6966fea7841250089754fd36
refs/heads/master
<file_sep>#!/usr/bin/env python3 from twython import Twython from local_setting import * TWITTER_ACCOUNT = { 'oauth_token_secret': SCARIBOT_ACCESS_SECRET, 'oauth_token': SCARIBOT_ACCESS_TOKEN, 'app_secret': SCARIBOT_CONSUMER_SECRET, 'app_key': SCARIBOT_CONSUMER_KEY } MAX_LEN = 140 t = Twython(**TWITTER_ACCOUNT) def dm(screen_name, message): if screen_name and message: if len(message) > MAX_LEN: head = message[:MAX_LEN] message = message[MAX_LEN:] t.send_direct_message(screen_name=screen_name, text=head) else: t.send_direct_message(screen_name=screen_name, text=message) return return dm(screen_name, message)
f656c23c01bde8481669d414c717eaa67c0d7ed6
[ "Python" ]
1
Python
scari/twitterlet
b4be29189ce6e0e6765c9f12a488bb9acea333b3
fe5493261eb6d923b7929ac97ee32ea804f65587
refs/heads/master
<file_sep>#include "RealSensePluginPrivatePCH.h"
#include "RealSenseFace.h"
#include "pxcsensemanager.h"
#include "pxcfacedata.h"

// UObject constructor boilerplate; face tracking is not wired up yet (see header).
URealSenseFace::URealSenseFace(const FObjectInitializer &init) : UObject(init)
{
}

/*void URealSenseFace::setBone(const Leap::Bone &bone)
{
}*/

// Placeholder: intended to copy RSSDK face data into this UObject; currently a no-op.
void URealSenseFace::setFromRealSenseFace(void* facePointer)
{
	//PXCFaceModule::PXCFaceData* face = (PXCHandData::PXCFaceData*)facePointer;
}<file_sep>#pragma once
#include "RealSensePluginPublicPCH.h"
#include "RealSenseFace.generated.h"

// Blueprint-visible wrapper for a tracked face.
// Not currently utilized; to be extended.
UCLASS(BlueprintType)
class URealSenseFace : public UObject
{
	GENERATED_UCLASS_BODY()
public:
	// World-space position of the face (not yet populated anywhere).
	UPROPERTY(EditAnywhere, BlueprintReadOnly, Category = "RealSense Face")
	FVector WorldPosition;

	// Intended to fill this object from an RSSDK face record; currently a no-op.
	void setFromRealSenseFace(void* facePointer);
private:
};<file_sep>#include "RealSensePluginPrivatePCH.h"
#include "RealSenseUtility.h"

// Scale factor from RSSDK units to Unreal units.
// NOTE(review): presumably RSSDK reports meters and UE uses centimeters — confirm.
#define RS_TO_UE_SCALE 100

// Convert an RSSDK position into UE coordinates: axes are remapped/negated to
// match Unreal's handedness, then scaled by RS_TO_UE_SCALE.
FVector ueVectorFromRSVector(PXCPoint3DF32 in)
{
	return FVector(-in.z, -in.x, in.y) * RS_TO_UE_SCALE;
}

// Convert an RSSDK orientation quaternion into a UE rotator.
FRotator ueRotFromRSQuat(PXCPoint4DF32 rQuat)
{
	FQuat unconverted = FQuat(rQuat.x, rQuat.y, rQuat.z, rQuat.w);
	//Rotate the quaternion to UE space (applies R,Y,P order), P&Y axis need swapping
	//Equivalent of combining (rQuat, 0,90,-90) in blueprint
	return (FQuat(FRotator(0, 90, -90))*unconverted).Rotator();
}<file_sep>#include "RealSensePluginPrivatePCH.h"
#include "IRealSensePlugin.h"
#include "FRealSensePlugin.h"
#include "SlateBasics.h"
#include <windows.h>
#include <wchar.h>
#include "pxcsensemanager.h"
#include "pxchandconfiguration.h"
#include "RealSenseJoint.h"
#include "RealSenseHand.h"
#include "RealSenseInterface.h"

#define PLUGIN_VERSION "0.1.3"

IMPLEMENT_MODULE(FRealSensePlugin, RealSensePlugin)

// Owns the RSSDK session/module handles and the reusable UObjects that are
// pushed to the Blueprint interface delegate each tick.
// NOTE(review): handModule/handData/faceModule/faceData/interfaceDelegate are
// not initialized here — they are only set by the Enable*/SetInterfaceDelegate
// calls; confirm RealSenseTick cannot run before those assignments.
class DataCollector{
public:
	DataCollector()
	{
		// create the PXCSenseManager
		psm = PXCSenseManager::CreateInstance();
		if (!psm) {
			UE_LOG(LogClass, Error, TEXT("RealSense failed to create instance, Unable to create the PXCSenseManager."));
			return;
		}

		// Retrieve the underlying session created by the PXCSenseManager.
		// The returned instance is a PXCSenseManager internally managed object.
		session = psm->QuerySession();
		if (session == NULL) {
			UE_LOG(LogClass, Error, TEXT("RealSense failed to create instance, Session not created by PXCSenseManager"));
			return;
		}

		// Query the SDK version and log it for debugging.
		ver = session->QueryVersion();
		UE_LOG(LogClass, Log, TEXT("Plugin v%s, Intel RSSDK Version %d.%d"), TEXT(PLUGIN_VERSION), ver.major, ver.minor);

		// Log all available modules that are automatically loaded with the RSSDK.
		for (int i = 0;; i++) {
			PXCSession::ImplDesc desc;
			if (session->QueryImpl(0, i, &desc) < PXC_STATUS_NO_ERROR) break;

			// Print the module friendly name and iuid (interface unique ID).
			UE_LOG(LogClass, Log, TEXT("Module[%d]: %s\n"), i, desc.friendlyName);
			UE_LOG(LogClass, Log, TEXT(" iuid = %x\n"), desc.iuid);
		}

		// Reusable UObjects updated in place each frame and handed to delegates.
		tempJoint = NewObject < URealSenseJoint >();
		tempHand = NewObject < URealSenseHand >();
	}
	~DataCollector()
	{
		psm->Release();
	}

	PXCSenseManager *psm;
	PXCHandModule *handModule;
	PXCHandData* handData;
	PXCFaceModule* faceModule;
	PXCFaceData* faceData;
	PXCSession *session;
	PXCSession::ImplVersion ver;
	URealSenseJoint* tempJoint;
	URealSenseHand* tempHand;
	PXCHandConfiguration* handConfig = NULL;
	UObject* interfaceDelegate;
};

// --- Init and Runtime ---

// Module startup: creates the collector and enables hand tracking immediately.
void FRealSensePlugin::StartupModule()
{
	// Make a new collector.
	collector = new DataCollector;

	// Enable hand tracking.
	EnableHandDetection();
	//EnableGestureDetection(true); //should be called instead
	//EnableFaceDetection();
}

void FRealSensePlugin::ShutdownModule()
{
	delete collector;
}

// For now this enables/disables all gestures; a future version will offer
// per-gesture control. Requires EnableHandDetection() to have run first.
void FRealSensePlugin::EnableGestureDetection(bool enableAll)
{
	PXCHandConfiguration* config = collector->handConfig;

	// Null check: hand tracking has to be enabled before we can enable gestures.
	if (config == NULL) return;

	// Load the gesture pack so the named gestures become available.
	config->LoadGesturePack(L"navigation");

	// Enable or disable all gestures in one shot.
	if (enableAll) config->EnableAllGestures();
	else config->DisableAllGestures();
	//single gesture enabling example
	//config->EnableGesture("tap");
	config->ApplyChanges();
	config->Update();
}

// Enables the RSSDK hand module, initializes the pipeline, and stores the
// resulting output/config handles on the collector.
void FRealSensePlugin::EnableHandDetection()
{
	// Local pointer to the sense manager.
	PXCSenseManager* pp = collector->psm;

	/* Set Module */
	pxcStatus status = pp->EnableHand(0);
	collector->handModule = pp->QueryHand();
	PXCHandModule* handModule = collector->handModule;

	if (handModule == NULL || status != pxcStatus::PXC_STATUS_NO_ERROR)
	{
		UE_LOG(LogClass, Log, TEXT("Failed to pair the gesture module with I/O"));
		return;
	}

	// Initialize the pipeline before creating the hand data output.
	if (pp->Init() >= PXC_STATUS_NO_ERROR)
	{
		collector->handData = collector->handModule->CreateOutput();
	}

	// Hand Module Configuration.
	PXCHandConfiguration* config = handModule->CreateActiveConfiguration();
	config->EnableNormalizedJoints(true);
	if (true) config->SetTrackingMode(PXCHandData::TRACKING_MODE_FULL_HAND);
	config->EnableAllAlerts();
	//config->EnableSegmentationImage(true);
	config->ApplyChanges();
	config->Update();
	collector->handConfig = config;

	//PXCHandData* outputData = handAnalyzer->CreateOutput();
	//outputData->Update();
}

// Enables the RSSDK face module; mirrors EnableHandDetection but face data is
// not yet consumed anywhere (see RealSenseTick).
void FRealSensePlugin::EnableFaceDetection()
{
	// Local pointer to the sense manager.
	PXCSenseManager* pp = collector->psm;

	pxcStatus status = pp->EnableFace();
	collector->faceModule = pp->QueryFace();
	PXCFaceModule* faceModule = collector->faceModule;

	if (faceModule == NULL || status != pxcStatus::PXC_STATUS_NO_ERROR)
	{
		UE_LOG(LogClass, Log, TEXT("Failed to pair the gesture module with I/O"));
		return;
	}

	if (pp->Init() >= PXC_STATUS_NO_ERROR)
	{
		collector->faceData = collector->faceModule->CreateOutput();
	}
}

// Registers the single event listener; the object must implement URealSenseInterface.
void FRealSensePlugin::SetInterfaceDelegate(UObject* object)
{
	if (object->GetClass()->ImplementsInterface(URealSenseInterface::StaticClass()))
	{
		collector->interfaceDelegate = object;
	}
}

// --- Delegate methods ---

// Per-frame pump: acquires one RSSDK frame, forwards hand/joint positions and
// fired gestures to the interface delegate, then releases the frame.
void FRealSensePlugin::RealSenseTick(float DeltaTime)
{
	//UE_LOG(LogClass, Log, TEXT("Plugin::RealSenseTick"));
	// Local pointer to the sense manager.
	PXCSenseManager* pp = collector->psm;

	// Grab a frame (blocking until all enabled modules are ready).
	pxcStatus sts = pp->AcquireFrame(true);

	// Check frame status; bail out without ReleaseFrame on failure.
	if (sts < PXC_STATUS_NO_ERROR) return;

	// Fetch the hand data output created by EnableHandDetection.
	PXCHandData* handData = collector->handData; //handModule->CreateOutput();
	if (handData == NULL) {
		UE_LOG(LogClass, Warning, TEXT("Failed to bind hand data"));
		return;
	}
	handData->Update();

	// Scratch storage; size 2 assumes at most two tracked hands — confirm vs SDK.
	PXCHandData::JointData nodes[2][PXCHandData::NUMBER_OF_JOINTS] = {};
	PXCHandData::ExtremityData extremitiesPointsNodes[2][PXCHandData::NUMBER_OF_EXTREMITIES] = {};

	// Iterate hands.
	for (pxcI32 i = 0; i < handData->QueryNumberOfHands(); i++)
	{
		// Get hand information.
		PXCHandData::IHand* hand;
		if (handData->QueryHandData(PXCHandData::AccessOrderType::ACCESS_ORDER_BY_TIME, i, hand) == PXC_STATUS_NO_ERROR)
		{
			// Set and emit hand events (tempHand is reused across hands/frames).
			collector->tempHand->setFromRealSenseHand(hand);
			IRealSenseInterface::Execute_HandMoved(collector->interfaceDelegate, collector->tempHand);

			// Iterate joints.
			PXCHandData::JointData jointData;
			for (int j = 0; j < PXCHandData::NUMBER_OF_JOINTS; j++)
			{
				hand->QueryTrackedJoint((PXCHandData::JointType)j, jointData);
				nodes[i][j] = jointData;

				// Copy into the reusable joint object and emit it.
				collector->tempJoint->setFromRealSenseJoint(&jointData);
				IRealSenseInterface::Execute_JointMoved(collector->interfaceDelegate, collector->tempJoint);
				//FVector vect = FVector(jointData.positionWorld.x, jointData.positionWorld.y, jointData.positionWorld.z);
				//UE_LOG(LogClass, Log, TEXT("Joint: %s"), *vect.ToString());
			}

			// Emit hand gesture events for each gesture fired this frame.
			PXCHandData::GestureData gestureData;

			// spreadfingers
			if (handData->IsGestureFired(L"spreadfingers", gestureData)) {
				IRealSenseInterface::Execute_SpreadFingersGestureDetected(collector->interfaceDelegate, collector->tempHand);
			}
			// fist
			if (handData->IsGestureFired(L"fist", gestureData)) {
				IRealSenseInterface::Execute_FistGestureDetected(collector->interfaceDelegate, collector->tempHand);
			}
			// tap
			if (handData->IsGestureFired(L"tap", gestureData)) {
				IRealSenseInterface::Execute_TapGestureDetected(collector->interfaceDelegate, collector->tempHand);
			}
			// thumb_down
			if (handData->IsGestureFired(L"thumb_down", gestureData)) {
				IRealSenseInterface::Execute_ThumbDownGestureDetected(collector->interfaceDelegate, collector->tempHand);
			}
			// thumb_up
			if (handData->IsGestureFired(L"thumb_up", gestureData)) {
				IRealSenseInterface::Execute_ThumbUpGestureDetected(collector->interfaceDelegate, collector->tempHand);
			}
			// two_fingers_pinch_open
			if (handData->IsGestureFired(L"two_fingers_pinch_open", gestureData)) {
				IRealSenseInterface::Execute_TwoFingersPinchOpenGestureDetected(collector->interfaceDelegate, collector->tempHand);
			}
			// v_sign
			if (handData->IsGestureFired(L"v_sign", gestureData)) {
				IRealSenseInterface::Execute_VSignGestureDetected(collector->interfaceDelegate, collector->tempHand);
			}
			// full_pinch
			if (handData->IsGestureFired(L"full_pinch", gestureData)) {
				IRealSenseInterface::Execute_FullPinchGestureDetected(collector->interfaceDelegate, collector->tempHand);
			}
			// swipe
			if (handData->IsGestureFired(L"swipe", gestureData)) {
				IRealSenseInterface::Execute_SwipeGestureDetected(collector->interfaceDelegate, collector->tempHand);
			}
			// wave
			if (handData->IsGestureFired(L"wave", gestureData)) {
				IRealSenseInterface::Execute_WaveGestureDetected(collector->interfaceDelegate, collector->tempHand);
			}
		}
	}

	// Grab face data (not implemented yet).

	// Resume next frame processing.
	pp->ReleaseFrame();
}<file_sep>#pragma once

class DataCollector;
//class RealSenseDelegate;

// Module implementation: owns the DataCollector and drives per-frame RSSDK processing.
class FRealSensePlugin : public IRealSensePlugin
{
public:
	/** IModuleInterface implementation */
	void StartupModule();
	void ShutdownModule();

	/** Delegate Method To subscribe to event calls, only supports one listener for now */
	//void SetDelegate(RealSenseDelegate* newDelegate);
	//void RemoveDelegate();

	/** Manual looping, currently called in main thread */
	void RealSenseTick(float DeltaTime);

	// Registers the single event listener (must implement URealSenseInterface).
	void SetInterfaceDelegate(UObject* object);

private:
	// Owned RSSDK state; created in StartupModule, destroyed in ShutdownModule.
	DataCollector *collector;

	void EnableHandDetection();
	void EnableGestureDetection(bool enableAll);
	void EnableFaceDetection();
};<file_sep>#pragma once
#include "pxcsensemanager.h"

//Conversion Utilities - Adjusted for UE orientation
FVector ueVectorFromRSVector(PXCPoint3DF32 in);
FRotator ueRotFromRSQuat(PXCPoint4DF32 rQuat);
2709ca26b103abbdfff4523cf8209e90bb06aaf8
[ "C", "C++" ]
6
C++
NoorAliraqi/realsense-ue4
6f3cfe458143902087862c9974ac83c0e9a257d2
af29907b5af6324c426d1028de9630e96ba5c1bd
refs/heads/main
<repo_name>bl2456/ChatroomApp<file_sep>/src/components/Options.jsx import React from 'react'; const Options = (props) => { console.log('this is OPTIONS'); return ( <div>hi</div> ); } export default Options;<file_sep>/src/components/ChatFeed.jsx import React, {useEffect, useRef} from 'react'; import MessageForm from './MessageForm'; import OutgoingMessage from './OutgoingMessage'; import IncomingMessage from './IncomingMessage'; import axios from 'axios'; const ChatFeed = (props) => { //destructuring props const { chats, activeChat, userName, messages} = props; //console.log(activeChat); //console.log(messages); //if users are in some chats, then get the active one const chat = chats && chats[activeChat]; //console.log(chat); //console.log(chat, userName, messages); //console.log(props) //The code block below is for removing users that have just closed their tabs instead of logging out const removeUserConfig = (roomId, name) => { return { method: 'put', url: `https://api.chatengine.io/chats/${roomId}/people/`, headers: { 'Project-ID': "870b77de-6cfe-4b98-bceb-b6c8343a389b", 'User-Name': 'Brian_Blue_Lu', 'User-Secret': 'abc123', }, data : { 'username': name } } } // const adminAnnoucementConfig = (roomId, username) => { // return{ // method: 'post', // url: `https://api.chatengine.io/chats/${roomId}/messages/`, // headers: { // 'Project-ID': "870b77de-6cfe-4b98-bceb-b6c8343a389b", // 'User-Name': 'Brian_Blue_Lu', // 'User-Secret': 'abc123', // }, // data : { // 'text' : `${username} has left the biome` // } // } // } const personLefted = (username) => { Object.keys(chats).forEach(async(key) => { try { await axios(removeUserConfig(key, username)); console.log(`${username} removed from chat ${key}`); // await axios(adminAnnoucementConfig(key, username)); // console.log('Brian posted leave message for animal'); } catch(error) { console.log(error); } }); } ////////////////////////////////////////////////////////////////////////////////////// const feed = useRef(null); 
const scrollToBottom = () => { if(chat){ //console.log(feed.current); feed.current.scrollTop = feed.current.scrollHeight; } else { return null; } } const renderReadReceipts = (message, isMyMessage) => { //console.log(chat); //return all who saw this message last return chat.people.map((person, index) => { if (person.person.is_online === false && person.person.username !== 'Brian_Blue_Lu'){ return null; } const color = person.person.username.split('_')[1]; //console.log(color); return (person.last_read === message.id) && ( <div key={`read-${index}`} className="read-receipt" style={{float: isMyMessage ? 'right' : 'left', backgroundColor: color}}/> ) }) } const renderMessages = () => { const keys = Object.keys(messages); //console.log(keys); return keys.map((key, index) => { const message = messages[key]; //console.log(message); //find previous message //useful for not rendering the username/profilepic again if prev msg is from same person const lastMessageKey = (index === 0) ? null : keys[index - 1]; //console.log(lastMessageKey); // did "client" send this message? const isMyMessage = userName === message.sender.username; return ( <div key={`msg-${index}`} style={{width: '100%'}}> <div className="message-block"> {/* if it message by client, render OutgoingMessage, otherwise render IncomingMessage */} {isMyMessage ? <OutgoingMessage message={message}/> : <IncomingMessage message={message} lastMessage={messages[lastMessageKey]}/> } </div> <div className="read-receipts" style={{ marginRight: isMyMessage ? '15px' : '0px', marginLeft: isMyMessage ? '0px' : '68px' }}> {renderReadReceipts(message, isMyMessage)} </div> </div> ); }) } useEffect( () => {scrollToBottom()}, [messages]); if(!chat) return 'Loading ...'; return ( <> {(!chat) ? 'Loading ...' 
: <div className="chat-feed" ref={feed}> {chat?.people.forEach(person => { if (person.person.is_online === false && person.person.username !== 'Brian_Blue_Lu' && person.last_read){ personLefted(person.person.username); } })} {/*Header Section */} <div className='chat-title-container'> <div className="chat-title"> {chat.title} </div> <div className="chat-subtitle"> {chat.people.map((person) => { return ` ${person.person.username}`; })} </div> </div> {/*Messages Section */} {renderMessages()} <div style={{height:'100px'}} /> {/*Form Section*/} <div className="message-form-container"> <MessageForm {... props} chatId={activeChat} /> </div> </div> } </> ) } export default ChatFeed; <file_sep>/src/App.js import { ChatEngine } from 'react-chat-engine'; import './App.css'; import ChatFeed from './components/ChatFeed'; import JoinForm from './components/JoinForm'; import Options from './components/Options'; import AddChat from './components/AddChat'; import PeopleSettings from './components/PeopleSettings'; import MessageAlert from './assets/audio/click.mp3'; const App = () => { //If we are not "logged in", render join form if(!sessionStorage.getItem('zooUsername')){ return <JoinForm /> } //else render the chat UI return ( <ChatEngine height="100vh" projectID="870b77de-6cfe-4b98-bceb-b6c8343a389b" userName={sessionStorage.getItem('zooUsername')} userSecret={sessionStorage.getItem('zooPassword')} renderChatFeed={(props) => <ChatFeed {...props}/>} renderPeopleSettings={(creds,chat) => <PeopleSettings creds={creds} chat={chat} />} renderOptionsSettings={(creds, chat) => <Options creds={creds} chat={chat} />} renderNewChatForm={(creds) => <AddChat {...creds} />} onNewMessage={() => new Audio(MessageAlert).play()} onEditChat={(chatId) => console.log(chatId)} /> //ChatEngine props taken // height - height of the component // projectID - your project id given on chatengine.io // userName - username on chatengine.io // userSecret - user password on chatengine.io // renderChatFeed - 
use your own created component to display chat ); } export default App; //exports the App component whenever another file calls (import 'App.js')<file_sep>/src/components/MessageForm.jsx import React from 'react'; import {useState} from 'react'; import {sendMessage, isTyping} from 'react-chat-engine'; import {SendOutlined, PictureOutlined} from '@ant-design/icons'; const MessageForm = (props) => { //value is the getter, setValue is the setter for the values in the input element const [value, setValue] = useState(''); const {chatId, creds} = props; // console.log('creds is'); // console.log(creds); const handleSubmit = (e) => { //stop the default page refresh when we submit html forms e.preventDefault(); //remove leading and ending white spaces const message = value.trim(); if(message.length > 0){ //sendMessage takes in credentials, the chat id, and an object with the message //https://chatengine.io/docs/functions#send_message sendMessage(creds, chatId, {'text' : message}); } setValue(''); }; const handleChange = (e) => { //on every keystroke event, save value in input inside useState //console.log(e.target.value); setValue(e.target.value); isTyping(props, chatId); }; //for handling image uploads const handleUpload = (e) => { sendMessage(creds, chatId, {'files': e.target.files, 'text': ''}); } return( <form className="message-form" onSubmit={handleSubmit}> {/* text input */} <input className="message-input" placeholder="Chat away :D !" 
value={value} onChange={handleChange} onSubmit={handleSubmit}/> {/* image input */} <label htmlFor="upload-button"> <span className="image-button"> <PictureOutlined className="picture-icon"/> </span> </label> <input id="upload-button" type="file" multiple={false} style={{display: 'none'}} onChange={handleUpload}/> <button className='send-button' type='submit'> <SendOutlined className="send-icon" /> </button> </form> ); }; export default MessageForm;<file_sep>/src/components/PeopleSettings.jsx import React from 'react'; const PeopleSettings = ({creds, chat}) => { // console.log('hi'); //console.log(chat); const people = chat.people; //console.log(people); return ( <div className='ce-people-section'> <div className='ce-section-title-container'> <div className='ce-section-title'> People </div> </div> <div style={{height: '12px'}} /> <div className='ce-people-list'> {people.map((person, index) => { // if (person.person.is_online === false && person.person.username !== 'Brian_Blue_Lu'){ // return null; // } const personNameInfo = person.person.username.split('_'); const color = personNameInfo[1]; return ( <div className='ce-person-container' key={index}> <div className='ce-person-avatar' style={{backgroundColor: color}}> <div className='ce-person-avatar-text'> {`${personNameInfo[0][0]}${personNameInfo[1][0]}${personNameInfo[2][0]}`} </div> </div> <div className='ce-person-text'> {person.person.username} </div> </div> ) })} </div> <div style={{height: '12px'}} /> </div> ); } export default PeopleSettings;<file_sep>/src/index.js import React from 'react'; import ReactDOM from 'react-dom'; import App from './App'; //import App component from same file ReactDOM.render(<App />, document.getElementById('root')); //mounting App component to element with id 'root' in the html<file_sep>/src/components/IncomingMessage.jsx import React from 'react'; const IncomingMessage = ({message, lastMessage}) => { //is this the user's first message? 
const isUserFirstMessage = !lastMessage || (lastMessage.sender.username !== message.sender.username); const userName = message.sender.username.split('_'); const adj = userName[0]; const color = userName[1]; const name = userName[2]; // console.log(message.id); // console.log(lastMessage); return( <> {isUserFirstMessage && ( <div className="message-username" style={{color: color}}>{message.sender.username}</div> )} <div className="message-row"> {/* Include "avatar" if it is first message */} {isUserFirstMessage && ( <div className="message-avatar" style={{backgroundColor: color}}> <div className="message-avatar-text">{`${adj[0]}${color[0]}${name[0]}`}</div> </div> )} {/* Message structure same as outgoing message, so copied code from there Check if image*/} {(message.attachments.length > 0) ? (<img src={message.attachments[0].file} alt="message-attachment" className="message-image" style={{ marginLeft: isUserFirstMessage ? '4px' : '48px'}}/>) : (<div className="message" style={{ marginLeft: isUserFirstMessage ? '4px' : '48px', float: 'left', backgroundColor: '#CABCDC'}} > {message.text} </div>) } </div> </> ); }; export default IncomingMessage;
167a0d5bb7dbabe3d486254fcba65c3bf2e85e87
[ "JavaScript" ]
7
JavaScript
bl2456/ChatroomApp
5d1af663e6bbaeebd2ac4774c6aaf7575124e174
4ecd14a8f7487c01a6f7988072b90a874642cb1f
refs/heads/master
<file_sep>import pandas as pd import os import glob from os import path class ConsolidateXLSX: def __init__(self, consolidate_from_path, save_to_path): self.consolidate_from_path = consolidate_from_path self.save_to_path = save_to_path self.data='' self.get_data() def get_data(self): print("Getting Data") all_data = pd.DataFrame() files = glob.glob(self.consolidate_from_path) for f in files: print("Loading file: ",f) df = pd.read_excel(f) df.dropna(how="all",inplace=True) all_data = all_data.append(df, ignore_index=True) self.data = all_data return all_data def save_to(self): if path.exists(self.save_to_path): print("Removing old file") os.remove(self.save_to_path) writer = pd.ExcelWriter(self.save_to_path) self.data.to_excel(writer,'consolidado', header=False, index=False) print("Creating consolidated file") writer.save() print("Finished") <file_sep>from classes.ConsolidateXLSX import ConsolidateXLSX ConsolidateXLSX(r"D:\files\originals\*.xlsx", r"D:\files\consolidated.xlsx").save_to()
c95fb3d84031bafe5550e2da470d70821d2c3fd4
[ "Python" ]
2
Python
wildiney/py-xlsx-consolidate
b3e6eed21d7f95d6731e3577bc8b2cf878c63a83
63fe02eb87c51c1067624fa2c31836bbf96ca46b
refs/heads/master
<repo_name>mohit138/Ball-Detection<file_sep>/Ball Detection/ballDetection.py # ball detect import cv2 import imutils import argparse import numpy as np import math ap = argparse.ArgumentParser() ap.add_argument("-i", "--image", required=True, help="path to input image") args = vars(ap.parse_args()) image = cv2.imread(args["image"]) (h,w,d)=image.shape w=int(.15*w) h=int(.15*h) image = cv2.resize(image,(w,h)) cv2.imshow("Image",image) blur = cv2.GaussianBlur(image,(9,9),0) #cv2.imshow("blur",blur) hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV) #cv2.imshow("hsv",hsv) lower_red = np.array([ 134, 135, 100]) upper_red = np.array([ 215, 215, 256]) mask = cv2.inRange(hsv, lower_red, upper_red) #cv2.imshow("mask",mask) mask = cv2.dilate(mask, None, iterations=12) cv2.imshow("mask_dilated",mask) mask = cv2.erode(mask, None, iterations=12) cv2.imshow("mask_eroded",mask) """ cnts = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) cnts = imutils.grab_contours(cnts) output=image.copy() cv2.drawContours(output, cnts, -1, (0,255,0),3) cv2.imshow("output",output) """ # below are various ways to use # 1. find approx center of contour,( if only a single contour is detected.) """ cnt,hierarchy = cv2.findContours(mask.copy(), 1,2) print(cnt) cntr=cnt[0] M = cv2.moments(cntr) cx = int(M['m10']/M['m00']) cy = int(M['m01']/M['m00']) print("{} {}".format(cx,cy)) cv2.circle(image, (cx,cy),5,(0,255,0),-1) #center of ball cv2.circle(image, (cx,cy),37,(0,255,0),2) # ball cv2.imshow("ball",image) """ """ # 2. finding center of largest contour output = image.copy() cnt,hierarchy = cv2.findContours(mask.copy(), 1,2) c = max(cnt, key= cv2.contourArea) M = cv2.moments(c) cx = int(M['m10']/M['m00']) cy = int(M['m01']/M['m00']) area = cv2.contourArea(c) radius=math.sqrt(area/3.1415) print("{} {}".format(cx,cy)) cv2.circle(image, (cx,cy),5,(0,255,0),-1) #center of ball cv2.circle(image, (cx,cy),int(radius),(0,255,0),2) # ball cv2.imshow("ball",image) """ #3. 
maximum area is made convex from all sides. using convex hull """ output = image.copy() cnt,hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) c = max(cnt, key= cv2.contourArea) hull=cv2.convexHull(c) cv2.drawContours(output, hull, -1, (0,255,0),3) cv2.imshow("contour",output) M = cv2.moments(hull) cx = int(M['m10']/M['m00']) cy = int(M['m01']/M['m00']) area = cv2.contourArea(c) radius=math.sqrt(area/3.1415) print("{} {}".format(cx,cy)) cv2.circle(image, (cx,cy),5,(0,255,0),-1) #center of ball cv2.circle(image, (cx,cy),int(radius),(0,255,0),2) # ball cv2.imshow("ball",image) """ #4. maximum contour is used, to obtain approx radius and center output = image.copy() cnt,hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) c = max(cnt, key= cv2.contourArea) hull=cv2.convexHull(c) cv2.drawContours(output, hull, -1, (0,255,0),3) cv2.imshow("output",output) (cx,cy),radius = cv2.minEnclosingCircle(hull) center=(int(cx),int(cy)) radius=int(radius) cv2.circle(image, center,5,(0,255,0),-1) #center of ball cv2.circle(image, center,radius,(0,255,0),2) # ball cv2.imshow("ball",image) cv2.waitKey(0) <file_sep>/README.md # Ball-Detection Ball is detected with help of open cv. 
<file_sep>/Ball Detection/ballDetectionVideo.py import cv2 import imutils import argparse import numpy as np import argparse ap = argparse.ArgumentParser() ap.add_argument("-v", "--video", help="path to the (optional) video file") ap.add_argument("-b", "--buffer", type=int, default=64, help="max buffer size") args = vars(ap.parse_args()) cap=cv2.VideoCapture(args["video"]) #cap = cv2.VideoCapture("E:\cv_stuff\Ball Detection and Tracing\Ball Detection\ball.mp4") #cap=cv2.VideoCapture(0) w=cap.get(3) h=cap.get(4) print("{} {}".format(w,h)) w=int(.2*w) h=int(.2*h) print("{} {}".format(w,h)) if (cap.isOpened()==False): print("Error opening video file") out = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (w,h)) while(cap.isOpened()): ret, frame= cap.read() frame = cv2.resize(frame,(w,h)) if ret == True: frame = cv2.resize(frame,(w,h)) #cv2.imshow("frame",frame) blur = cv2.GaussianBlur(frame,(9,9),0) #cv2.imshow("blur",blur) gray=cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY) # cv2.imshow("gray",gray) # thresh=cv2.threshold(gray,80,255,cv2.THRESH_BINARY_INV)[1] # cv2.imshow("thresh",thresh) # edged=cv2.Canny(blur,20,100) # cv2.imshow("edged",edged) hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV) # cv2.imshow("hsv",hsv) lower_red = np.array([ 134, 135, 100]) upper_red = np.array([215, 215, 256]) mask = cv2.inRange(hsv, lower_red, upper_red) #cv2.imshow("mask",mask) mask = cv2.dilate(mask, None, iterations=10) #cv2.imshow("mask",mask) mask = cv2.erode(mask, None, iterations=10) #cv2.imshow("mask",mask) # using making contours, not so accurate, but can work # used moment to obtain the center of ball """ cnt,hierarchy = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) # cnts = imutils.grab_contours(cnts) output=frame.copy() c = max(cnt, key= cv2.contourArea) hull=cv2.convexHull(c) cv2.drawContours(output, hull, -1, (0,255,0),3) cv2.imshow("output",output) M = cv2.moments(c) cx = int(M['m10']/M['m00']) cy = int(M['m01']/M['m00']) 
print("{} {}".format(cx,cy)) cv2.circle(frame, (cx,cy),5,(0,255,0),-1) cv2.circle(frame, (cx,cy),44,(0,255,0),2) # ball cv2.imshow("ball",frame) """ # used direct function to obtain the centre cnt,hierarchy = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) c = max(cnt, key= cv2.contourArea) hull=cv2.convexHull(c) (cx,cy),radius = cv2.minEnclosingCircle(hull) center=(int(cx),int(cy)) radius=int(radius) cv2.circle(frame, center,5,(0,255,0),-1) #center of ball cv2.circle(frame, center,radius,(0,255,0),2) # ball cv2.imshow("ball",frame) out.write(frame) # cv2.drawContours(output, cnts, -1, (0,255,0),3) # cv2.imshow("output",output) if cv2.waitKey(1) & 0xFF == ord('q'): break else: break cap.release() cv2.destroyAllWindows()
a0acb64d79925ef693242f63b931cc0301634656
[ "Markdown", "Python" ]
3
Python
mohit138/Ball-Detection
a187938a4e9cd10e10596d8eeec4d0262fbd029a
817506c62027468adbf12a384d08cf6dc32fd78c
refs/heads/master
<repo_name>annielya/cart<file_sep>/src/reducer/cart.js import {ADD, INCREASE, DECREASE, REMOVE, SEARCH} from '../action/action-types' const items = [ {id: 1, name: "Apple", description:"Eat one everyday, keep the doctor away", price: 12, }, {id: 2, name: "Grape", description:"Wine is great, but grape is better", price: 11}, {id: 3, name: "Pineapple", description:"Enjoy but don't forget to peer first", price: 8} ] const initialStage = {items, addedItems:[], totalPrice : 0, totalItem: 0, search: ""}; const reducer = (state = initialStage, action)=>{ switch(action.type){ case ADD: { let item = state.addedItems.find(item => item.id === action.id) if(!item){ let newItem = state.items.find(item => item.id === action.id) let newPrice = state.totalPrice + newItem.price let newTotal = state.totalItem + 1 newItem.quantity = 1 return { ...state, addedItems: [...state.addedItems, newItem], totalPrice: newPrice, totalItem: newTotal } }else { item.quantity++ let newPrice = state.totalPrice + item.price let newTotal = state.totalItem + 1 return { ...state, totalPrice: newPrice, totalItem: newTotal } } } case INCREASE: { let item = state.addedItems.find(item => item.id === action.id) item.quantity++ let newPrice = state.totalPrice + item.price let newTotal = state.totalItem + 1 return { ...state, addedItems: [...state.addedItems], totalPrice: newPrice, totalItem: newTotal } } case DECREASE : { let item = state.addedItems.find(item => item.id === action.id) let newPrice = state.totalPrice - item.price let newTotal = state.totalItem - 1 if( item.quantity-- > 1) { return { ...state, addedItems: [...state.addedItems], totalPrice: newPrice, totalItem: newTotal } } else { let newItems = state.addedItems.filter(item => item.id !== action.id) return { ...state, addedItems: newItems, totalPrice: newPrice, totalItem: newTotal } } } case REMOVE: { let newItems = state.addedItems.filter(item => item.id !== action.id) let item = state.addedItems.find(item => item.id === action.id) let newPrice 
= state.totalPrice - item.price * item.quantity let newTotal = state.totalItem - item.quantity return { ...state, addedItems: newItems, totalPrice: newPrice, totalItem: newTotal } } case SEARCH: { let newSearch = state.items.filter(item=> item.name.toLowerCase().includes(action.search.toLowerCase()) === true); if(action.search === ""){ return { ...state, items: items } } return { ...state, items: newSearch } } default: { return state } } } export default reducer;<file_sep>/src/component/Cart.js import React, { Component } from "react"; import { Link } from 'react-router-dom' import { connect } from 'react-redux' import {addToCart, increase, decrease, remove} from '../action/action' class Cart extends Component { renderlist = () => { return ( this.props.item.map(item => { return ( <tr key={item.id}> <th>{item.name}</th> <th> <button onClick={()=> this.props.decrease(item.id)}>-</button> {item.quantity} <button onClick={()=> this.props.increase(item.id)}>+</button> </th> <th>$ {item.price}</th> <th><button onClick={()=> this.props.remove(item.id)}>Remove</button></th> </tr> ) }) ) } render(){ return ( <div> <p>Total Item: {this.props.count}</p> <p>Total Price: {this.props.price}</p> <table> <thead> <tr> <th>Name</th> <th>Quantity</th> <th>Price</th> <th><Link to="/">Back To Home</Link></th> </tr> </thead> <tbody>{this.renderlist()}</tbody> </table> </div> ) } } const mapStateToProps = (state) => { return { item: state.addedItems, price: state.totalPrice, count: state.totalItem } } const mapDispatchToProps = (dispatch) => { return { increase: (id) => dispatch(increase(id)), decrease: (id) => dispatch(decrease(id)), remove: (id) => dispatch(remove(id)) } } export default connect(mapStateToProps, mapDispatchToProps)(Cart);
4debf61192ac558aa5e4960587e0eb488fb6b910
[ "JavaScript" ]
2
JavaScript
annielya/cart
9fa1006f38a1f4cb3ab765a04b9cffe37f056bd0
a6ec923d2051ff0c93759576d7cb69ea1f9c6c2a
refs/heads/master
<file_sep>// 获取调用链 function getStackTrace() { var Exception = Java.use("java.lang.Exception"); var ins = Exception.$new("Exception"); var straces = ins.getStackTrace(); if (undefined == straces || null == straces) { return; } var result = ""; for (var i = 0; i < straces.length; i++) { var str = " " + straces[i].toString(); result += str + "\r\n"; } Exception.$dispose(); return result; } //告警发送 function alertSend(action, messages) { var myDate = new Date(); var _time = myDate.getFullYear() + "-" + myDate.getMonth() + "-" + myDate.getDate() + " " + myDate.getHours() + ":" + myDate.getMinutes() + ":" + myDate.getSeconds(); send({"type": "notice", "time": _time, "action": action, "messages": messages, "stacks": getStackTrace()}); } // APP申请权限 function checkRequestPermission() { var ActivityCompat = Java.use("androidx.core.app.ActivityCompat") ActivityCompat.requestPermissions.overload('android.app.Activity', '[Ljava.lang.String;', 'int').implementation = function (p1, p2, p3) { var temp = this.requestPermissions(p1, p2, p3); alertSend("APP申请权限", "申请权限为: " + p2); return temp } } // APP获取IMEI/IMSI function getPhoneState() { var TelephonyManager = Java.use("android.telephony.TelephonyManager"); // API level 26 获取单个IMEI的方法 TelephonyManager.getDeviceId.overload().implementation = function () { var temp = this.getDeviceId(); alertSend("获取IMEI", "获取的IMEI为: " + temp) return temp; }; //API level 26 获取多个IMEI的方法 TelephonyManager.getDeviceId.overload('int').implementation = function (p) { var temp = this.getDeviceId(p); alertSend("获取IMEI", "获取(" + p + ")的IMEI为: " + temp); return temp; }; //API LEVEL26以上的获取单个IMEI方法 TelephonyManager.getImei.overload().implementation = function () { var temp = this.getImei(); alertSend("获取IMEI", "获取的IMEI为: " + temp) return temp; }; // API LEVEL26以上的获取多个IMEI方法 TelephonyManager.getImei.overload('int').implementation = function (p) { var temp = this.getImei(p); alertSend("获取IMEI", "获取(" + p + ")的IMEI为: " + temp); return temp; }; //imsi 
TelephonyManager.getSimSerialNumber.overload().implementation = function () { var temp = this.getSimSerialNumber(); alertSend("获取IMSI", "获取IMSI为(String): " + temp); return temp; }; //imsi TelephonyManager.getSubscriberId.overload().implementation = function () { var temp = this.getSubscriberId(); alertSend("获取IMSI", "获取IMSI为(int): " + temp); return temp; } // //imsi TelephonyManager.getSimSerialNumber.overload('int').implementation = function (p) { var temp = this.getSimSerialNumber(p); alertSend("获取IMSI", "参数为:(" + p + "), 获取IMSI为(int): " + temp); return temp; }; } // 获取Mac地址 function getMacAddress() { var WifiInfo = Java.use("android.net.wifi.WifiInfo"); WifiInfo.getMacAddress.implementation = function () { var temp = this.getMacAddress(); alertSend("获取Mac地址", "获取到的Mac地址: " + temp) return temp; }; var NetworkInterface = Java.use("java.net.NetworkInterface"); NetworkInterface.getHardwareAddress.overload().implementation = function () { var temp = this.getHardwareAddress(); alertSend("获取Mac地址", "获取到的Mac地址: " + temp) return temp; }; } // 获取系统属性(记录关键的) function getSystemProperties() { var SystemProperties = Java.use("android.os.SystemProperties"); SystemProperties.get.overload('java.lang.String').implementation = function (p1) { var temp = this.get(p1); if (p1 == "ro.serialno") { alertSend("获取设备序列号", "获取(" + p1 + "),值为:" + temp); } return temp; } SystemProperties.get.overload('java.lang.String', 'java.lang.String').implementation = function (p1, p2) { var temp = this.get(p1, p2) if (p1 == "ro.serialno") { alertSend("获取设备序列号", "获取(" + p1 + " 、 " + p2 + "),值为:" + temp); } return temp; } } //获取手机通信录 function getPhoneAddressBook() { var contacts_uri = Java.use("android.provider.ContactsContract$Contacts").CONTENT_URI.value.toString(); var contentResolver = Java.use("android.content.ContentResolver"); contentResolver.query.overload('android.net.Uri', '[Ljava.lang.String;', 'android.os.Bundle', 'android.os.CancellationSignal').implementation = function (uri, str, bundle, 
sig) { if (uri == contacts_uri) { alertSend("获取手机通信录", "获取uri为:" + uri) } return this.query(uri, str, bundle, sig); } } // 获取安卓ID function getAndroidId() { var SettingsSecure = Java.use("android.provider.Settings$Secure"); SettingsSecure.getString.implementation = function (p1, p2) { if (p2.indexOf("android_id") < 0) { return this.getString(p1, p2); } var temp = this.getString(p1, p2); alertSend("获取Android ID", "参数为:" + p2 + ",获取到的ID为:" + temp); return temp; } } //获取其他app信息 function getPackageManager() { var PackageManager = Java.use("android.content.pm.PackageManager"); var ApplicationPackageManager = Java.use("android.app.ApplicationPackageManager"); PackageManager.getInstalledPackages.overload('int').implementation = function (p1) { var temp = this.getInstalledPackages(p1); alertSend("获取其他app信息", "1获取的数据为:" + temp); return temp; }; PackageManager.getInstalledApplications.overload('int').implementation = function (p1) { var temp = this.getInstalledApplications(p1); alertSend("获取其他app信息", "getInstalledApplications获取的数据为:" + temp); return temp; }; ApplicationPackageManager.getInstalledPackages.overload('int').implementation = function (p1) { var temp = this.getInstalledPackages(p1); alertSend("获取其他app信息", "getInstalledPackages获取的数据为:" + temp); return temp; }; ApplicationPackageManager.getInstalledApplications.overload('int').implementation = function (p1) { var temp = this.getInstalledApplications(p1); alertSend("获取其他app信息", "getInstalledApplications获取的数据为:" + temp); return temp; }; ApplicationPackageManager.queryIntentActivities.implementation = function (p1, p2) { var temp = this.queryIntentActivities(p1, p2); alertSend("获取其他app信息", "参数为:" + p1 + p2 + ",queryIntentActivities获取的数据为:" + temp); return temp; }; ApplicationPackageManager.getApplicationInfo.implementation = function (p1, p2) { var temp = this.getApplicationInfo(p1, p2); var string_to_recv; // 判断是否为自身应用,是的话不记录 send({"type": "app_name", "data": p1}); recv(function (received_json_object) { string_to_recv 
= received_json_object.my_data; }).wait(); if (string_to_recv) { alertSend("获取其他app信息", "getApplicationInfo获取的数据为:" + temp); } return temp; }; } // 获取位置信息 function getGSP() { var locationManager = Java.use("android.location.LocationManager"); locationManager.getLastKnownLocation.overload("java.lang.String").implementation = function (p1) { var temp = this.getLastKnownLocation(p1); alertSend("获取位置信息", "获取位置信息,参数为:" + p1) return temp; } locationManager.requestLocationUpdates.overload("java.lang.String", "long", "float", "android.location.LocationListener").implementation = function (p1, p2, p3, p4) { var temp = this.requestLocationUpdates(p1, p2, p3, p4); alertSend("获取位置信息", "获取位置信息") return temp; } } // 调用摄像头(hook,防止静默拍照) function getCamera() { var Camera = Java.use("android.hardware.Camera"); Camera.open.overload("int").implementation = function (p1) { var temp = this.open(p1); alertSend("调用摄像头", "调用摄像头id:" + p1.toString()) return temp; } } function main() { Java.perform(function () { console.log("合规检测敏感接口开始监控..."); send({"type": "isHook"}) checkRequestPermission(); getPhoneState(); getMacAddress(); getSystemProperties(); getPhoneAddressBook(); getAndroidId(); getPackageManager(); getGSP(); getCamera(); }); } //在spawn模式下,hook系统API时如javax.crypto.Cipher建议使用setImmediate立即执行,不需要延时 //在spawn模式下,hook应用自己的函数或含壳时,建议使用setTimeout并给出适当的延时(500~5000) // main(); //setImmediate(main) // setTimeout(main, 3000);
405df0d1cf27bd486ac2c1a1e05ce995fa116d47
[ "JavaScript" ]
1
JavaScript
Crewcutbro/camille
2139f1e9ae315cea8dd1942f11b4bb67b334763d
2c272b360ee0c1e9335f804bf0542bcd31c2bb1c
refs/heads/master
<repo_name>loko64z/cqs<file_sep>/app/cambio_contra.php <!DOCTYPE html> <html lang="es-ES"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0, user-scalable=0"> <title>Cambio contraseña | CQS</title> <?php require_once('include/header.php'); ?> </head> <body class="page page-child page-template-default w1170 no-header-image no-sidebar"> <div id='top'></div> <div class="page-wrapper"> <?php require_once('include/menu_principal.php'); ?> <section id="header-space"></section> <section id="page-header" class="style2 white" style="background-image: url(img/bread2.jpg)"> <div class="container_16"> <h1>Cambio de contraseña</h1> <p></p> <div class="page-path"> <div id="crumbs"><span typeof="v:Breadcrumb"> <a rel="v:url" property="v:title" href="http://cqslab.com/">Inicio</a></span> <span class="delimiter">|</span> <span typeof="v:Breadcrumb"> <span class="delimiter">|</span> <span class="current">Cambio de contraseña</span> </div> </div> </div> </section> <section id="main" class="container_16"> <div id="post-body" class="blog "> <div id="post-body-padding"> <div class="post-text-full"> <h2 class="title">Cambio contraseña</h2> <p>Puedes cambiar la contraseña de tu cuenta de CQS en cualquier momento. 
Basta con introducir la nueva contraseña en los campos inferiores.</p> <form method="post" id="login_form" class="login" action="service_requests/chg_pwd.php"> <div class="input-field theme-one-half"> <input id="password1" name="password1" placeholder="<PASSWORD>" type="password" value=""> </div> <div class="input-field theme-one-half theme-column-last"> <input id="password2" type="password" name="password2" placeholder="<PASSWORD>" value=""> </div> <div class="clear"></div> <input type="hidden" id="_wpnonce" name="_wpnonce" value="451e4d20ee"><input type="hidden" name="_wp_http_referer" value="/contact-us/"> <span class="sc-button sc-default fa fa-play pull-right" style="background-color:#a92037"> <span><input value="Enviar" id="cambiar" type="submit"></span> </span> <div style="margin:30px; float:left;"></div> </form> </div> </div> </div> </section> <?php require_once('include/footer.php'); ?> </div> <div class="media_for_js"></div> <?php require_once('include/javascript.php'); ?></body> <script type="text/javascript"> $("#cambiar").click(function() { if( $("#password1").value == $("#password2").value){ if($("#password1").value.length > 6){ $("#login_form").submit(); }else{ alert("La contraseña es demasiado corta"); } }else{ alert("Las contraseñas deben coincidir"); } }); </script> </html><file_sep>/app/service_requests/cargar_bonos.php <?php require_once "redirect.php"; session_start(); $patid =$_SESSION["pat_id"]; $data = array("patid" => $patid); $data = enviar("cargar_bonos", $data); ?><file_sep>/app/include/observaciones.php <?php $tipobono = $_GET['tipobono']; if(!$tipobono) { return false; } $observaciones = array( 1 => array('At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. 
At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.'), 2 => array('At vero eos et accusam et justo duo dolores et ea rebum.'), 3 => array('Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.'), 4 => array('Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.'), 5 => array('At vero eos et accusam et justo duo dolores et ea rebum. 
Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.') ); $observacionactual = $observaciones[$tipobono]; ?> <?php foreach($observacionactual as $observacion) { ?> <p><?php echo $observacion; ?></p> <?php } ?> <file_sep>/app/alta_cita.php <!DOCTYPE html> <html lang="es-ES"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0, user-scalable=0"> <title>Pedir cita | CQS</title> <?php require_once('include/header.php'); ?> <link href="http://ajax.googleapis.com/ajax/libs/jqueryui/1.8/themes/base/jquery-ui.css" rel="stylesheet" type="text/css"/> </head> <body class="page page-child page-template-default w1170 no-header-image no-sidebar"> <div id='top'></div> <div class="page-wrapper"> <?php require_once('include/menu_principal.php'); ?> <section id="header-space"></section> <section id="page-header" class="style2 white" style="background-image: url(img/bread2.jpg)"> <div class="container_16"> <h1>Pedir cita</h1> <p></p> <div class="page-path"> <div id="crumbs"><span typeof="v:Breadcrumb"> <a rel="v:url" property="v:title" href="index.php">Inicio</a></span> <span class="delimiter">|</span> <span typeof="v:Breadcrumb"> <span class="delimiter">|</span> <span class="current">Pedir cita</span> </div> </div> </div> </section> <section id="main" class="container_16"> <div id="post-body" class="blog "> <div id="post-body-padding"> <div class="post-text-full"> <h2 class="title" data-animation="">Pedir cita</h2> <div class="theme-one-third"><h6>Tipo de servicio: </h6> <select class="form-control input-medium" name="tipo"> <option value="">Selecciona...</option> <option value="tipo1">Fisioterapia 30"</option> <option value="tipo2">Fisioterapia 50"</option> <option value="tipo3">Pilates</option> </select></div> <div class="theme-one-third"><h6>Desde:</h6> <input type="text" id="datepicker" class="form-control" name="desde" value=""> </div> <div class="theme-one-third 
theme-column-last"><h6>Hasta:</h6> <input type="text" id="datepicker1" class="form-control" name="hasta" value=""> </div> <div class="clear"></div> <a class="sc-button sc-default sc-orange fa fa-plus " data-animation="" href="#" target="_blank" style="background-color:#a92037"><span>Buscar</span></a> </div> <div id="tabla_citas" class="sc-table sc-dark-header " data-animation=""> <div class="sc-row"> <div class="sc-col">#</div> <div class="sc-col">Fecha</div> <div class="sc-col">Hora</div> <div class="sc-col">Lugar</div> <div class="sc-col">Estado</div> <div class="sc-col">Observaciones</div> </div> </div> <!-- .sc-tabla --> </div> </div> </section> <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.6.2/jquery.min.js"></script> <?php require_once('include/footer.php'); ?> </div> <div class="media_for_js"></div> <?php require_once('include/javascript.php'); ?> <script type="text/javascript" src="js/jquery-ui.min.js"></script> <script type="text/javascript"> $(document).ready(function() { $.ajax({ url: "service_requests/cargar_citas.php", protocol: "http", type: "POST", data: { patient: $("#usr_id").val() }, dataType: "JSON", success: function (jsonStr) { var trHTML = ''; var i = 1; $.each(jsonStr.data.rows, function (i, item) { var office = item.office; if(!office){ office = ""; } trHTML += ' <div class="sc-row"> <div class="sc-col">' + i + '</div> <div class="sc-col">' + item.date + '</div> <div class="sc-col">' + item.tmbegin + '</div> <div class="sc-col">' + office +'</div> <div class="sc-col">' + item.status +'</div> <div class="sc-col">' + item.stype + '</div></div>' i++; }); $('#tabla_citas').append(trHTML); } }); }); </script> <script> $(document).ready(function() { $("#datepicker").datepicker({ changeMonth: true, changeYear: true, maxDate: '12m 0d', minDate: new Date(2015, 1 - 1, 1), dateFormat: 'dd-mm-yy', showOn: 'button', buttonImage: "img/calendar.png", buttonImageOnly: true, autoSize: true, altFormat: 'mm-dd-yy', setDate: 
new Date(), beforeShowDay: $.datepicker.noWeekends, // El finde no te deja elegirlo firstDay: 1 // Empieza el lunes }) $("#datepicker1").datepicker({ changeMonth: true, changeYear: true, maxDate: '12m 0d', minDate: new Date(2015, 1 - 1, 1), dateFormat: 'dd-mm-yy', showOn: 'button', buttonImage: "img/calendar.png", buttonImageOnly: true, autoSize: true, altFormat: 'mm-dd-yy', setDate: new Date(), beforeShowDay: $.datepicker.noWeekends, // El finde no te deja elegirlo firstDay: 1 // Empieza el lunes }) }); </script> </body> </html><file_sep>/app/service_requests/cargar_citas.php <?php require_once "redirect.php"; session_start(); $patid =$_SESSION["pat_id"]; $data = array("patient" => intval($patid)); $data = enviar("cargar_citas", $data); ?><file_sep>/app/citas2.php <!DOCTYPE html> <html lang="es-ES"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0, user-scalable=0"> <title>Citas programadas | CQS</title> <?php require_once('include/header.php'); ?> </head> <body class="page page-id-2471 page-child parent-pageid-2369 page-template-default w1170 no-header-image no-sidebar"> <div id='top'></div> <div class="page-wrapper"> <?php require_once('include/menu_principal.php'); ?> <section id="header-space"></section> <section id="page-header" class="style2 white" style="background-image: url(http://cqslab.com/wp-content/uploads/2014/02/bread2.jpg)"> <div class="container_16"> <h1>Citas programadas</h1> </div> </section> <section id="main" class="container_16"> <div id="post-body" class="blog "> <div id="post-body-padding"> <div class="post-text-full"> <h5 class="title" data-animation=""><strong>Citas programadas</strong></h5> <div class="sc-table sc-dark-header " data-animation=""> <div class="sc-row"> <div class="sc-col">#</div> <div class="sc-col">Fecha</div> <div class="sc-col">Lugar</div> <div class="sc-col">Observaciones</div> </div> <!-- .sc-row --> <div class="sc-row"> <div 
class="sc-col">1</div> <div class="sc-col">12/05/2015</div> <div class="sc-col">PASEO CASTELLANA 7</div> <div class="sc-col">Lorem Ipsum Dolor</div> </div> <!-- .sc-row --> <div class="sc-row"> <div class="sc-col">2</div> <div class="sc-col">17/05/2015</div> <div class="sc-col">CUATRO CAMINOS</div> <div class="sc-col">Lorem Ipsum</div> </div> <!-- .sc-row --> <div class="sc-row"> <div class="sc-col">3</div> <div class="sc-col">7/06/2015</div> <div class="sc-col">GREGORIO MARAÑÓN</div> <div class="sc-col">Análisis</div> </div> <!-- .sc-row --> <div class="sc-row"> <div class="sc-col">4</div> <div class="sc-col">27/08/2015</div> <div class="sc-col">CUATRO CAMINOS</div> <div class="sc-col">Pilates</div> </div> <!-- .sc-row --> </div> <!-- .sc-table --> <a class="sc-button sc-default sc-orange sc-big fa fa-plus " data-animation="" href="alta_cita.php" style="background-color:#a92037"><span>Pedir cita</span></a> </div> </div> </div> </section> <?php require_once('include/footer.php'); ?> </div> <div class="media_for_js"></div> <?php require_once('include/javascript.php'); ?></body> </html><file_sep>/app/include/consigue_precio.php <?php echo '<h3><strong>120</strong><small>€</small></h3>'; ?><file_sep>/app/alta_bono.php <!DOCTYPE html> <html lang="es-ES"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0, user-scalable=0"> <title>Alta bono | CQS</title> <?php require_once('include/header.php'); ?> <script type="text/javascript" src="js/jquery1.js"></script> <script type="text/javascript"> jQuery(document).ready(function(){ jQuery("select[name='tipobono']").change(function(){ var optionValue = jQuery("select[name='tipobono']").val(); jQuery.ajax({ type: "GET", url: "include/precio.php", data: "tipobono="+optionValue+"&status=1", beforeSend: function(){ jQuery("#ajaxLoader").show(); }, complete: function(){ jQuery("#ajaxLoader").hide(); }, success: function(response){ 
jQuery("#bonoAjax").html(response); jQuery("#bonoAjax").show(); } }); }); }); </script> <script type="text/javascript"> jQuery(document).ready(function(){ jQuery("select[name='tipobono']").change(function(){ var optionValue = jQuery("select[name='tipobono']").val(); jQuery.ajax({ type: "GET", url: "include/observaciones.php", data: "tipobono="+optionValue+"&status=1", beforeSend: function(){ jQuery("#ajaxLoader").show(); }, complete: function(){ jQuery("#ajaxLoader").hide(); }, success: function(response){ jQuery("#bonoAjax1").html(response); jQuery("#bonoAjax1").show(); } }); }); }); </script> </head> <body class="page page-child page-template-default w1170 no-header-image no-sidebar"> <div id='top'></div> <div class="page-wrapper"> <?php require_once('include/menu_principal.php'); ?> <section id="header-space"></section> <section id="page-header" class="style2 white" style="background-image: url(img/bread2.jpg)"> <div class="container_16"> <h1>Alta bono</h1> <p></p> <div class="page-path"> <div id="crumbs"><span typeof="v:Breadcrumb"> <a rel="v:url" property="v:title" href="index.php">Inicio</a></span> <span class="delimiter">|</span> <span typeof="v:Breadcrumb"> <span class="delimiter">|</span> <span class="current">Pedir bono</span> </div> </div> </div> </section> <section id="main" class="container_16"> <div id="post-body" class="blog "> <div id="post-body-padding"> <div class="post-text-full"> <h2 class="title" data-animation="">Pedir bono</h2> <div class="theme-one-fourth"> <div class="sc-icon"> <h5 style="text-align:right">Tipo de bono</h5> </div> </div> <div class="theme-three-fourth theme-column-last"> <select name="tipobono" class="form-control input-medium"> <option value="">Selecciona...</option> <option value="1">Este es un bono de prueba</option> <option value="2">Aquí hay otro bono</option> <option value="3">Y otro</option> <option value="4">Los que queramos</option> <option value="5">El último</option> </select> </div> <div class="clear"></div> 
<div class="theme-one-fourth"> <div class="sc-icon"> <h5 style="text-align:right">Precio</h5> </div> </div> <div class="theme-three-fourth theme-column-last"> <div id="ajaxLoader" style="display:none"><img src="img/ajax-loader.gif" alt="cargando..."></div> <div id="bonoAjax" style="display:none"> Selecciona un tipo de bono </div> </div> <div class="clear"></div> <div class="theme-one-fourth"> <div class="sc-icon "> <h5 style="text-align:right">Observaciones</h5> </div> </div> <div class="theme-three-fourth theme-column-last"> <div class="sc-testimonial no-corner " data-animation=""> <div id="ajaxLoader" style="display:none"><img src="img/ajax-loader.gif" alt="cargando..."></div> <div id="bonoAjax1" style="display:none"> Selecciona un tipo de bono </div> </div> </div> <div class="theme-one-fourth"> </div> <div class="theme-three-fourth theme-column-last"> <a class="sc-button sc-default sc-orange fa fa-plus " data-animation="" href="#" target="_blank" style="background-color:#a92037"><span>Comprar</span></a> <a class="sc-button sc-default sc-orange fa fa-plus " data-animation="" href="#" target="_blank" style="background-color:#2d3e52"><span>Solicitar</span></a> </div> </div> </div> </div> </section> <?php require_once('include/footer.php'); ?> </div> <div class="media_for_js"></div> <?php require_once('include/javascript.php'); ?> </body> </html><file_sep>/app/olvido_contra.php <!DOCTYPE html> <html lang="es-ES"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0, user-scalable=0"> <title>¿Olvidaste la contraseña? 
| CQS</title> <?php require_once('include/header.php'); ?> </head> <body class="page page-child page-template-default w1170 no-header-image no-sidebar"> <div id='top'></div> <div class="page-wrapper"> <?php require_once('include/menu_principal.php'); ?> <section id="header-space"></section> <section id="page-header" class="style2 white" style="background-image: url(img/bread2.jpg)"> <div class="container_16"> <h1>¿Olvidaste la contraseña?</h1> <p></p> <div class="page-path"> <div id="crumbs"><span typeof="v:Breadcrumb"> <a rel="v:url" property="v:title" href="index.php">Inicio</a></span> <span class="delimiter">|</span> <span typeof="v:Breadcrumb"> <span class="delimiter">|</span> <span class="current">¿Olvidaste la contraseña?</span> </div> </div> </div> </section> <section id="main" class="container_16"> <div id="post-body" class="blog "> <div id="post-body-padding"> <div class="post-text-full"> <div class="woocommerce"> <form method="post" class="lost_reset_password"> <h2 class="title">¿Tienes problemas para iniciar sesión?</h2> <p>¿Has olvidado tu contraseña? Introduce tu dirección de correo electrónico de inicio de sesión. 
Te enviaremos un mensaje de correo electrónico con un enlace para recuperar tu contraseña.</p> <p class="form-row form-row-first"><label for="user_login"><b>Email *</b></label> <input class="input-text" type="text" placeholder="Introduce tu dirección email" name="user_login" id="user_login"></p> <div class="clear"></div> <a class="sc-button sc-default sc-orange sc-big fa fa-arrow-right" data-animation="" href="#" style="background-color:#a92037; margin-bottom:0px !important; margin-top:20px !important;"><span>Recuperar</span></a> </form> <div class="clear" style="margin:10px 0px;"></div> </div> </div> </div> </div> </section> <?php require_once('include/footer.php'); ?> </div> <div class="media_for_js"></div> <?php require_once('include/javascript.php'); ?></body> </html><file_sep>/app/index.php <!DOCTYPE html> <html lang="es-ES"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0, user-scalable=0"> <title>Identificación | CQS</title> <?php require_once('include/header.php'); ?> </head> <body class="page page-child page-template-default w1170 no-header-image no-sidebar"> <div id='top'></div> <div class="page-wrapper"> <?php require_once('include/menu_principal.php'); ?> <section id="header-space"></section> <section id="page-header" class="style2 white" style="background-image: url(img/bread2.jpg)"> <div class="container_16"> <h1>Identificación</h1> <p></p> <div class="page-path"> <div id="crumbs"><span typeof="v:Breadcrumb"> <a rel="v:url" property="v:title" href="index.php">Inicio</a></span> <span class="delimiter">|</span> <span typeof="v:Breadcrumb"> <span class="delimiter">|</span> <span class="current">Identificación</span> </div> </div> </div> </section> <section id="main" class="container_16"> <div id="post-body" class="blog "> <div id="post-body-padding"> <div class="post-text-full"> <?php if(array_key_exists("fail", $_GET)){ ?> <div class="sc-message sc-message-classic 
sc-message-error " data-animation=""> <span></span> <h3>Por favor, vuelve a introducir tu mail / contraseña.</h3> <div>La contraseña o el mail no son válidos. Por favor, asegúrate de que el bloqueo de mayúsculas no está activado e inténtalo de nuevo.</div> <div>¿Olvidaste la contraseña? <a href="olvido_contra.php">Recuerdame la contraseña.</a></div> <a class="close" href="#"> <i class="fa fa-times"></i> </a> </div> <?php }?> <div class="woocommerce"> <form method="post" id="login_form" class="login" action="service_requests/login.php"> <h2 class="title">Iniciar sesión</h2> <p class="form-row theme-one-half"> <label for="username"><b>Usuario</b> <span class="required">*</span></label> <input type="text" class="input-text" placeholder="Introduce tu nombre de usuario" name="username" id="username"> </p> <p class="form-row theme-one-half theme-column-last"> <label for="password"><b>Contraseña</b> <span class="required">*</span></label> <input class="input-text" type="password" placeholder="<PASSWORD> tu <PASSWORD>" name="password" id="password"> </p> <p class="form-row"> <a id="login" class="sc-button sc-default sc-orange sc-big fa fa-arrow-right" data-animation="" href="#" style="background-color:#a92037; margin-bottom:0px !important; margin-top:20px !important;"><span>Entrar</span></a> </p> </form> </div> </div> </div> </div> </section> <?php require_once('include/footer.php'); ?> </div> <div class="media_for_js"></div> <?php require_once('include/javascript.php'); ?> <script type="text/javascript"> $("#login").click(function() { $("#login_form").submit(); }); </script> </body> </html><file_sep>/app/include/redirect.php <?php session_start(); if(!array_key_exists("username",$_SESSION)){ if($_SERVER["REQUEST_URI"] != "/index.php"){ redirect("index.php"); } } function redirect($url, $statusCode = 303) { header('Location: ' . 
$url, true, $statusCode); die(); } ?><file_sep>/app/service_requests/login.php <?php session_start(); require_once "redirect.php"; $username = $_POST["username"]; $password = $_POST["<PASSWORD>"]; $data = array("usr" => $username, "pwd" => $password); $data = enviar("login", $data); $responses = json_decode($data,true); if($responses["ret"] == "ERROR"){ if($responses["code"] == "UMCP"){ $_SESSION["username"] = $username; $_SESSION["password"] = $<PASSWORD>; redirect("../index_chg_pwd.php"); }else{ redirect("../index.php?fail=1"); } }else{ $_SESSION["username"] = $username; $_SESSION["password"] = $<PASSWORD>; if($responses["data"]["status"] == "PASSWORD_REST"){ redirect("../index_chg_pwd.php"); }else{ $_SESSION["pat_id"] = $responses["data"]["user_info"]["id"]; redirect("../citas.php"); } } ?><file_sep>/app/bonos.php <!DOCTYPE html> <html lang="es-ES"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0, user-scalable=0"> <title>Bonos | CQS</title> <?php require_once('include/header.php'); ?> </head> <body class="page page-child page-template-default w1170 no-header-image no-sidebar"> <div id='top'></div> <div class="page-wrapper"> <?php require_once('include/menu_principal.php'); ?> <section id="header-space"></section> <section id="page-header" class="style2 white" style="background-image: url(img/bread2.jpg)"> <div class="container_16"> <h1>Bonos disponibles</h1> <p></p> <div class="page-path"> <div id="crumbs"><span typeof="v:Breadcrumb"> <a rel="v:url" property="v:title" href="index.php">Inicio</a></span> <span class="delimiter">|</span> <span typeof="v:Breadcrumb"> <span class="delimiter">|</span> <span class="current">Bonos</span> </div> </div> </div> </section> <section id="main" class="container_16"> <div id="post-body" class="blog "> <div id="post-body-padding"> <div class="post-text-full"> <h5 class="title" data-animation=""><strong>Bonos</strong></h5> <div 
id="tabla_bonos" class="sc-table sc-dark-header " data-animation=""> <div class="sc-row"> <div class="sc-col">Bono</div> <div class="sc-col">Fecha Alta</div> <div class="sc-col">Fecha Límite</div> <div class="sc-col">Estado</div> <div class="sc-col">Disponibles</div> </div> </div> <!-- .sc-table --> <!-- <a class="sc-button sc-default sc-orange sc-big fa fa-plus " data-animation="" href="alta_bono.php" style="background-color:#a92037"><span>Alta bono</span></a> --> </div> </div> </div> </section> <?php require_once('include/footer.php'); ?> <script type="text/javascript"> $(document).ready(function() { $.ajax({ url: "service_requests/cargar_bonos.php", protocol: "http", type: "POST", data: { patient: $("#usr_id").val() }, dataType: "JSON", error: function () { var trHTML = ''; trHTML += '<div class="sc-row"> No hay bonos </div>' $('#tabla_bonos').append(trHTML); }, success: function (jsonStr) { var i = 0; var trHTML = ''; $.each(jsonStr.data.rows, function (i, item) { var disponibles = item.ntickets - item.nused; i++; trHTML += ' <div class="sc-row"><div class="sc-col">' + item.name + '</div> <div class="sc-col">' + item.dtcrea + '</div> <div class="sc-col">' + item.dtexpire +'</div> <div class="sc-col">' +item.status +'</div> <div class="sc-col">' + disponibles+ '</div></div>' }); if(i == 0){ trHTML += '<div class="sc-row"> No hay bonos </div>' } $('#tabla_bonos').append(trHTML); } }); }); </script> </div> <div class="media_for_js"></div> <?php require_once('include/javascript.php'); ?> </body> </html><file_sep>/app/cerrar_sesion.php <?php require_once "redirect.php"; session_destroy(); redirect("../index.php"); ?><file_sep>/app/include/header.php <?php require_once "redirect.php"; ?> <script type="text/javascript" src="http://code.jquery.com/jquery-1.9.1.js"></script> <link rel='stylesheet' id='reset-css' href='css/reset.css' type='text/css' media='all' /> <link rel='stylesheet' id='960_16_col-css' href='css/960_16_col.css' type='text/css' media='all' /> <link 
rel='stylesheet' id='text-css' href='css/text.css' type='text/css' media='all' /> <link rel='stylesheet' id='prettyPhoto-css' href='css/prettyPhoto.css' type='text/css' media='all' /> <link rel='stylesheet' id='font-awesome-css' href='font-awesome/css/font-awesome.min.css' type='text/css' media='all' /> <link rel='stylesheet' id='icomoon-css' href='css/icomoon.css' type='text/css' media='all' /> <link rel='stylesheet' id='shortcodes-css' href='css/shortcodes.css' type='text/css' media='all' /> <link rel='stylesheet' id='main-css' href='css/main.css' type='text/css' media='all' /> <link rel='stylesheet' id='widgets-css' href='css/widgets.css' type='text/css' media='all' /> <link rel='stylesheet' id='responsiveness-css' href='css/media.css' type='text/css' media='all' /> <link rel='stylesheet' id='videojs-styles-css' href='css/video-js.min.css' type='text/css' media='all' /> <link rel='stylesheet' id='c3-css' href='css/c3.css' type='text/css' media='all' /> <link rel='stylesheet' id='animate-css' href='css/animate.css' type='text/css' media='all' /> <link rel='stylesheet' id='custom' href='css/custom.css' type='text/css' media='all' /> <link rel='stylesheet' id='tp-lato-css' href='http://fonts.googleapis.com/css?family=Lato%3A300%2C400%2C700&#038;ver=3.9.2' type='text/css' media='all' /> <link rel='stylesheet' id='woocommerce_chosen_styles-css' href='css/chosen.css' type='text/css' media='all' /> <link rel='stylesheet' id='woocommerce-layout-css' href='css/woocommerce-layout.css' type='text/css' media='all' /> <link rel='stylesheet' id='woocommerce-smallscreen-css' href='css/woocommerce-smallscreen.css' type='text/css' media='only screen and (max-width: 768px)' /> <link rel='stylesheet' id='woocommerce-general-css' href='css/woocommerce.css' type='text/css' media='all' /> <script type='text/javascript' src='js/jquery.js'></script> <script type='text/javascript' src='js/jquery-migrate.min.js'></script> <link 
href="http://fonts.googleapis.com/css?family=Open+Sans:100,200,300,300italic,400italic,600italic,700italic,800italic,400,500,600,700,800,900" rel="stylesheet" type="text/css"> <link href="http://fonts.googleapis.com/css?family=Lato:100,200,300,300italic,400italic,600italic,700italic,800italic,400,500,600,700,800,900" rel="stylesheet" type="text/css"> <link href="http://fonts.googleapis.com/css?family=Raleway:100,200,300,300italic,400italic,600italic,700italic,800italic,400,500,600,700,800,900" rel="stylesheet" type="text/css"><file_sep>/app/index_chg_pwd.php <!DOCTYPE html> <html lang="es-ES"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0, user-scalable=0"> <title>Identificación | CQS</title> <?php require_once('include/header.php'); ?> </head> <body class="page page-child page-template-default w1170 no-header-image no-sidebar"> <div id='top'></div> <div class="page-wrapper"> <?php require_once('include/menu_principal.php'); ?> <section id="header-space"></section> <section id="page-header" class="style2 white" style="background-image: url(img/bread2.jpg)"> <div class="container_16"> <h1>Identificación</h1> <p></p> <div class="page-path"> <div id="crumbs"><span typeof="v:Breadcrumb"> <a rel="v:url" property="v:title" href="index.php">Inicio</a></span> <span class="delimiter">|</span> <span typeof="v:Breadcrumb"> <span class="delimiter">|</span> <span class="current">Identificación</span> </div> </div> </div> </section> <section id="main" class="container_16"> <div id="post-body" class="blog "> <div id="post-body-padding"> <div class="post-text-full"> <?php if(array_key_exists("fail", $_GET)){ ?> <div class="sc-message sc-message-classic sc-message-error " data-animation=""> <span></span> <h3>Por favor, cambie su contraseña.</h3> <div>La contraseña debe ser cambiada para mayor seguridad. 
Por favor, introduzca una nueva.</div> </div> <?php }?> <div class="woocommerce"> <form method="post" id="login_form" class="login" action="service_requests/chg_pwd.php"> <h2 class="title">Cambio de contraseña</h2> <p class="form-row theme-one-half"> <label for="password"><b>Nueva Contraseña</b> <span class="required">*</span></label> <input type="password" class="input-text" placeholder="Intro<PASSWORD> tu nueva contraseña" name="password" id="password"> </p> <p class="form-row theme-one-half theme-column-last"> <label for="password2"><b>Contraseña</b> <span class="required">*</span></label> <input class="input-text" type="<PASSWORD>" placeholder="Repite tu contraseña" name="password2" id="password2"> </p> <p class="form-row"> <a id="login" class="sc-button sc-default sc-orange sc-big fa fa-arrow-right" data-animation="" href="#" style="background-color:#a92037; margin-bottom:0px !important; margin-top:20px !important;"><span>Entrar</span></a> </p> </form> </div> </div> </div> </div> </section> <?php require_once('include/footer.php'); ?> </div> <div class="media_for_js"></div> <?php require_once('include/javascript.php'); ?> <script type="text/javascript"> $("#login").click(function() { if( $("#password").value == $("#password2").value){ $("#login_form").submit(); }else{ alert("Las contraseñas deben coincidir"); } }); </script> </body> </html><file_sep>/app/service_requests/service.php <?php function enviar($dir, $data){ $url_arr = direcciones(); $url = $url_arr[$dir]; $ch = curl_init(); $post_values =json_encode( $data ); curl_setopt($ch, CURLOPT_URL, 'http://172.16.31.10'.$url); curl_setopt($ch, CURLOPT_POST, 1); curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1); curl_setopt($ch, CURLOPT_POSTFIELDS, $post_values); curl_setopt($ch, CURLOPT_HTTPHEADER, array('Content-Type: application/json')); $data = curl_exec($ch); header('Content-type: application/json; charset=utf-8'); echo($data); curl_close($ch); return $data; } function direcciones(){ $dirs = array( "login" => 
":8051/ext/login/", "change_password" => ":8051/ext/change_pwd/", "cargar_bonos" => ":8020/management/load_bonos/", "cargar_citas" => ":8020/calendar/load_patient/", ); return $dirs; } ?><file_sep>/app/include/precio.php <?php $tipobono = $_GET['tipobono']; if(!$tipobono) { return false; } $precios = array( 1 => array('120'), 2 => array('150'), 3 => array('255'), 4 => array('70'), 5 => array('55') ); $precioactual = $precios[$tipobono]; ?> <?php foreach($precioactual as $precio) { ?> <h3><strong><?php echo $precio; ?></strong><small>€</small></h3> <?php } ?> <file_sep>/app/service_requests/chg_pwd.php <?php require_once "redirect.php"; session_start(); $password2 = $_POST["<PASSWORD>2"]; $password = $_SESSION["<PASSWORD>"]; $username = $_SESSION["username"]; $data = array("usr" => $username, "old_pwd" => $password, "new_pwd" => $<PASSWORD>); $data = enviar("change_password", $data); $responses = json_decode($data,true); if($responses["ret"] == "ERROR"){ redirect("../index.php?fail=1"); }else{ redirect("../citas.php"); } ?>
af364ae8694fc3dc5a6e464ccf2c8201320bfb0f
[ "PHP" ]
19
PHP
loko64z/cqs
254b3c99f555cf118c73f81c0139de198cd94ad0
bf371031398e2a4b66f3f80bc47227136641c62a
refs/heads/master
<file_sep>import * as prod from './prod'; import * as local from './local'; const envs = { prod, local, }; export default envs[process.env.REACT_APP_ENV.trim()] || envs.dev; <file_sep>import styled from 'styled-components'; // eslint-disable-line import/no-extraneous-dependencies const ProductStyled = styled.div` display: flex; padding: 12px; border-radius: 3px; border: 1px solid #eee; > div { width: 100%; } .price { text-align: right; display: block; margin-top: 15px; } &:not(:last-of-type) { margin-bottom: 15px; } img { margin-right: 10px; } `; export default ProductStyled; <file_sep>import styled from 'styled-components'; // eslint-disable-line import/no-extraneous-dependencies const AlertaStyled = styled.div` > div { background: rgba(255, 255, 255, 0.7); background-size: cover; width: 100vw; height: 100vh; position: fixed; top: 0; left: 0; z-index: 9998; display: flex; align-items: center; justify-content: flex-start; color: black; text-align: center; padding-left: 40px; padding-right: 40px; > div { width: 100%; } div { margin: 20px 0; } } `; export default AlertaStyled; <file_sep>import React from 'react'; import { action } from '@storybook/addon-actions'; import Input from '../components/Input'; export default { title: 'Input', component: Input, }; export const Change = () => <Input onChange={action('typing')} />; export const Disabled = () => <Input disabled />; export const Mask = () => ( <Input onChange={action('typing')} mask="99/99/9999" maskChar="_" /> ); export const Placeholder = () => ( <Input onChange={action('typing')} placeholder="Placeholder" /> ); <file_sep>export const maskMoneyPtBr = number => parseFloat(number).toLocaleString('pt-br', { minimumFractionDigits: 2, maximumFractionDigits: 2, }); <file_sep>export const post = (url, params) => { const config = { method: 'post', headers: new Headers({ Authorization: 'Basic dHV2OjhIM3dJR3Vib2JMRkVzMw==', 'content-type': 'application/json', }), }; if (params) { config.body = JSON.stringify(params); } 
return fetch(url, config).then((response) => { if (!response.ok) { throw Error(response); } return response.json(); }); }; export const get = (url, params) => { const config = { method: 'get', headers: new Headers({ Authorization: 'Basic dHV2OjhIM3dJR3Vib2JMRkVzMw==', 'content-type': 'application/json', }), }; return fetch( `${url}?${ params ? Object.keys(params) .map(item => `${item}=${params[item]}`) .join('&') : '' }`, config, ).then((response) => { if (!response.ok) { throw Error(response); } return response.json(); }); }; <file_sep>import React from 'react'; import Text from '../components/Text'; export default { title: 'Text', component: Text, }; export const Normal = () => <Text>Hello Button</Text>; export const Size = () => <Text size={'40px'}>Hello Button</Text>; export const Bold = () => ( <Text bold size={'40px'}> Hello Text </Text> ); export const Color = () => ( <Text color="grey" size={'40px'}> Hello Text </Text> ); export const Uppercase = () => ( <Text uppercase color="grey" size={'40px'}> Hello Text </Text> ); export const Center = () => ( <Text center size={'40px'}> Hello Text </Text> ); <file_sep>import { combineReducers } from 'redux'; import { connectRouter } from 'connected-react-router'; import alerta from 'store/alerta/reducer'; import loading from 'store/loading/reducer'; import cart from 'store/cart/reducer'; export default history => combineReducers({ loading, alerta, cart, router: connectRouter(history), }); <file_sep>import React from 'react'; import Text from 'components/Text'; import { maskMoneyPtBr } from '../../utils/mask'; import ProductStyled from './style'; const Product = props => { const { item } = props; return ( <ProductStyled data-cy="products"> <img src={item.product.imageObjects[0].thumbnail} alt={item.product.name} /> <div> <Text size="13px">{item.product.name}</Text> {props.showPrice ? 
( <Text size="13px" bold className="price"> {`R$ ${maskMoneyPtBr(item.product.priceSpecification.price)}`} </Text> ) : null} </div> </ProductStyled> ); }; export default Product; <file_sep>import React from 'react'; import { Col } from 'react-grid-system'; import { useSelector } from 'react-redux'; import Text from 'components/Text'; import Card from 'components/Card'; import { maskMoneyPtBr } from '../../utils/mask'; import CheckoutStyled from './style'; const Checkout = () => { const data = useSelector(state => state.cart.data); return ( <> {data.items.length > 0 ? ( <Col lg={4} xs={12} data-cy="checkout"> <CheckoutStyled> <Card transparent> <div className="checkout"> <Text size="14px" uppercase color="#212122"> Produtos </Text> <Text size="14px" uppercase color="#212122"> {`R$ ${maskMoneyPtBr(data.subTotal)}`} </Text> </div> <div className="checkout"> <Text size="14px" uppercase color="#212122"> Frete </Text> <Text size="14px" uppercase color="#212122"> {`R$ ${maskMoneyPtBr(data.shippingTotal)}`} </Text> </div> <div className="checkout"> <Text size="14px" uppercase color="#FF7800"> Desconto </Text> <Text size="14px" uppercase color="#FF7800"> {`- R$ ${maskMoneyPtBr(data.discount)}`} </Text> </div> <div className="checkout total"> <Text size="14px" uppercase bold color="#212122"> Total </Text> <Text size="14px" uppercase bold color="#212122"> {`R$ ${maskMoneyPtBr(data.total)}`} </Text> </div> </Card> </CheckoutStyled> </Col> ) : null} </> ); }; export default Checkout; <file_sep># bnw-test-front [![MIT license](http://img.shields.io/badge/license-MIT-brightgreen.svg)](http://opensource.org/licenses/MIT) [![Website shields.io](https://img.shields.io/website-up-down-green-red/http/shields.io.svg)](https://boring-northcutt-2a3760.netlify.com/) [![Maintenance](https://img.shields.io/badge/Maintained%3F-no-red.svg)]() Teste de front-end arquitetado para a empresa Beleza na Web contendo 3 passos (rotas). 1. Rota de produtos no carrinho (cart). 2. 
Rota de pagamento onde digitamos os dados do cartao (payment). 3. Rota de sucesso ao submeter o pagamento (success). - Caso atualize a página na segunda step ou ultima, ao inves de recarregar os dados da api que preciso volto o usuário para o começo do checkout, entendo que quando a compra foi realizada o usuário nao pode atualizar a pagina e voltar no mesmo lugar. - O projeto esta rodando [aqui](https://boring-northcutt-2a3760.netlify.com/cart) As rotas foram divididas em bundles diferentes usando [Lazy e Suspense](https://pt-br.reactjs.org/docs/code-splitting.html). ## Geral Primeiramente me desculpem pelo projeto enorme, entendo que o teste é bem simples e daria para ser feito sem todas as "firulas" que pus, mas quis mostrar um pouco do meu dia-a-dia e conhecimento se tratando de arquitetura de projetos front-end com o react. Para testes usei a lib [Cypress](https://www.cypress.io/), escolhi ela pois é a unica que tive experiencia até hoje, e para o lint usei o [eslint](https://eslint.org/) junto com o [prettier](https://prettier.io/) para padrao de escrita de código. Fiz um [storybook](https://storybook.js.org/) com alguns componentes e exemplos criados no projeto para facilitar a vida de outros devs que precisem mexer na aplicaçao. ## Libs utilizadas O projeto foi iniciado com o [CRA](https://github.com/facebook/create-react-app), utilizando [redux](https://www.npmjs.com/package/redux) junto ao [redux-thunk](https://www.npmjs.com/package/redux-thunk) como middleweare para gerenciamento de estados. Utilizo o [prop-types](https://www.npmjs.com/package/prop-types) para tipar os dados retornados nos componentes. Como lifecycle do react estou utilizando [React Hooks](https://pt-br.reactjs.org/docs/hooks-intro.html), e [react-redux](https://www.npmjs.com/package/react-redux) para facilitar a conversa entra os hooks e o redux. (Utilizei classes no componente Alerta para mostrar conhecimento nos dois lifecycles, os outros componentes estao todos escritos com hooks). 
Utilizo o [framer-motion](https://www.framer.com/motion/) para para fazer a animaçao da janela de alerta padrao da aplicaçao, as outras animaçoes estou utilizando a lib css [animate-css](https://daneden.github.io/animate.css/). Voces podem testar a janela de alerta mudando o endpoint para um que nao exista, assim a janela vai ser mostrada na tela assim que o erro for retornado. Utilizo o [styled-components](https://www.npmjs.com/package/styled-components) como css in js no projeto todo, menos nos arquivos globais como libs css, fontes e style global. Utilizei também a lib [react-input-mask](https://www.npmjs.com/package/react-input-mask) para abstrair as masks no input. As validaçoes de cartao de credito fiz com regex, estou validando visa, master e dinners, nao utilizo libs. Os erros sao imputados no componente feitos a mao tambem. Para a grid utilizei a lib [react-grid-system](https://www.npmjs.com/package/react-grid-system) que uso como padrao normalmente. Para requisiçao http usei o [isomorphic-fetch](https://www.npmjs.com/package/isomorphic-fetch). ### Instalaçao ``` yarn ``` ### Rodando Prod Unix: ``` yarn start ``` Windows: ``` yarn start:win ``` ### Rodando Dev Unix: ``` yarn start:local ``` Windows: ``` yarn start:win:local ``` ### Rodando Build Unix: ``` yarn build ``` Windows: ``` yarn build:win ``` ### Rodando Storybook ``` yarn storybook ``` ### Rodando Tests Para executar no terminal: ``` yarn test ``` Para executar no navegador: ``` yarn test:open ``` ## Pre-commit Ao tentar realizar o commit rodamos o lint (eslint, prettier), caso o lint passe o commit é realizado, caso algum lint falhe o commit é cancelado. Pre-commit feito com husky e lint-staged. ## Deploy No deploy da aplicaçao usei o CI do [Netlify](https://www.netlify.com/), com um repositório privado no meu github. O build é feito automaticamente quando algum push é feito na master deste repositório. 
## Consideraçoes Espero que gostem do projeto e que eu possa ajuda-los a construir coisas legais em breve! Abraços :) <file_sep>import styled from 'styled-components'; // eslint-disable-line import/no-extraneous-dependencies const CheckoutStyled = styled.div` .checkout { display: flex; justify-content: space-between; &:not(:last-child) { margin-bottom: 10px; } } .total { margin-top: 20px; } .item { display: flex; padding: 12px; border-radius: 3px; border: 1px solid #eee; > div { width: 100%; } .price { text-align: right; display: block; margin-top: 15px; } &:not(:last-child) { margin-bottom: 15px; } img { margin-right: 10px; } } `; export default CheckoutStyled; <file_sep>/* eslint-env browser */ import React, { useEffect } from 'react'; import { connect, useDispatch, useSelector } from 'react-redux'; import { Container, Row, Col } from 'react-grid-system'; import { Link } from 'react-router-dom'; import { getCartData } from 'store/cart/actions'; import DefaultLayout from 'layouts/default'; import Text from 'components/Text'; import Button from 'components/Button'; import Product from 'components/Product'; import Card from 'components/Card'; import Checkout from 'components/Checkout'; import ContainerStyled from '../style'; const Cart = () => { useEffect(() => { document.title = `Beleza na Web - Sacola`; }, []); const dispatch = useDispatch(); const data = useSelector(state => state.cart.data); useEffect(() => { if (!data) { dispatch(getCartData()); } }, [dispatch, data]); return ( <ContainerStyled className="animated fadeIn faster"> <Container> {data.items && data.items.length > 0 ? 
( <Row> <Col lg={8} xs={12}> <Text size="14px" className="product-text" uppercase bold color="#999" > Produtos </Text> <Card> {data.items.map(item => ( <Product showPrice key={item.product.sku} item={item} /> ))} </Card> </Col> <Checkout data={data} /> <Col xs={12}> <Link to="/payment" className="animated fadeIn" data-cy="buttonCart" > <Button>Seguir para o pagamento</Button> </Link> </Col> </Row> ) : ( <Text>Sem items no carrinho.</Text> )} </Container> </ContainerStyled> ); }; export default DefaultLayout(connect()(Cart)); <file_sep>import React from 'react'; import { action } from '@storybook/addon-actions'; import Button from '../components/Button'; export default { title: 'Button', component: Button, }; export const Clicked = () => ( <Button onClick={action('clicked')}>Hello Button</Button> ); export const Disabled = () => <Button disabled>Hello Button</Button>; export const FullWidth = () => <Button fullWidth>Hello Button</Button>; export const Center = () => <Button center>Hello Button</Button>; <file_sep>import React from 'react'; import { useSelector } from 'react-redux'; import PropTypes from 'prop-types'; import LoadingStyled from './style'; const Loading = () => { const loading = useSelector(state => state.loading.isLoading); return ( loading && ( <LoadingStyled key="loading" className="animated faster fadeIn"> <div className="lds-ellipsis" data-cy="loading"> <div></div> <div></div> <div></div> <div></div> </div> </LoadingStyled> ) ); }; Loading.propTypes = { loading: PropTypes.bool, }; export default Loading; <file_sep>import styled from 'styled-components'; // eslint-disable-line import/no-extraneous-dependencies const ButtonStyled = styled.button` margin: ${props => (props.center ? '0 auto' : '')}; border-radius: 3px; display: ${props => (props.center ? 
'flex' : 'initial')}; justify-content: center; padding: 18px 20px; border: 0; text-align: center; font-size: 20px; text-transform: uppercase; background-color: #ff6c00; outline: none; font-weight: bold; text-align: center; width: ${props => (props.fullWidth ? '100%' : 'auto')}; color: white; box-shadow: inset 0 -3px 0 0 #d45a00, 0 2px 4px 0 rgba(0, 0, 0, 0.25); transition: all 0.3s ease; cursor: ${props => (props.disabled ? 'not-allowed' : 'pointer')}; &:hover { background-color: #d45a00; box-shadow: none; } &:disabled { opacity: ${props => (props.disabled ? '0.5' : '1')}; &:hover { background-color: #ff6c00; box-shadow: inset 0 -3px 0 0 #d45a00, 0 2px 4px 0 rgba(0, 0, 0, 0.25); } } @media screen and (max-width: 768px) { width: 100%; } `; export default ButtonStyled; <file_sep>import fetch from 'isomorphic-fetch'; export const callService = (url, method, params, customheader = {}) => { const headers = { 'Content-Type': 'application/json', 'Accept-Charset': 'utf-8', ...customheader, }; const configHeaders = { method, headers, }; if (method !== 'GET') { configHeaders.body = JSON.stringify(params); } return new Promise((resolve, reject) => fetch(url, configHeaders) .then(response => { response .json() .then(json => { if (response.ok) { resolve(json); } else { reject(json); } }) .catch(e => reject(e)); }) .catch(message => reject(message)) ); }; <file_sep>import React from 'react'; import ReactDOM from 'react-dom'; import { store } from 'store/Redux'; import { Provider } from 'react-redux'; import * as serviceWorker from 'serviceWorker'; import 'utils/css/fonts.css'; import 'utils/css/global.css'; import 'utils/css/animate.css'; import App from 'App'; ReactDOM.render( <Provider store={store}> <App /> </Provider>, document.getElementById('root') ); serviceWorker.register(); <file_sep>import React from 'react'; import CardStyled from './style'; const Card = props => ( <CardStyled className="animated fadeIn" transparent={props.transparent && props.transparent} > 
{props.children && props.children} </CardStyled> ); export default Card; <file_sep>import styled from 'styled-components'; // eslint-disable-line import/no-extraneous-dependencies const LoadingStyled = styled.div` .lds-ellipsis { display: inline-block; position: relative; width: 80px; height: 80px; } .lds-ellipsis div { position: absolute; top: 33px; width: 13px; height: 13px; border-radius: 50%; background: #ff6c00; animation-timing-function: cubic-bezier(0, 1, 1, 0); } .lds-ellipsis div:nth-child(1) { left: 8px; animation: lds-ellipsis1 0.6s infinite; } .lds-ellipsis div:nth-child(2) { left: 8px; animation: lds-ellipsis2 0.6s infinite; } .lds-ellipsis div:nth-child(3) { left: 32px; animation: lds-ellipsis2 0.6s infinite; } .lds-ellipsis div:nth-child(4) { left: 56px; animation: lds-ellipsis3 0.6s infinite; } @keyframes lds-ellipsis1 { 0% { transform: scale(0); } 100% { transform: scale(1); } } @keyframes lds-ellipsis3 { 0% { transform: scale(1); } 100% { transform: scale(0); } } @keyframes lds-ellipsis2 { 0% { transform: translate(0, 0); } 100% { transform: translate(24px, 0); } } background: rgba(255, 255, 255, 0.7); background-size: cover; width: 100vw; height: 100vh; position: fixed; top: 0; left: 0; z-index: 9999; display: flex; flex-direction: column; align-items: center; justify-content: center; text-align: center; `; export default LoadingStyled; <file_sep>import React from 'react'; import { connect } from 'react-redux'; import Loading from 'components/Loading'; import Alerta from 'components/Alerta'; import Header from 'components/Header'; const Layout = Content => { const LayoutConnected = () => ( <> <Alerta /> <Loading /> <Header /> <Content /> </> ); return connect()(LayoutConnected); }; export default Layout; <file_sep>import React from 'react'; import PropTypes from 'prop-types'; import ButtonStyled from './style'; const Button = props => ( <ButtonStyled fullWidth={props.fullWidth && props.fullWidth} type={props.type ? 
props.type : 'text'} onClick={props.onClick && props.onClick} center={props.center && props.center} disabled={props.disabled && props.disabled} > {props.children && props.children} </ButtonStyled> ); Text.propTypes = { fullWidth: PropTypes.bool, type: PropTypes.string, center: PropTypes.bool, disabled: PropTypes.bool, onClick: PropTypes.func, }; export default Button; <file_sep>export const SET_CART_DATA = 'SET_CART_DATA'; export const SET_CARD_DATA = 'SET_CARD_DATA'; <file_sep>describe('Cart', () => { before(() => { cy.visit('http://localhost:3000'); }); it('should url path', () => { cy.url().should('include', '/cart'); }); it('should loading exist & get request', () => { cy.get('[data-cy=loading]').should('exist'); cy.request('https://www.mocky.io/v2/5b15c4923100004a006f3c07'); }); it('should header exist & steps too', () => { cy.get('[data-cy=header]').should('exist'); cy.get('[data-cy=textCart]').should('exist'); cy.get('[data-cy=textPayment]').should('exist'); cy.get('[data-cy=textSuccess]').should('exist'); cy.get('[data-cy=textCart] > div').should( 'have.css', 'color', 'rgb(255, 120, 0)' ); }); it('should products exist', () => { cy.get('[data-cy=products]').should('exist'); cy.get('[data-cy=products]').should('have.length', 3); }); it('should checkout exist', () => { cy.get('[data-cy=checkout]').should('exist'); }); it('should button exist & enabled', () => { cy.get('[data-cy=buttonCart]').should('exist'); cy.get('[data-cy=buttonCart] > button').should('be.enabled'); cy.get('[data-cy=buttonCart]').click(); }); }); describe('Payment', () => { it('should url path', () => { cy.url().should('include', '/payment'); }); it('should header exist & steps too', () => { cy.get('[data-cy=header]').should('exist'); cy.get('[data-cy=textCart]').should('exist'); cy.get('[data-cy=textPayment]').should('exist'); cy.get('[data-cy=textSuccess]').should('exist'); cy.get('[data-cy=textPayment] > div').should( 'have.css', 'color', 'rgb(255, 120, 0)' ); }); it('should button 
exist & disabled', () => { cy.get('[data-cy=buttonPayment]').should('exist'); cy.get('[data-cy=buttonPayment] > button').should('be.disabled'); }); it('should credit card exist & fake card', () => { cy.get('[data-cy=card]') .should('exist') .clear() .type('0000000000000000'); }); it('should credit card exist & incomplete', () => { cy.get('[data-cy=card]') .clear() .type('555555'); }); it('should credit card exist & real card', () => { cy.get('[data-cy=card]') .clear() .type('5287115761908655'); }); it('should name exist & typing', () => { cy.get('[data-cy=name]') .should('exist') .clear() .type('<NAME>'); }); it('should validity exist & incomplete', () => { cy.get('[data-cy=validity]') .should('exist') .clear() .type('111'); }); it('should validity complete', () => { cy.get('[data-cy=validity]') .clear() .type('122027'); }); it('should ccv exist & incomplete', () => { cy.get('[data-cy=ccv]') .should('exist') .clear() .type('11'); }); it('should ccv complete', () => { cy.get('[data-cy=ccv]') .clear() .type('404'); }); it('should checkout exist', () => { cy.get('[data-cy=checkout]').should('exist'); }); it('should button enabled', () => { cy.get('[data-cy=buttonPayment] > button').should('be.enabled'); }); it('should button click', () => { cy.get('[data-cy=buttonPayment]').click(); }); }); describe('Success', () => { it('should url path', () => { cy.url().should('include', '/success'); }); it('should header exist & steps too', () => { cy.get('[data-cy=header]').should('exist'); cy.get('[data-cy=textCart]').should('exist'); cy.get('[data-cy=textPayment]').should('exist'); cy.get('[data-cy=textSuccess]').should('exist'); cy.get('[data-cy=textSuccess] > div').should( 'have.css', 'color', 'rgb(255, 120, 0)' ); }); it('should success message exist and match', () => { cy.get('[data-cy=successMessage]').should('exist'); }); it('should payment info exist', () => { cy.get('[data-cy=successCard]') .should('exist') .contains('8655'); cy.get('[data-cy=successName]') 
.should('exist') .contains('<NAME>'); cy.get('[data-cy=successValidity]') .should('exist') .contains('12/2027'); }); it('should products exist', () => { cy.get('[data-cy=products]').should('exist'); cy.get('[data-cy=products]').should('have.length', 3); }); it('should checkout exist', () => { cy.get('[data-cy=checkout]').should('exist'); }); }); <file_sep>import React from 'react'; import Text from 'components/Text'; import { Container } from 'react-grid-system'; import HeaderStyled from './style'; const Header = () => { return ( <HeaderStyled key="header" className="animated fadeIn faster"> <Container data-cy="header"> <ul> <li data-cy="textCart"> <Text bold center size="13px" uppercase color={window.location.pathname === '/cart' ? '#FF7800' : '#CCC'} > Sacola </Text> </li> <li data-cy="textPayment"> <Text bold center size="13px" uppercase color={ window.location.pathname === '/payment' ? '#FF7800' : '#CCC' } > Pagamento </Text> </li> <li data-cy="textSuccess"> <Text bold center size="13px" uppercase color={ window.location.pathname === '/success' ? 
'#FF7800' : '#CCC' } > Confirmaçao </Text> </li> </ul> </Container> </HeaderStyled> ); }; export default Header; <file_sep>export const BASE_URL = 'https://www.mocky.io/v2'; <file_sep> import * as LoadingActions from '../store/loading/actions'; import * as LoadingActionTypes from '../store/loading/actionTypes'; const loadingMiddleware = store => next => (action) => { if (action.loading !== undefined && action.type !== LoadingActionTypes.SET_LOADING) { store.dispatch(LoadingActions.setLoading(action.loading)); } next(action); }; export default loadingMiddleware; <file_sep>import * as types from './actionTypes'; export const setLoading = loading => ({ type: types.SET_LOADING, loading, }); <file_sep>import React, { Component } from 'react'; import { push } from 'connected-react-router'; import { connect } from 'react-redux'; import { Link } from 'react-router-dom'; import { motion } from 'framer-motion'; import PropTypes from 'prop-types'; import Text from 'components/Text'; import Button from 'components/Button'; import * as AlertActions from 'store/alerta/actions'; import AlertaStyled from './style'; class Alerta extends Component { render() { const menu_variants = { open: { opacity: 1, y: 0, transition: 'all .2s ease' }, hidden: { opacity: 0, y: -2000, transition: 'all .2s ease' }, }; const { alerta, toggle } = this.props; return ( <AlertaStyled key="alerta"> <motion.div onClick={() => toggle({ show: false, route: alerta.route, }) } initial="hidden" animate={alerta.alert ? 'open' : 'hidden'} variants={menu_variants} > <div> <Text center size="30px"> {alerta.title ? alerta.title : ''} </Text> <Text size="14px" center> {alerta.description ? 
alerta.description : ''} </Text> <Link to={alerta.route}> <Button type="text" center> ok </Button> </Link> </div> </motion.div> </AlertaStyled> ); } } Alerta.propTypes = { alerta: PropTypes.object, }; const mapStateToProps = state => ({ alerta: state.alerta }); const mapDispatchToProps = dispatch => ({ toggle: data => { dispatch(AlertActions.setAlert(data)); dispatch(push(data.route)); }, }); export default connect(mapStateToProps, mapDispatchToProps)(Alerta); <file_sep>import * as types from './actionTypes'; export const setAlert = data => ({ type: types.SET_ALERT, payload: data, });
987528d7b1a90fb586946169c660b70be9253d81
[ "JavaScript", "Markdown" ]
30
JavaScript
juliocarneiro/test-front
6efe531e83a744ddd90bd298d2c7ac8fcb3708a4
acad2e32d77c9aad568f27a553b5e0c80b493090
refs/heads/master
<file_sep>import { AnyAction, Store, Dispatch } from 'redux' export default (store: Store<any>) => (next: Dispatch<any>) => (action: any) => { const { payload: _payload, type } = action const payload = _payload ? _payload : {} switch(type) { case 'INSTALLATION_PROGRESS': /** Ignore, this spams WAY too much in the console */ break case 'ADD_FUNCTION': console.log(`actions#${type}('NO_ID') ${payload.url} -> ${payload.repo}`) break case 'REDUX_STORAGE_LOAD': console.log('actions#STATE_LOAD([Object])') break case 'REDUX_STORAGE_SAVE': console.log('actions#STATE_SAVE([Object])') break default: if(payload.error) console.warn(payload.error) console.log(`actions#${type}(${payload.id || payload.error || 'NO_ID'})`) break } return next(action) }<file_sep># servicer A nodejs-based FaaS orchestration manager. Every function running in isolated required modules Please note, this is still just a proof of concept. The package is not yet safe to use and the context should be better isolated. Usage guidelines will come in the next days <file_sep>import { AnyAction, Store, Dispatch } from 'redux' import { connections } from '../socket' export default (store: Store<any>) => (next: Dispatch<any>) => (action: any) => { connections.forEach(socket => socket.sendMessage(action)) return next(action) }<file_sep>export const NotFound = (req, res) => { /** Send the 404 state */ res.writeHead(404, { 'Content-Type': 'text/json' }) res.end(JSON.stringify({ error: 404 })) }<file_sep>import { createStore, applyMiddleware, combineReducers } from 'redux' import { readFile as _readFile, writeFile as _writeFile } from 'fs' import { reducer as polyfillReducers, createMiddleware, createLoader } from 'redux-storage' /** Storage helpers */ import { join } from 'path' import * as pify from 'pify' /** Reducers */ import FunctionsReducer from './reducers/functions' /** Middlewares and helpers */ import debounceStorage from 'redux-storage-decorator-debounce' import LoggerMiddleware from './middlewares/logger' 
import SocketMiddleware from './middlewares/socket' const readFile = pify(_readFile) const writeFile = pify(_writeFile) const cachePath = join(process.cwd(), 'data.json') const reducers = combineReducers({ functions: FunctionsReducer }) const reducer = polyfillReducers(reducers) const engine = debounceStorage({ async load() { const result = await readFile(cachePath) return JSON.parse(result) }, save(state) { const keys = JSON.stringify(state).match(/[^\\]":/g).length const json = JSON.stringify(state, null, keys <= 200 ? 2 : 0) return writeFile(cachePath, json) } }, 1000) const StoreMiddleware = createMiddleware(engine) const createStoreWithMiddlewares = applyMiddleware( LoggerMiddleware, SocketMiddleware, StoreMiddleware )(createStore) const store = createStoreWithMiddlewares(reducer) const loader = createLoader(engine) loader(store) .catch(err => console.error('action#STATE_LOAD_ERROR', '\n', err)) export default store<file_sep>import { IFunctionAction } from '../interfaces' import { cloneFunction, installFunction, removeFunction } from '../manager' import { createHash } from 'crypto' import { removeKeys } from '../utils' import { join } from 'path' import db from '../db' import { clone, install, error, success } from '../actions/functions' export default (state, action: IFunctionAction) => { switch(action.type) { case 'ADD_FUNCTION': { const { url, repo, name } = action.payload /** Generate the id from a reproducible md5 hash */ const id = createHash('md5').update(url).digest('hex') /** Generate the function's path */ const path = join(process.cwd(), 'functions', id) const data = { id, url, name, path, repo, status: 'CLONING' } cloneFunction(id, repo) .then(done => db.dispatch(install(id))) .catch(err => db.dispatch(error(id, err))) return { ...state, [id]: data } } case 'INSTALL_FUNCTION': { const { id } = action.payload const path = join(process.cwd(), 'functions', id) installFunction([ 'install', '--json' ], { cwd: path }, db.dispatch) .then(done => 
db.dispatch(success(id))) .catch(err => db.dispatch(error(id, err))) return { ...state, [id]: { ...state[id], status: 'INSTALLING' } } } case 'SUCCESS_FUNCTION': { const { id } = action.payload return { ...state, [id]: { ...state[id], status: 'DONE' } } } case 'REMOVE_FUNCTION': { const { id } = action.payload removeFunction(id) .then(done => db.dispatch({ type: 'REMOVED_FUNCTION', payload: { id } })) .catch(err => error(id, err)) return { ...state, [id]: { ...state[id], status: 'REMOVING' } } } case 'REMOVED_FUNCTION': { return Object .keys(state) .reduce(removeKeys(state, action.payload.id), {}) } case 'EDIT_FUNCTION': { const { id } = action.payload /** Remove the current function */ db.dispatch({ type: 'REMOVE_FUNCTION', payload: { id } }) db.dispatch({ type: 'ADD_FUNCTION', payload: { ...state.functions[id], /** The data from the old function */ ...action.payload.changes } }) return { ...state, [id]: { ...state[id], status: 'CHANGING' } } } default: { return state || null } } }<file_sep>import { existsSync } from 'fs' import { spawn, SpawnOptions } from 'child_process' import { NPM_INSTALL } from './constants' import { NPM_CLIENT } from './constants' import { ISocket } from './interfaces' import { join } from 'path' import * as Clone from 'git-clone' import * as rmraf from 'rimraf' import * as pify from 'pify' /** Clone a git repository in a specified destination */ const clone = pify(Clone) const rmrf = pify(rmraf) export const removeFunction = async (id) => { const path = join(process.cwd(), 'functions', id) /** Remove the function folder if it already exists */ if (existsSync(path)) { await Promise.resolve(rmrf(path)) } } export const cloneFunction = async (id, repo) => { const path = join(process.cwd(), 'functions', id) await removeFunction(id) /** Clone the git repo containing the function */ await clone(repo, path) } export const installFunction = (args: string[], ops: SpawnOptions, dispatch?) 
=> new Promise((resolve, reject) => { const process = spawn(NPM_CLIENT, args, ops) process.stdout.on('data', handleData(dispatch)) process.stderr.on('data', handleData(dispatch)) process.on('exit', handleExit(dispatch, resolve)) }) export const handleData = (dispatch) => data => { let __data = data try { data = JSON.parse(data.toString()) } catch(e) {} dispatch({ type: 'INSTALLATION_PROGRESS', payload: { data: __data, state: 'progress' } }) } export const handleExit = (dispatch, res) => code => { dispatch({ type: 'INSTALLATION_EXIT', payload: { code } }) res() }<file_sep>export class Benchmark { /** Starting time of the mesurament */ private start = process.hrtime() /** Returns the elapsed time */ public elapsed(): number { const end = process.hrtime(this.start) return (end[0] * 1000) + (end[1] / 1000000) } } <file_sep>import * as rd from 'readline' import db from './db' import { openSocket } from './socket' import { openApi } from './http' import { API_PORT, SOCKET_PORT, ADDRESS } from './constants' openSocket(any => console.log('The socket is listening on port socket://%s:%s', ADDRESS, SOCKET_PORT)) openApi(any => console.log('The https api is listening on port http://%s:%s', ADDRESS, API_PORT)) rd.createInterface(process.stdin, process.stdout) .on('line', (input: string) => input == 'db' && console.log(JSON.stringify(db.getState(), null, 2))) .on('line', (input: string) => input == 'clear' && process.stdout.write('\x1B[2J'))<file_sep>import { AnyAction } from 'redux' export const clone = (id: string, repo: string): AnyAction => ({ type: 'CLONE_FUNCTION', payload: { id, repo } }) export const install = (id: string): AnyAction => ({ type: 'INSTALL_FUNCTION', payload: { id } }) export const error = (id: string, error: any): AnyAction => ({ type: 'CLONE_FUNCTION', payload: { id, error } }) export const success = (id: string): AnyAction => ({ type: 'SUCCESS_FUNCTION', payload: { id } }) <file_sep>import { address } from 'ip' export const API_PORT = 
process.env.API_PORT || 8080 export const SOCKET_PORT = process.env.SOCKET_PORT || 3434 export const ADDRESS = address() export const NPM_CLIENT = process.env.NPM_CLIENT || 'yarn' export const NPM_INSTALL = JSON.parse(process.env.NPM_INSTALL || '["install", "--json"]')<file_sep>import { createServer } from 'net' import * as jsonSocket from 'json-socket' import { SOCKET_PORT } from './constants' import { ISocket } from './interfaces' import db from './db' export const connections: ISocket[] = [] /** * Handles the connection of a socket. * Starts to listen for any input. * * @param socket The new connected socket */ export const handler = (socket: ISocket) => { socket = new jsonSocket(socket) connections.push(socket) socket.on('message', db.dispatch) socket.on('close', e => connections.splice(connections.indexOf(socket), 1)) } /** * The socket server that enables connection between * the backend and any other socket stream */ export const openSocket = (callback: Function) => createServer(handler).listen(SOCKET_PORT, callback)<file_sep>import * as pify from 'pify' import { parse } from 'url' import { API_PORT } from './constants' import { NotFound } from './404' import { getHandler } from './spawner' import { Benchmark } from './bench' import { createServer, IncomingMessage, ServerResponse } from 'http' import { IRoute, IRoutes, IMiddleware } from './interfaces' import db from './db' /** * Checks if a function in the database matches a given url * * @param url The url to be matched */ export const matches = url => id => url == <IRoutes>db.getState()['functions'][id]['url'] /** * Flattens an array, recursively, using es6 reducers * * @param flat The previous result * @param toFlatten The new array to be merged */ export const flatten = (flat: IRoute[], toFlatten?: IRoutes) => flat.concat(toFlatten instanceof Array ? 
flatten(toFlatten) : toFlatten) /** * Handles every request to the api port, finding the * correct url action, executing it with middlewars with a * full express-like api, including next functions. * * @param req The server/client request * @param res The server/client response */ export const Router = async (req: IncomingMessage, res: ServerResponse) => { try { const functions = <IRoutes>db.getState()['functions'] const url = parse(req.url).pathname const ids = Object.keys(functions) const results = ids.filter(matches(url)).map(id => functions[id]) /** 404 handle case */ if(!results.length) { const customHandler = functions[ids.find(matches('404'))] results.push(customHandler || NotFound) } const timer = new Benchmark() const handlers = results.map(getHandler).reduce(flatten, []) res.on('finish', e => { console.info('[HTTP][' + url + ']', 'Executed in', timer.elapsed()) }) /** The looping index */ let i = -1 /** * Generates a looper that keeps executing middlewares * as long as they return the next call. 
* * @param handlers The array of middlewares * @param req The server/client request * @param res The server/client response */ const next = () => { i++ if (handlers[i]) { console.log('[HTTP][' + url + ']', 'Executing middleware', i) handlers[i](req, res, next) } } next() } catch(error) { db.dispatch({ type: 'ROUTE_ERROR', payload: { error } }) } } export const openApi = (callback: Function) => createServer(Router).listen(API_PORT, callback)<file_sep>import { Socket } from 'net' import { AnyAction } from 'redux' export interface ISocket extends Socket { sendMessage: (data: AnyAction) => void } /** * An object composed by strings or other nested StringObjects */ export type StringObject = { [key: string]: string | StringObject | Object } export interface IRoutes { [key: string]: IRoute | IMiddleware } export type IMiddleware = (req, res, next) => void export interface IRoute { id: string path: string url: string name: string repo: string } export interface IStore { functions: { [key: string]: IRoute } } export type IFunctionAction = { type: string, payload: IFunctioPayload } export interface IFunctioPayload extends IRoute { changes: IFunctioPayload [key: string]: any }<file_sep># <-- | Docker image infos | --> # * NodeJS Version: 8.x # * Yarn Version: 1.3.2 # * Docker Version: ^=17.05 # <-- | /\\-//\-\/-/\\-//\ | --> # # Do a full installation and compilation then # remove all unnecessary files and compilation packages # FROM mhart/alpine-node:8 WORKDIR /app COPY data.json \ yarn.lock \ tsconfig.json \ package.json \ ./ RUN apk update && apk upgrade && \ apk add --no-cache bash git openssh ADD src ./src/ RUN yarn install RUN yarn build RUN yarn install --production RUN rm -rf ./src EXPOSE 8080 EXPOSE 3434 CMD ["node", "lib"]<file_sep>import { readFileSync, existsSync } from 'fs' import { runInNewContext, createContext } from 'vm' import { join } from 'path' import { IRoute } from './interfaces' export const getFunction = (route: IRoute) => { const _exports = 
require(route.path) const result = _exports.__esModule ? _exports.default : _exports return result } export const getHandler = route => typeof route == 'function' ? route : getFunction(route)<file_sep>export const removeKeys = (base, ...ids: string[]) => (result, key: string) => { if (ids.indexOf(key) == -1) { result[key] = base[key] } return result }
c692d54cf2c26f5657e895cca3304f84667e9371
[ "Markdown", "TypeScript", "Dockerfile" ]
17
TypeScript
lucat1/servicer
cdfe46cd7a3d48d3c6e8aa5f065c2dc33334bf27
ed67cba3f70bcd9db921ddd9aa956e5b470e6fdd
refs/heads/master
<file_sep># HEIF Mac OS X 10.13.4+: Convert any image to HEIF/HEIC format Usage: ```HEIF [-q=quality] <image>``` where quality is in range from 0.1 (max compression) to 1.0 (lossless), default is 0.76 Compiling on macOS (to create executable `HEIF` and copying to `bin` folder): cd HEIF swiftc -O -o HEIF main.swift cp HEIF /usr/local/bin Or simply run from terminal without compilation: ./main.swift <image1> [optional image2] [optional image3] [...] Please note: odd image dimensions will be truncated by Apple's codec to even ones. ## Swift 5 runtime Starting with Xcode 10.2, Swift 5 command line programs you build require the Swift 5 runtime support libraries built into macOS. These libraries are included in the OS starting with macOS Mojave 10.14.4. When running on earlier versions of macOS, [this package](https://support.apple.com/kb/DL1998?locale=en_US) must be installed to provide the necessary Swift 5 libraries. <file_sep>#!/usr/bin/swift // Copyright (c) by <NAME>, 2021 // See MIT license in LICENSE file. import Foundation import CoreImage let kToolVersion = "0.5" // Options parsed from command line. let kCompressionQualityOption = "-q=" let defaultCompressionQuality = 0.76 var compressionQuality = defaultCompressionQuality // Fills options and returns input image file. // OR prints usage and exits if input file wasn't specified. func ParseCommandLine() -> [URL] { var urls: [URL] = [] for i in 1..<Int(CommandLine.argc) { let arg = CommandLine.arguments[i] if arg.hasPrefix(kCompressionQualityOption) { compressionQuality = Double(arg.suffix(arg.count - kCompressionQualityOption.count)) ?? 
compressionQuality if compressionQuality == 0.0 { print("Apple's compressor will use some internal default quality level, empirically it is 0.76-0.77") } } else { urls.append(URL(fileURLWithPath:arg)) } } if urls.count == 0 { let kBinaryName = URL(fileURLWithPath:CommandLine.arguments[0]).lastPathComponent print("Converts image to HEIC format, version \(kToolVersion)") print("Usage: \(kBinaryName) [\(kCompressionQualityOption)quality] <image>") print("Default quality is \(defaultCompressionQuality) and it ranges from 0.1 (max compression) to 1.0 (lossless).") print("Please note: odd image dimensions will be truncated by codec to even ones.") exit(0) } return urls } let imageUrls = ParseCommandLine() for imageUrl in imageUrls { let image = CIImage(contentsOf: imageUrl) let context = CIContext(options: nil) let heicUrl = imageUrl.deletingPathExtension().appendingPathExtension("heic") let options = NSDictionary(dictionary: [kCGImageDestinationLossyCompressionQuality:compressionQuality]) try! context.writeHEIFRepresentation(of:image!, to:heicUrl, format: CIFormat.ARGB8, colorSpace: image!.colorSpace!, options:options as! [CIImageRepresentationOption : Any]) }
0c328cdd22b54a0a6d8d6eef8b0871151cc5641f
[ "Markdown", "Swift" ]
2
Markdown
biodranik/HEIF
6cb2a8a600006a8acd593c509b6092722e4b191e
98deb14ea0d249d8c187bfe5f293632cfc0c8937
refs/heads/master
<file_sep>arreglo = ['NOMBRE','EDAD'] persona1 = ['Enrique', 22] persona2 = ['Luis', 20] persona3 = ['Mariana', 27] print ''' ### %s ## %s ### Nombre: %s Edad: %d Nombre: %s Edad: %d Nombre: %s Edad: %d ''' %(arreglo[0], arreglo[1], persona1[0], persona1[1],persona2[0],persona2[1],persona3[0],persona3[1]) <file_sep>''' Este programa calcula el area de un triangulo dependiendo de los valores que introduce el usuario ''' print 'Hola voy a calcular el area de un triangulo' base = float( raw_input('Introduce la base de tu triangulo: ')) altura = float( raw_input ('Introduce la altura: ') ) area = (base * altura) / 2 print 'La base de tu triangulo fue %.3f \n la altura %.3f \n y tu area es %.3f' % (base, altura, area) <file_sep># -*- coding: utf-8 -*- ''' Diplomado Python Programming: https://github.com/enriquemore587/Python-2017 ejemplos de diferentes codigos en lenguajes de programacion distinta. https://gist.github.com/enriquemore587 openoyxl: Es una libreria la cual nos permite trabajar con archivos de Excel en sus diferendes formatos. Referencia: https://openpyxl.readthedocs.io/en/default/ ''' #PASOS A SEGUIR PARA LA LECTURA DE UN DOCUMENTO DE EXCEL CON EXTENCION ".xlsx" from openpyxl import load_workbook ''' DEBEMOS DE TENER EN CUENTA QUE UN DOCUMENTO DE EXCEL ES UN LIBRO EL CUAL TIENE HOJAS DENTRO DE EL MISMO COMO CUALQUIER OTRO LIBRO QUE SEA FISICO ''' ###################################################### PRIMER PASO. #TENEMOS QUE CARGAR A NUESTRO PROGRAMA EL LIBRO(workbook) wb = load_workbook("Libro1.xlsx") ###################################################### SEGUNDO PASO. 
# TENEMOS QUE ADQUIRIR LA HOJA (worksheet) QUE QUEREREMOS TRABAJAR [TENER EN CUENTA QUE PUEDE TENER MAS DE UNA HOJA] # SE PUEDE MENSIONAR LA HOJA A TRABAJAR O BIEN CON EL METODO ACTIVE # NOS DEVUELVE O CARGAR LA ULTIMA HOJA CON LA QUE SE TRABAJO #ws = wb["Hoja 1"] ws = wb.active ###################################################### TERCER PASO #OPTENER EL RANGO DE LAS CELDAS A TRABAJAR cells = ws["A1:B23"] ###################################################### CUARTO PASO # PROCESAMIENTO DE LOS DATOS. for row in cells: for cell in row: print cell.value print "-" * 30<file_sep>count = 1 while count <=2: matricula = raw_input('Matricula: ') nombre = raw_input('Nombre: ') edad = raw_input('Edad: ') carrera = raw_input('Carrera: ') calificacion = raw_input('calificacion: ') dato = '%s, %s, %s, %s, %s\n' % (str(matricula), str(nombre), str(edad), str(carrera), str(calificacion)) archivo = open('alumnos.txt', 'a') archivo.write(dato) archivo.close() count = count + 1 <file_sep>nombres = [] #declaramos el arreglo while True: #iniciamos un ciclo infinito nombre = raw_input("Escribe un nombre: ") #capturamos el nombre if nombre == 'fin': #SI ES FIN print nombres #print 'Finish Him' # imprimes finish Him break# rompes el ciclo #else:# SI NO! 
nombres.append(nombre) #almacenas el valor dentro del arreglo ''' n = 0 #inicias la variable en cero while n < nombres.__len__(): # iniciamos el ciclo #el parametro "_len_()" # CUENTA LOS VALORES DENTRO DEL ARREGLO NOMBRES print nombres[n]# muestras el valor de cada posicion menor #a nombres._len_() n = n + 1#aumentamos la variable n '''<file_sep>count = 0 while count <= 10: archivo = open('archivo.txt', 'a') archivo.write(str(count)+"\n") count = count + 1 archivo.close() <file_sep>import matplotlib.pyplot as mpl import moduloDiplomado as mD p1 = mD.createDic("martin", 23, 8) print p1 graf = mD.graficaX2([1,2,3,4,5,6,7,8,9,10]) x = graf["eje x"] y = graf["eje y"] mpl.plot(x,y) mpl.show() <file_sep>def crearArchivo(nombre): archivo = open(nombre + '.txt', 'w') archivo.write('Nombre del archivo %s' %nombre) archivo.close() crearArchivo('archivo1')<file_sep>nom = 'enrique' edad = 22 print 'Nombre: %s Tiene: %d' % (nom, edad) print 'Nombre: ' + nom + ' Tiene: ' + str(edad)<file_sep>enrique = 22 juanito = 15 print enrique,juanito # sintaxis if <condicion>: # identacion ____~~~~~~~ # identacion ____~~~~~~~ if enrique > 18: #Los bloques de codigo se diferencian por 4 espacios o #un tabulador print True<file_sep># range(numero) # range devuelve un arreglo con el rango desde # cero hasta un numero antes del indicado. # ejemplo1 range(2) devuelve 0, 1 # ejemplo2 range(4) devuelve 0, 1, 2, 3 nombre = "<NAME>, <NAME>" print nombre # el arreglo que regresa range se almacena en tam tam = range(50) # imprimo el arreglo completo print tam <file_sep>import socket socket = socket.socket() server, port = str(raw_input('Server: ')), int(raw_input('Port: ')) msg = 'Te conectaras a: %s en el puerto %d' % (server, port) print msg socket.connect((server,port)) <file_sep>#aqui se declara una variable de tipo String. # la palabra nombre es el nombre de la variable. 
#Y lo que esta entre comillas es el valor nombre = 'Brenda' # "Brenda" es valido guardar el texto entre "" o '' #variable de tipo entero # edad es el nombre de la variable #23 es el valor de la variable edad. edad = 23 print 'Mi nombre es: ',nombre, '\nTengo: ',edad print nombre,"Tiene: ",edad,"anios"<file_sep>''' se requiere un programa en python que le de a escojer al usuario que area quiere calcular. puede ser: *circulo *triangulo *cuadrado posterior a que eliga el usuario que area quiere calcular, procedera a leer los requerimientos necesarios. formulas: circulo -> pi * radio **2 cuadrado -> lado por lado triangulo -> (base * altura) / 2 ''' # mandamos mensaje de opciones print ''' opciones a elegir 1:calculo de un triangulo 2:calculo de un cuadrado 3:calculo de circulo ''' # leemos la opcion & la convertimos en entero opcion = int(raw_input ('dame lo que quieres calcular')) #validamos si la opcion fue 1 = Triandulo if opcion == 1: # leemos la altura altura = float(raw_input ('dame la altura')) # leemos la base base = float(raw_input ('dame la base')) # calculamos operacion= (base*altura)/2 # imprimimos resultado print 'el area de un triangulo es : %f' %(operacion) elif opcion == 2: lado = float(raw_input ('dame un lado')) operacion= lado*lado print 'el area de un cuadrado es : %f' %(operacion) elif opcion == 3: pi= 3.1416 radio = float(raw_input ('dame el radio')) operacion= pi*(radio**2) print 'el area de un circulo es : %f' %(operacion) else: print 'Numero invalido' <file_sep>''' requerimientos: agregen: 2 campos mas: numeroAlumnos campo maestros [] programa con una funcion que agregue: maestros programa con una funcion que agregue: especialidad & dentro de la especialidad dos materias ''' def add_maestro(nombreCarrera, nombreMaestro): pass tese = [ { 'NameC': 'isc', 'NumeroS':9, 'esp':[] }, { 'NameC': 'Aereonautica', 'NumeroS':7, 'esp': [] }, { 'NameC':' Mecatronica', 'NumeroS':8, 'esp': [] } ] def add_esp (nameC,esp,mat): tam_car_tese = len(tese) pos_elem 
= range(tam_car_tese) for carrera in pos_elem: if tese[carrera]['NameC'] == nameC: print tese[carrera]['esp'] add_esp('isc','redes',['redes1','redes2'])<file_sep>import socket socket = socket.socket() server, port = str(raw_input('Server: ')), int(raw_input('Port: ')) msg = 'Te conectaras a: %s en el puerto %d' % (server, port) print msg socket.connect((server,port)) ### ya existe una conexion msg2 = raw_input('TU: ') socket.send(msg2) recibido = socket.recv(1024) print 'el Server dice: %s' % recibido socket.close() <file_sep>from openpyxl import load_workbook, Workbook from openpyxl.utils import column_index_from_string def write_xl(filename, sheet_name, ini_cell, data, labels): try: wb = load_workbook(filename) except: wb = Workbook() if wb.sheetnames.count(sheet_name) > 0: ws = wb[sheet_name] else: ws = wb.create_sheet(title=sheet_name) for j in range(len(labels)): cell = ws.cell( row = ws[ini_cell].row, column = column_index_from_string(ws[ini_cell].column) + j ) cell.value = labels[j] for i in range(len(data)): dic = data[i] for j in range(len(labels)): cell = ws.cell( row = ws[ini_cell].row + 1 + i, column = column_index_from_string(ws[ini_cell].column) + j ) cell.value = dic[labels[j]] wb.save(filename) def range_xl(filename, sheet_name, cell_range): wb = load_workbook(filename, data_only=True) ws = wb[sheet_name] cells = ws[cell_range] return cells def data_build(mat): keys = mat[0] coll = [] for i in range(1, len(mat)): dic = {} for j in range(0, len(keys)): k = keys[j] if k != None and k != "": dic[k] = mat[i][j] coll.append(dic) return coll def matrix_xl(cells): mat = [] for row_xl in cells: row = [] for cell in row_xl: row.append(cell.value) mat.append(row) return mat def load_xl(filename, sheet_name, cell_range): cells = range_xl(filename, sheet_name, cell_range) mat = matrix_xl(cells) return data_build(mat) def data_map(data, fn_map): aux = [] for dic in data: x = fn_map(dic) if x != None: aux.append(x) return aux def filtro(cliente): mes = 
cliente['Fecha'].split('-') if int(mes[1]) == 1: return '%s,%d' % (mes[1], cliente['Consumo']) <file_sep>#se requiere un programa que pida los datos de una persona que quiera entrar al ranas, para que pueda entrar al ranas necesita ser mayor de edad , si no nombre = str( raw_input('Escribe tu nombre: ')) edad = int( raw_input('Escribe tu Edad: ')) if edad > 17: print 'Empedate a gusto' else: print 'Ni modo chavo no entras' if True: print True else: print False <file_sep>import socket puerto = int(raw_input('Indica el puerto al que deceas apropiarte: ')) print 'Creacion del Servidor' s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) print 'Servidor creado' print 'Seva a apropiar del puerto' s.bind(("", puerto)) print 'Se apropio del puerto %d' % puerto print 'Indicamos el numero de clientes que vamos a contestar' s.listen(1) print 'El servidor esta esperando a que se conecte alguien.' sc, addr = s.accept() print 'Imprimimos el socket que se conecto' print sc print 'Mostramos la direccion de quiens e conecto' print addr recibido = sc.recv(1024) print 'Cliente dice: %s' % recibido msg2 = raw_input('Tu: ') sc.send(msg2) print 'Cerramos el socket cliente' sc.close() print 'Cerramos el socket server' s.close() <file_sep>nombre = raw_input('Escribe tu nombre: ') edad = raw_input('Escribe tu Edad: ') print "Hola %s tu edad es %d" % (str(nombre), int(edad)) <file_sep># declaracion del arreglo con 5 elementos arreglo1 = ["Enrique", "Uriel", "Brenda", "Daniel", "Mariana"] # declaracion de una variable que se va a iterar i = 0 # while i sea menor a 5 por que de esta forma al #aumentar a i en uno dentro del while solo #solo llegaria a 4 while i < 5: print "El nombre de la posicion %d es %s"%(i,str(arreglo1[i])) i = i + 1 for elemento in arreglo1: print elemento<file_sep># arreglo de 3 elementos o tres casillas arreglo = [1,2,3] # arreglo en la posicion 0 = 1 print arreglo[0] # arreglo en la posicion 1 = 2 print arreglo[1] # arreglo en la posicion 2 = 3 print arreglo[2] # 
devuelve la longitud del arreglo tam = len(arreglo) # tam vale el tamanio del arreglo # genera un arreglo de pendiendo de lo que valga tam print range(tam) <file_sep>#v1 es nuestra variable de tipo entero. #15 es nuestro valor de la variable. v1 = 15 #v2 es nuestra variable de tipo entero. #4 es nuestro valor de la variable. v2 = 4 #Operacion. resultado = v1 * v2 # resultado es una variable que almacena el resultado de v1 por v2 print resultado,'Es el resultado de la multiplicacion de v1 y v2' # resultado2 otra variable que almacena el resultado de v1 entre v2 resultado2 = v1 / v2 print resultado2,'Es el resultado de la division de v1 y v2'<file_sep>tupla = ('direccion', 15) servidor, puerto = tupla print servidor print puerto print type(tupla) <file_sep>numero = int(raw_input('Introduce un numero: ')) iteracion = 0 while numero != iteracion: iteracion = iteracion + 1 print iteracion archivo = open("archivo.txt", 'w') archivo.write("a") archivo.close() <file_sep>def get_edad(persona): return persona[1] datos = ['Nombre', 'Edad', 'Carrera'] persona1 = ['Enrique', 22, 'Sistemas Computacionales'] persona2 = ['Mariana', 22, 'Sistemas Computacionales'] edades = [] edades.append(get_edad(persona1)) edades.append(get_edad(persona2)) print edades[0] print edades[1]<file_sep>arreglo = ['Mariana',55] directorio1 = { 'Nombre': '<NAME>', 'Casa': 1 } print directorio1['Casa'] print arreglo[1] directorio2 = { 'Nombre': 'Maria', 'Casa': 24 } directorio3 = { 'Nombre': 'Juan', 'Casa': 65 } <file_sep>def cuadrado(x): x=x**2 print x for i in range(10): cuadrado(i)<file_sep>count = 0 while True: archivo = open('bucle.py', 'a') archivo.write(str(count)+"\n") count = count + 1 archivo.close() <file_sep>#arreglo con 5 elementos arreglo1 = ['Enrique', 'Uriel', 'Brenda', 'Daniel', 'Mariana'] # variable con valor 0 posicion = 0 # imprimo lo que contenga el arreglo en la posicion 0 print arreglo1[posicion] # variable posicion incrementa su valor en 1 posicion = posicion +1 # imprimo lo que 
contenga el arreglo en la posicion 1 print arreglo1[posicion] # variable posicion incrementa su valor en 1 & queda con un valor 2 posicion = posicion +1 # imprimo lo que contenga el arreglo en la posicion 2 print arreglo1[posicion] # variable posicion incrementa su valor en 1 & queda con un valor 3 posicion = posicion +1 # imprimo lo que contenga el arreglo en la posicion 3 print arreglo1[posicion] # variable posicion incrementa su valor en 1 & queda con un valor 4 posicion = posicion +1 # imprimo lo que contenga el arreglo en la posicion 4 print arreglo1[posicion]<file_sep>nombres = [] #declaramos el arreglo while True: #iniciamos un ciclo infinito nombre = raw_input("Escribe un nombre: ") #capturamos el nombre if nombre == 'fin': #SI ES FIN print nombres #print 'Finish Him' # imprimes finish Him break# rompes el ciclo #else:# SI NO! nombres.append(nombre) #almacenas el valor dentro del arreglo<file_sep>import matplotlib def createDic(nom,edad,calif): dic={ "nombre": nom, "edad" : edad, "calificacion" : calif } return dic def graficaX2(ejex): ejey = [] for i in ejex: ejey.append(i **2) dic2={ "eje x": ejex, "eje y": ejey } return dic2 <file_sep>barrio = [] def add_cond(directory): barrio.append(directory) directorio1 = { 'Nombre': '<NAME>', 'Casa': 1 } add_cond(directorio1) directorio2 = { 'Nombre': 'Maria', 'Casa': 24 } add_cond(directorio1) directorio3 = { 'Nombre': 'Juan', 'Casa': 65 } add_cond(directorio3) print barrio print barrio[1] print barrio[0]['Nombre'] <file_sep>#!/usr/bin/env python # -*- coding: utf-8 -*- ################################################################## # # # https://github.com/enriquemore587/Python-2017 # # ################################################################## cadena = 'ingenieria en Sistemas Computacionales' ''' Método: capitalize() Retorna: una copia de la cadena con la primera letra en mayúsculas. print cadena.capitalize() ''' ''' Método: lower() Retorna: una copia de la cadena en minúsculas. 
print cadena.lower() ''' ''' Método: upper() Retorna: una copia de la cadena en mayúsculas. print cadena.upper() ''' ''' Método: swapcase() Retorna: una copia de la cadena convertidas las mayúsculas en minúsculas y viceversa. print cadena.swapcase() ''' ''' Método: center(longitud[, "caracter de relleno"]) Retorna: una copia de la cadena centrada. print cadena.center(50, "=") ''' ''' Método: count("subcadena" [, posicion_inicio, posicion_fin]) Retorna: un entero representando la cantidad de apariciones de subcadena dentro de cadena. ''' ''' Método: startswith("subcadena" [, posicion_inicio, posicion_fin]) Retorna: True o False print cadena.startswith("inge") print cadena.startswith("en", 14) cadena = 'hola como' print cadena.startswith("co", 5) ''' ############################################## # sustitucion ''' Método: format(*args, **kwargs) Retorna: la cadena formateada. cadena = "Hola me llamo {0}" print cadena.format("enrique") ''' ''' Eliminar caracteres a la izquierda y derecha de una cadena Método: strip(["caracter"]) Retorna: la cadena sustituida. cadena = ' https://github.com/enriquemore587/Python-2017 ' print cadena.strip(' ') ''' ''' Eliminar caracteres a la izquierda de una cadena Método: lstrip(["caracter"]) Retorna: la cadena sustituida. cadena = ' https://github.com/enriquemore587/Python-2017 ' print cadena.lstrip("://" ) ''' ''' Unir una cadena de forma iterativa Método: join(iterable) Retorna: la cadena unida con el iterable (la cadena es separada por cada uno de los elementos del iterable). ''' formato_numero_factura = ("Nº 0000-0", "-0000 (ID: ", ")") numero = "275" numero_factura = numero.join(formato_numero_factura) print numero_factura ''' Partir una cadena en tres partes, utilizando un separador Método: partition("separador") Retorna: una tupla de tres elementos donde el primero es el contenido de la cadena previo al separador, el segundo, el separador mismo y el tercero, el contenido de la cadena posterior al separador. 
tupla = "https://github.com/enriquemore587/Python-2017".partition(".com") print tupla parte1, parte2, parte3 = tupla print "parte1: {0}\n parte2: {1}".format(parte1, parte2) ''' ''' Partir una cadena en en líneas Método: splitlines() Retorna: una lista donde cada elemento es una fracción de la cadena divida en líneas. t exto = """Linea 1 Linea 2 Linea 3 Linea 4 """ print texto print texto.splitlines() ''' <file_sep>personas = [ { 'nombre': 'Enrique', 'edad': 22, 'carrera': 'ISC' }, { 'nombre': 'Mariana', 'edad': 22, 'carrera': 'ISC' }, { 'nombre': 'Daniel', 'edad': 22, 'carrera': 'Informatica' } ] print personas for i in personas: print i
f7c3320c84e01b860018a74a36e5905dc1722359
[ "Python" ]
35
Python
enriquemore587/Python-2017
d2c003177dcf6d40f369bbd543b46762856ee278
d4b8052d8d9fc945ef99c2ef3c093855c5ab7f77
refs/heads/master
<repo_name>mekroth/vue-component-rollup<file_sep>/README.md # Vue Shared Component Template Using Rollup.js > Build your standalone shared components using this tiny template. > This template is Vue 2.x **only**. > Share Vue components to the rest of the world! ### Features - Very easy to use template with rollup.js pre-configured - Supports Import and Browser installation for your plugin with distribution-ready files - Supports Stylus - Easy development with auto-compilation and publishing to npm ### Usage ``` npm install -g vue-cli vue init mekroth/vue-component-rollup my-shared-component ``` The generated output in ./dist can be used with node and the browser. # Plugin Development ## Installation You need to install the default dependencies: ``` npm install ``` ## Watch and compile This will run webpack in watching mode and output the compiled files in the `dist` folder. ``` npm run dev ``` ## Manual build This will build the plugin into the `dist` folder in production mode. ``` npm run build ``` For configuration and detailed explanation on how things work, consult the [docs for vue-cli build](https://github.com/vuejs/vue-cli/blob/master/docs/build.md). 
### TODO - Include unit testing - Correctly generate css in dist <file_sep>/template/rollup.config.js import path from 'path' import vue from 'rollup-plugin-vue' import babel from 'rollup-plugin-buble' import resolve from 'rollup-plugin-node-resolve' import progress from 'rollup-plugin-progress' import filesize from 'rollup-plugin-filesize' import uglify from 'rollup-plugin-uglify-es' import alias from 'rollup-plugin-path-alias' import postcss from 'rollup-plugin-postcss' import stylus from 'stylus' import clean from 'postcss-clean' const config = require('./package.json') const outputFile = '{{ name }}' /* eslint-disable */ const globalName = '{{ library }}' /* eslint-enable */ const preprocessor = (content, id) => new Promise((resolve, reject) => { const renderer = stylus(content, { filename: id, sourcemap: { inline: true } }); renderer.render((err, code) => { if (err) { return reject(err); } resolve({ code, map: renderer.sourcemap }); }); }); export default { entry: './src/index.js', format: 'umd', dest: './dist/'+outputFile+'.js', sourceMap: true, moduleName: globalName, plugins: [ progress({ clearLine: false // default: true }), filesize(), resolve({ jsnext: true, main: true, browser: true }), alias({ paths: { '~stylus': path.resolve(__dirname, 'src/stylus'), '~components': path.resolve(__dirname, 'src/components') }, extensions: ['js', 'styl', 'json', 'vue'] }), vue(), postcss({ sourceMap: true, extract: './dist/'+outputFile+'.css', plugins: [ clean ], preprocessor, extensions: ['.styl'] }), babel({ exclude: 'node_modules/**' }), uglify() ] } <file_sep>/template/src/components/container/ContainerHeader.js export default { name: 'v-container-header', render(h) { return h('div', { 'class': 'container-header' }) } } <file_sep>/meta.js function kebabToCamel(name) { name = name.replace(/\-(\w)/g, (match, p1) => { return p1.toUpperCase(); }); name = name.replace(/^\w/, (match) => { return match.toUpperCase(); }); return name } module.exports = { "prompts": { name: 
{ type: 'string', required: true, message: 'Component name' }, library: { type: 'string', required: true, message: 'Library name for browser usage', default(answers) { if (answers.name) { return kebabToCamel(answers.name) } else { return '' } } }, description: { type: 'string', required: false, message: 'Component description', default: 'A Vue.js Component' }, version: { type: 'string', required: false, message: 'Initial version', default: '0.0.1' }, author: { type: 'string', message: 'Author' }, githubAccount: { type: 'string', required: false, message: 'GitHub Account', default: '' }, css: { type: "list", message: "Pick a css language", choices: [ "css", "sass", "less", "stylus" ] } }, helpers: { authorFullNameFrom: function (author) { const startPosition = author.indexOf('<') return author.slice(0, startPosition - 1) }, authorEmailFrom: function (author) { const startPosition = author.indexOf('<') const endPosition = author.indexOf('>') return author.slice(startPosition + 1, endPosition) } }, completeMessage: "To get started:\n\n cd {{destDirName}}\n npm install\n npm run dev" } <file_sep>/template/src/components/index.js import Container from './container' export default Object.assign({}, Container ) <file_sep>/template/src/index.js import '~stylus/main.styl' import Components from './components' function plugin (Vue) { Object.keys(Components).forEach(key => { Vue.component(key, Components[key]) }) } if (typeof window !== 'undefined' && window.Vue) { window.Vue.use(plugin) } export default plugin
f902fbde839cd67b416fdc0c087b6a1c03e2f9a1
[ "Markdown", "JavaScript" ]
6
Markdown
mekroth/vue-component-rollup
741b1c97995d3879db379a054702dcc2fc766dd0
a77d32e26c4332283e2c0b52de203fd1dbee3296
refs/heads/master
<repo_name>HIT-WI/CascadingFailure<file_sep>/get_CF_curve_node_isfail.py import numpy as np import torch import networkx as nx from scipy.special import comb import math import collections DG = nx.DiGraph() SAMPLE_NUM = 0 MAX_DEGREE_E = 0 MAX_DEGREE_D = 0 node_examine_num = 0 node_disease_num = 0 degree_avg_E = 0 degree_avg_D = 0 TIMESTEPS = 121 def read_data_init(filePath_init, filePath_death): global SAMPLE_NUM data_input = [] fail_node_all = [] with open(filePath_init, "r") as file_init: for line in file_init.readlines(): failure_pro_init = float(str(line.strip('\n')).split('\t')[1]) data_input.append(1 - failure_pro_init) fail_node_all.append(line.strip('\n').split('\t')[2].split('::')) data_death = [] with open(filePath_death, "r") as file_death: for line in file_death.readlines(): is_death = float(str(line.strip('\n')).split('\t')[1]) data_death.append(is_death) SAMPLE_NUM = len(data_input) return data_input, data_death, fail_node_all def build_data_graph(filePath_weight, filePath_graph): global MAX_DEGREE_E global MAX_DEGREE_D global node_examine_num global node_disease_num global degree_avg_E global degree_avg_D with open(filePath_graph, "r") as file_graph: for line in file_graph.readlines(): elements = str(line.strip('\n')).split('\t') if not DG.__contains__(elements[0]): DG.add_node(elements[0], type='examine') if not DG.__contains__(elements[1]): DG.add_node(elements[1], type='disease') with open(filePath_weight, "r") as file_weight: for line in file_weight.readlines(): # print(line) elements = str(line.strip('\n')).split('::')[0].split('-->') weight = float(str(line.strip('\n')).split('::')[1]) if not DG.has_edge(elements[0], elements[1]): DG.add_edge(elements[0], elements[1], weight=weight) degree_dict_e = {} degree_dict_d = {} for node in DG.nodes(data=True): if node[1]['type'] is 'examine': node_examine_num += 1 degree = DG.in_degree(node[0]) degree_avg_E += degree if degree in degree_dict_e: degree_dict_e[degree].append(node[0]) else: 
degree_dict_e.setdefault(degree, []).append(node[0]) else: node_disease_num += 1 degree = DG.in_degree(node[0]) degree_avg_D += degree if degree in degree_dict_d: degree_dict_d[degree].append(node[0]) else: degree_dict_d.setdefault(degree, []).append(node[0]) degree_avg_E /= node_examine_num degree_avg_D /= node_disease_num degree_dict_e_sorted = sorted(degree_dict_e.items(), key=lambda x: x[0]) degree_dict_d_sorted = sorted(degree_dict_d.items(), key=lambda x: x[0]) MAX_DEGREE_E = degree_dict_e_sorted[-1][0] MAX_DEGREE_D = degree_dict_d_sorted[-1][0] return degree_dict_e_sorted, degree_dict_d_sorted def generate_r(degree_dic_sorted): flag = 1 for degree_node in degree_dic_sorted: for node in degree_node[1]: node_j = np.zeros((degree_node[0] + 1, 1)) neighbor_alpha = [] for neighbor in DG.neighbors(node): neighbor_alpha.append(DG[neighbor][node]['weight']) neighbor_alpha = sorted(neighbor_alpha, reverse=True) for j in range(degree_node[0] + 1): if j == 0: continue else: node_j[j, 0] = (sum(neighbor_alpha[:j]) + sum(neighbor_alpha[-j:])) / 2 if flag == 1: sample = node_j flag = 0 else: sample = np.concatenate((sample, node_j), axis=0) return torch.tensor(sample) def cal_function_W(word_fail, label, x_input, beta, network_type): W = 0 node_ep = [] node_effetive_probability = {} fail_node_num_old = 0 for degree, sample in x_input: total_temp = 0 for node in sample: effetive_probability = 0 if label[node] == "0": if node in word_fail: m = len(word_fail[node]) else: m = 0 for j in range(degree + 1 - m): if network_type: index = node_index_dic_e[node] + j r = r_e[index] else: index = node_index_dic_d[node] + j r = r_d[index] effetive_probability += comb(degree - m, j) * math.pow(beta, j) * math.pow(1 - beta, degree - m - j) * r else: fail_node_num_old += 1 total_temp += effetive_probability if node not in node_effetive_probability: node_effetive_probability[node] = effetive_probability if network_type: W += total_temp / node_examine_num else: W += total_temp / 
node_disease_num node_effetive_probability_sorted = sorted(node_effetive_probability, key=node_effetive_probability.__getitem__, reverse=False) if network_type: fail_node_num = node_examine_num - int(W * node_examine_num) else: fail_node_num = node_disease_num - int(W * node_disease_num) total = (fail_node_num - fail_node_num_old) * 1 // 100 flag = 0 for node in node_effetive_probability_sorted: if flag >= total: break if label[node] == "0": for neighbor in DG.neighbors(node): if neighbor not in word_fail: word_fail[neighbor] = [] if node not in word_fail_dic[neighbor]: word_fail[neighbor].append(node) flag += 1 label[node] = "1" for degree, nodes in x_input: for node in nodes: if label[node] == "1": node_ep.append(1) else: node_ep.append(0) return W, np.array(node_ep).squeeze(), word_fail, label def cal_function_Z(word_fail, label, x_input, beta, network_type): Z = 0 for degree, sample in x_input: total_temp = 0 for node in sample: effetive_probability = 0 if label[node] == "0": if node in word_fail: m = len(word_fail[node]) else: m = 0 for j in range(degree - m): if network_type: index = node_index_dic_e[node] + j + 1 r = r_e[index] else: index = node_index_dic_d[node] + j + 1 r = r_d[index] total_temp += comb(degree - 1 - m, j) * math.pow(beta, j) * math.pow(1 - beta, degree - 1 - m - j) * r total_temp += effetive_probability if network_type: Z += degree * total_temp / (degree_avg_E * node_examine_num) else: Z += degree * total_temp / (degree_avg_D * node_disease_num) return Z department_id = ['7', '12', '15', '23', '50', '52'] for id in department_id: data_input, data_death, fail_node_all = read_data_init('./data/' + id + '/Initial_failure_probability_20_' + id + '_dis.txt', './data/' + id + '/hadm_death_20_' + id + '.txt' ) degree_dict_e_sorted, degree_dict_d_sorted = build_data_graph('./data/' + id + '/weight_' + id + '.txt', './data/' + id + '/exam_disease_20-' + id + '.txt') print('SAMPLE_NUM::', SAMPLE_NUM, ';MAX_DEGREE_E::', MAX_DEGREE_E, 
';MAX_DEGREE_D::', MAX_DEGREE_D, ';node_examine_num::', node_examine_num, ';node_disease_num::', node_disease_num, ';degree_avg_E::', degree_avg_E, ';degree_avg_D::', degree_avg_D) r_e = generate_r(degree_dict_e_sorted) r_d = generate_r(degree_dict_d_sorted) node_index_dic_e = collections.OrderedDict() node_index_dic_d = collections.OrderedDict() index = 0 for degree, nodes in degree_dict_e_sorted: for node in nodes: node_index_dic_e[node] = index index += degree + 1 index = 0 for degree, nodes in degree_dict_d_sorted: for node in nodes: node_index_dic_d[node] = index index += degree + 1 isfail_node_all = [] for sample_index in range(SAMPLE_NUM): word_label = {} word_fail_dic = {} fail_node = [] fail_node_exam_all = [] for node in DG.nodes: if node in fail_node_all[sample_index]: word_label[node] = "1" else: word_label[node] = "0" for node in fail_node_all[sample_index]: for neighbor in DG.neighbors(node): if neighbor not in word_fail_dic: word_fail_dic[neighbor] = [] if node not in word_fail_dic[neighbor]: word_fail_dic[neighbor].append(node) miu = np.zeros(TIMESTEPS + 1) miu[0] = data_input[sample_index] state = np.zeros(TIMESTEPS + 1) state[0] = data_input[sample_index] isfail_node_e = [] isfail_node_d = [] init_node_d = [] for degree, nodes in degree_dict_d_sorted: for node in nodes: if word_label[node] == "1": init_node_d.append(1) else: init_node_d.append(0) isfail_node_d.append(np.array(init_node_d)) for time_step in range(TIMESTEPS): if time_step % 2 == 0: miu_now, node_ep, word_fail_exam, word_label = cal_function_W(word_fail_dic, word_label, degree_dict_e_sorted, state[time_step], True) miu[time_step + 1] = miu_now isfail_node_e.append(node_ep) h_state_temp = cal_function_Z(word_fail_dic, word_label, degree_dict_e_sorted, state[time_step], True) state[time_step + 1] = h_state_temp else: miu_now, node_ep, word_fail_dic, word_label = cal_function_W(word_fail_exam, word_label, degree_dict_d_sorted, state[time_step], False) miu[time_step + 1] = state[0] * 
miu_now isfail_node_d.append(node_ep) h_state_temp = cal_function_Z(word_fail_exam, word_label, degree_dict_d_sorted, state[time_step], False) state[time_step + 1] = state[0] * h_state_temp isfail_node = np.concatenate((np.array(isfail_node_e), np.array(isfail_node_d)), axis=1) isfail_node_all.append(isfail_node) isfail_all = np.array(isfail_node_all) path_name = './data/' + id + '/CF_node_is_fail.npy' np.save(path_name, isfail_all)<file_sep>/README.md # DECAF: An Interpretable Deep Cascading Framework for ICU Mortality Prediction ./data includes data of 6 wards, and each ward has the following data:\ (1)exam_disease: Ward network structure\ (2)hadm_death: Whether the patient died in hospital, 1 is death, 0 is survival\ (3)hadm_record: Abnormal examination and disease of all patients during hospitalization\ (4)Initial_character: Initial characteristics of network nodes\ (5)Initial_failure_probability: Initial failure probability of patients in disease network\ (6)weight: Weights of directed edges in networks\ \ Program start\ First,run ./get_CF_curve_node_eff_pro.py or ./get_CF_curve_node_isfail.py\ Second,run ./predict_GRU_CUDA.py ./predict_lstm_CUDA.py ./predict_Transformer_CUDA.py <file_sep>/predict_Transformer_CUDA.py import numpy as np import torch from torch import nn import math import time batch_size = 776 * 2 epochs = 1000 SEED = 16 np.random.seed(SEED) cuda = torch.cuda.is_available() if cuda: torch.cuda.manual_seed(SEED) class PositionalEncoding(nn.Module): def __init__(self, d_model, dropout=0.1, max_len=60): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) pe = torch.zeros(max_len, d_model) position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0).transpose(0, 1) self.register_buffer('pe', pe) def forward(self, 
x): x = x + self.pe[:x.size(0), :] return x class CF(nn.Module): def __init__(self, n_hidden, length, input_dim, hidden_dim, layer_num): super(CF, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.pos_encoder = PositionalEncoding(n_hidden) self.hidden_ = torch.nn.Linear(input_dim, n_hidden) encoder_layer = torch.nn.TransformerEncoderLayer(d_model=n_hidden, nhead=2, dim_feedforward=hidden_dim, dropout=0.0) self.transformer = torch.nn.TransformerEncoder(encoder_layer, num_layers=layer_num) self.hidden = torch.nn.Linear(n_hidden, 1) self.out = torch.nn.Linear(length, 2) def forward(self, miu): miu = self.hidden_(miu) miu = self.pos_encoder(miu) x = self.transformer(miu) x = self.hidden(x) output = self.out(x.squeeze().transpose(0, 1)) return output data_death = [] with open("./data/7/hadm_death_20_7.txt", "r") as file_death: for line in file_death.readlines(): is_death = float(str(line.strip('\n')).split('\t')[1]) data_death.append(is_death) data_death_T = torch.LongTensor(data_death) group = 776 # cf_record = np.load("./data/7/CF_node_polynomial_eff_pro.npy") # cf_record = np.load("./data/7/CF_node_is_fail.npy") cf_record = np.load("./data/7/CF_node_eff_pro.npy") hidden_dim_all = [256, 32, 64, 128] weight_CRs = [0.3, 0.5, 0.7, 0.9, 1.0] N_HIDDEN = 128 MAX_AUC_list = [] for index in range(5): # with open("./data/7/AUC_result_Transformer_polynomial_eff_pro.txt", "a") as f: with open("./data/7/AUC_result_Transformer_node_eff_pro.txt", "a") as f: # with open("./data/7/AUC_result_Transformer_node_is_fail.txt", "a") as f: f.write(str(index+1)+'\n') max_AUC = 0 for weight_CR in weight_CRs: for hidden_dim in hidden_dim_all: length = 60 input_dim = 452 layer_num = 2 learning_rate = 0.0005 if index == 0: cal_r_is_fail_train = cf_record[group*1:] cal_r_is_fail_test = cf_record[:group*1] elif index == 4: cal_r_is_fail_train = cf_record[:group*4] cal_r_is_fail_test = cf_record[group*4:] else: cal_r_is_fail_train = 
np.concatenate((cf_record[group*0:group*index], cf_record[group*(index+1):])) cal_r_is_fail_test = cf_record[group*(index):group*(index+1)] miu_train_new = [] for i in range(cal_r_is_fail_train.shape[1]): miu_train_new.append(cal_r_is_fail_train[:,i,:]) miu_train_new = torch.Tensor(miu_train_new) miu_test_new = [] for i in range(cal_r_is_fail_test.shape[1]): miu_test_new.append(cal_r_is_fail_test[:,i,:]) miu_test_new = torch.Tensor(miu_test_new) net = CF(n_hidden=N_HIDDEN, length=length, input_dim=input_dim, hidden_dim=hidden_dim, layer_num=layer_num) optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate) loss_func = torch.nn.CrossEntropyLoss(weight=torch.Tensor([weight_CR, 1.0]).cuda()) if cuda: net.cuda() # miu_train_new = miu_train_new.cuda() miu_test_new = miu_test_new.cuda() data_death_T = data_death_T.cuda() best_epoch = 0 best_loss = 10000 bad_count = 0 patience = 100 start = time.time() steps = group * 4 // batch_size for epoch in range(epochs): now = time.time() net.train() optimizer.zero_grad() for step in range(steps): head = step * batch_size tail = (step + 1) * batch_size miu_train_batch = miu_train_new[:, head:tail, :] if cuda: miu_train_batch = miu_train_batch.cuda() out_train = net(miu_train_batch) if index == 0: data_death_Tensor = data_death_T[group*1:] elif index == 4: data_death_Tensor = data_death_T[:group*4] else: data_death_Tensor = torch.cat((data_death_T[:group*index], data_death_T[group*(index+1):])) loss = loss_func(out_train, data_death_Tensor[head:tail]) loss.backward() optimizer.step() net.eval() out_test = net(miu_test_new) if index == 0: loss_test = loss_func(out_test, data_death_T[:group*1]) target_y = data_death_T[:group*1].cpu().data.numpy() elif index == 4: loss_test = loss_func(out_test, data_death_T[group*4:]) target_y = data_death_T[group*4:].cpu().data.numpy() else: loss_test = loss_func(out_test, data_death_T[group*index:group*(index+1)]) target_y = data_death_T[group*index:group*(index+1)].cpu().data.numpy() if 
loss_test < best_loss: best_loss = loss_test best_epoch = epoch bad_count = 0 best_out_test = out_test.cpu() else: bad_count += 1 if bad_count == patience: break True_sample_pro = best_out_test[:, 0].detach().numpy().tolist() index_dic = {} for id, pro in enumerate(True_sample_pro): index_dic[id] = pro index_dic = sorted(index_dic.items(), key=lambda item:item[1], reverse=True) FPR_list = [0.0] TPR_list = [0.0] TP = 0 FN = 0 FP = 0 TN = 0 count = 0 for id in index_dic: if target_y[id[0]] == 0: TP += 1 else: FP += 1 if index == 4: FN = group + 4 - target_y.sum() - TP else: FN = group - target_y.sum() - TP TN = target_y.sum() - FP FPR = FP / (TN + FP) TPR = TP / (TP + FN) FPR_list.append(FPR) TPR_list.append(TPR) AUC = 0 for id, x in enumerate(FPR_list[:-1]): AUC += (FPR_list[id+1] - x) * (TPR_list[id] + TPR_list[id+1]) AUC = AUC/2 if AUC > max_AUC: max_AUC = AUC f.write("seq_len=" + str(length) + ' ') f.write("input=" + str(input_dim) + ' ') f.write("hidden=" + str(hidden_dim) + ' ') f.write("layer_num=" + str(layer_num) + ' ') f.write("w_CR=" + str(weight_CR) + ' ') f.write("lr=" + str(learning_rate) + ' ') f.write("AUC="+str(AUC)+'\n') f.write("MAX_AUC="+str(max_AUC)+'\n') MAX_AUC_list.append(max_AUC) if index == 4: sum = 0 f.write('(') for i, auc in enumerate(MAX_AUC_list): sum += auc if i < 4: f.write(str(auc) +'+') else: f.write(str(auc)) f.write(')/5=') f.write(str(sum/5) + '\n') <file_sep>/predict_GRU_CUDA.py import numpy as np import torch from torch import nn import time epochs = 2000 SEED = 16 np.random.seed(SEED) cuda = torch.cuda.is_available() if cuda: torch.cuda.manual_seed(SEED) class CF(nn.Module): def __init__(self, input_dim, hidden_dim, layer_num): super(CF, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.gru = torch.nn.GRU(input_size=self.input_dim, hidden_size=self.hidden_dim, num_layers=layer_num) self.out = torch.nn.Linear(self.hidden_dim, 2) # output layer def forward(self, miu): output, hn = self.gru(miu) 
output_in_last_timestep = hn[-1, :, :] x = self.out(output_in_last_timestep) return x data_death = [] with open("./data/7/hadm_death_20_7.txt", "r") as file_death: for line in file_death.readlines(): is_death = float(str(line.strip('\n')).split('\t')[1]) data_death.append(is_death) data_death_Tensor = torch.LongTensor(data_death) group = 776 # cf_record = np.load("./data/7/CF_node_polynomial_eff_pro.npy") # cf_record = np.load("./data/7/CF_node_is_fail.npy") cf_record = np.load("./data/7/CF_node_eff_pro.npy") hidden_dim_all = [32, 64, 128, 256] weight_CRs = [0.3, 0.5, 0.7, 0.9, 1.0] MAX_AUC_list = [] for index in range(5): # with open("./data/7/AUC_result_GRU_polynomial_eff_pro.txt", "a") as f: with open("./data/7/AUC_result_GRU_node_eff_pro.txt", "a") as f: # with open("./data/7/AUC_result_GRU_node_is_fail.txt", "a") as f: f.write(str(index+1)+'\n') max_AUC = 0 for weight_CR in weight_CRs: for hidden_dim in hidden_dim_all: length = 60 input_dim = 451 layer_num = 2 learning_rate = 0.0005 if index == 0: cal_r_is_fail_train = cf_record[group * 1:] cal_r_is_fail_test = cf_record[:group * 1] elif index == 4: cal_r_is_fail_train = cf_record[:group * 4] cal_r_is_fail_test = cf_record[group * 4:] else: cal_r_is_fail_train = np.concatenate((cf_record[group * 0:group * index], cf_record[group * (index + 1):])) cal_r_is_fail_test = cf_record[group * index:group * (index + 1)] miu_train_new = [] for i in range(cal_r_is_fail_train.shape[1]): miu_train_new.append(cal_r_is_fail_train[:, i, :]) miu_train_new = torch.Tensor(miu_train_new) miu_test_new = [] for i in range(cal_r_is_fail_test.shape[1]): miu_test_new.append(cal_r_is_fail_test[:, i, :]) miu_test_new = torch.Tensor(miu_test_new) net = CF(input_dim=input_dim, hidden_dim=hidden_dim, layer_num=layer_num) # d optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate) loss_func = torch.nn.CrossEntropyLoss(weight=torch.Tensor([weight_CR, 1.0]).cuda()) if cuda: net.cuda() miu_train_new = miu_train_new.cuda() miu_test_new 
= miu_test_new.cuda() data_death_Tensor = data_death_Tensor.cuda() best_epoch = 0 best_loss = 10000 bad_count = 0 patience = 100 start = time.time() for epoch in range(epochs): now = time.time() net.train() optimizer.zero_grad() out_train = net(miu_train_new) if index == 0: loss = loss_func(out_train, data_death_Tensor[group*1:]) elif index == 4: loss = loss_func(out_train, data_death_Tensor[:group*4]) else: loss = loss_func(out_train, torch.cat((data_death_Tensor[:group*index], data_death_Tensor[group*(index+1):]))) loss.backward() optimizer.step() net.eval() out_test = net(miu_test_new) if index == 0: loss_test = loss_func(out_test, data_death_Tensor[:group*1]) target_y = data_death_Tensor[:group*1].cpu().data.numpy() elif index == 4: loss_test = loss_func(out_test, data_death_Tensor[group*4:]) target_y = data_death_Tensor[group*4:].cpu().data.numpy() else: loss_test = loss_func(out_test, data_death_Tensor[group*index:group*(index+1)]) target_y = data_death_Tensor[group*index:group*(index+1)].cpu().data.numpy() if loss_test < best_loss: best_loss = loss_test best_epoch = epoch bad_count = 0 best_out_test = out_test.cpu() else: bad_count += 1 if bad_count == patience: break True_sample_pro = best_out_test[:, 0].detach().numpy().tolist() index_dic = {} for id, pro in enumerate(True_sample_pro): index_dic[id] = pro index_dic = sorted(index_dic.items(), key=lambda item:item[1], reverse=True) FPR_list = [0.0] TPR_list = [0.0] TP = 0 FN = 0 FP = 0 TN = 0 for id in index_dic: if target_y[id[0]] == 0: TP += 1 else: FP += 1 if index == 4: FN = group + 4 - target_y.sum() - TP else: FN = group - target_y.sum() - TP TN = target_y.sum() - FP FPR = FP / (TN + FP) TPR = TP / (TP + FN) FPR_list.append(FPR) TPR_list.append(TPR) AUC = 0 for id, x in enumerate(FPR_list[:-1]): AUC += (FPR_list[id+1] - x) * (TPR_list[id] + TPR_list[id+1]) AUC = AUC/2 if AUC > max_AUC: max_AUC = AUC f.write("seq_len=" + str(length) + ' ') f.write("input=" + str(input_dim) + ' ') f.write("hidden=" + 
str(hidden_dim) + ' ') f.write("layer_num=" + str(layer_num) + ' ') f.write("w_CR=" + str(weight_CR) + ' ') f.write("lr=" + str(learning_rate) + ' ') f.write("AUC="+str(AUC)+'\n') f.write("MAX_AUC="+str(max_AUC)+'\n') MAX_AUC_list.append(max_AUC) if index == 4: sum = 0 f.write('(') for i, auc in enumerate(MAX_AUC_list): sum += auc if i < 4: f.write(str(auc) +'+') else: f.write(str(auc)) f.write(')/5=') f.write(str(sum/5) + '\n')
e23fb8c9505dcd49ce276db5568673b53b5f9174
[ "Markdown", "Python" ]
4
Python
HIT-WI/CascadingFailure
296ddba17c4317377029f65618129bf2aaa21a30
9d2a7e1bd2ff57a497cd1de89881279a436d6600
refs/heads/main
<file_sep># export-date-of-retweet Export original date of retweet ## About Current twitter archive (as of 2020) includes 'created_at' property of retweet. However, it does not show the date of original tweet but the date of retweeting it. This app exports the date of the original tweet from your twitter archive. The target archive file is '(archive directory)/data/tweet.js'. Please enter a valid path to the file. This app use Twitter API. You must prepare your 'consumer key' 'consumer key secret' 'callback url' in advance. ## Usage ``` $ npm run build $ node dist/main.js --cousumer-key=<consumer key> --consumer-key-secret=<consumer key secret> --callback-url=<callback url> <your path to tweet.js> ``` After you run this app from the console, <callback url> is shown on your web browser. Please get <oauth_token> and <oauth_verifier> from the url of the shown page, then enter them to the console. ``` Enter oauth_token: Enter oauth_verifier: ``` The output file is 'retweets_with_original_date.csv' in the same directory of tweet.js. ## Note This app uses tweet.js and Twitter API, however all processing could be completed with just Twitter API. <file_sep>import Twitter from 'twitter-lite'; import fs from 'fs'; import open from 'open'; const usage = () => { console.log(`Usage: node dist/main.js --cousumer-key=<key> --consumer-key-secret=<secret> --callback-url=<callback url> <your path to tweet.js> e.g.) 
$ node dist/main.js /home/your-name/twitter-archive/data/tweet.js`); } if (process.argv.length !== 6) { usage(); process.exit(1); } let tweet_path; let consumer_key = ''; let consumer_key_secret = ''; let callback_url = ''; let invalid_arg = true; for (let i = 2; i < 5; i++) { const arg = process.argv[i]; const arg_array = arg.split('='); if (arg_array.length == 2) { switch (arg_array[0]) { case '--consumer-key': consumer_key = arg_array[1]; invalid_arg = false; break; case '--consumer-key-secret': consumer_key_secret = arg_array[1]; invalid_arg = false; break; case '--callback-url': callback_url = arg_array[1]; invalid_arg = false; break; default: break; } } } if (invalid_arg) { console.error('Error: Invalid args'); usage(); process.exit(1); } tweet_path = process.argv[5]; const output_path = tweet_path.replace('tweet.js', 'retweets_with_original_date.csv'); if (!fs.existsSync(tweet_path)) { console.error(`Error: File not exist: ${tweet_path}`); usage(); process.exit(1); } let tweet_str; try { tweet_str = fs.readFileSync(tweet_path, 'utf-8'); } catch { console.error('Error: Cannot read file'); usage(); process.exit(1); } let tweet_json: any; try { // Replace assignment expression to JSON format const tweet_str_json = tweet_str.replace('window.YTD.tweet.part0 = ', ''); tweet_json = JSON.parse(tweet_str_json); } catch { console.error('Error: Invalid file format'); usage(); process.exit(1); } let success = 0; let failure = 0; let output_data = ''; const getOriginalTweetsByAPI = async () => { let accessToken: any; const auth = async () => { const client = new Twitter({ // @ts-ignore consumer_key: consumer_key, // @ts-ignore consumer_secret: consumer_key_secret }); const reqToken: any = await client.getRequestToken(callback_url).catch(console.error); open(`https://api.twitter.com/oauth/authenticate?oauth_token=${reqToken.oauth_token}`); function readUserInput(question: string): Promise<string> { const readline = require('readline').createInterface({ input: 
process.stdin, output: process.stdout }); return new Promise<string>((resolve, reject) => { readline.question(question, (answer: string) => { resolve(answer); readline.close(); }); }); } const oauth_token = await readUserInput('Enter oauth_token: '); const oauth_verifier = await readUserInput('Enter oauth_verifier: '); accessToken = await client.getAccessToken({ oauth_token: oauth_token, oauth_verifier: oauth_verifier }).catch(console.error); } await auth(); // console.log(accessToken); const app = new Twitter({ consumer_key: consumer_key, consumer_secret: consumer_key_secret, access_token_key: accessToken.oauth_token, access_token_secret: accessToken.oauth_token_secret }); const getTweets = async (ids: string) => { const tweets = await app.get('statuses/lookup', { id: ids, }).catch(err => console.dir(err, { depth: 10 })); if (tweets) { tweets.forEach((tweet: any) => { const id = tweet.id_str; try { if (tweet['retweeted_status']) { // console.dir(tweet['retweeted_status'], { depth: 10 }); // Calc date from id_str. // tweet['retweeted_status']['created_at'] is also available. 
// Use ECMAScript2020 for BigInt const original_tweet_id = BigInt(tweet['retweeted_status']['id_str']); // console.log(`${original_tweet_id}`); const original_tweet_id_int = parseInt((original_tweet_id >> 22n).toString(), 10); const original_tweet_date = new Date(original_tweet_id_int + 1288834974657); const original_tweet_user = tweet['retweeted_status']['user']['screen_name']; let output = id + ',' + original_tweet_date.toString() + ',' + original_tweet_user + ',https://twitter.com/' + original_tweet_user + '/status/' + tweet['retweeted_status']['id_str']; if (tweet['retweeted_status']['entities']['media'] && tweet['retweeted_status']['entities']['media'][0]['type']) { const original_tweet_media = tweet['retweeted_status']['entities']['media'][0]['type']; output += ',' + original_tweet_media; } else { output += ',nomedia'; } // console.log(output); output += '\n'; output_data += output; success++; } else { console.log(`# Retweet does not exist in ${id}.`); failure++; } } catch { console.log(`# Original id does not exist in ${id}.`); failure++; } }); } }; const sleep = (msec: number) => new Promise(resolve => setTimeout(resolve, msec)); let ids = ''; let count = 0; for (let i = 0; i < tweet_json.length; i++) { const elm = tweet_json[i]; let id = elm.tweet.id; let full_text = elm.tweet.full_text; if (full_text && full_text.startsWith('RT @')) { ids += id; count++; if (count % 100 === 0 || i === tweet_json.length - 1) { await getTweets(ids); console.log(count + '...'); ids = ''; await sleep(10000); } else { ids += ','; } } else { console.log('# This is not RT.'); } } fs.writeFileSync(output_path, output_data); console.log(`Done. Output: ${output_path} Success: ${success} Failure: ${failure}`); }; getOriginalTweetsByAPI();
3337a30850695192a0d04416d7bf0fd6124b2050
[ "Markdown", "TypeScript" ]
2
Markdown
sosuisen/export-date-of-retweet
f037ce64aaeeb777f922e2c35dd1d812fca43b07
934ef2b67517d53aa33e43678e4dbc4fdb8deb3c
refs/heads/master
<repo_name>SpaceBaloon/FXDPL<file_sep>/src/data/managers/BasicDAO.java package data.managers; import data.QueryObject; import data.wrappers.Wrapper; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.List; import javafx.beans.property.ReadOnlyObjectProperty; import javafx.beans.property.ReadOnlyObjectWrapper; import javafx.collections.FXCollections; import javafx.collections.ObservableList; import javafx.collections.WeakListChangeListener; /** * This class serves data managing retrieved from DB. * Data is cached and represents with ObservableList. * By means of this class you have next opportunities: * <ul> * <li>Retrieve data using {@code BasicDAO.open()}.</li> * <li>Cached data by default.</li> * <li>Manipulate data (add, update, delete) via {@code BasicDAO.getData()} property.</li> * <li>Cancel updates using {@code BasicDAO.cancel()}</li> * <li>Commit changes to DB via {@code BasicDAO.commit()}</li> * </ul> * TODO: change data representation from ArrayList to Set to avoid duplicates. * @author <NAME> * @param <T> type that reflect quered structure. */ public abstract class BasicDAO<T> { private boolean skip = false; /** * Helper that represents pair of updatable object and updating object and * also index that reflect position of updatable object. * Index is needed because list can contains duplicates. 
* * @param <T> */ public static class Pair<T> { private final Integer idx; public Integer getIdx() { return idx; } private T lastValue; private T initValue; public void setLastValue( T lastValue ) { this.lastValue = lastValue; } public void setInitValue( T initValue ) { this.initValue = initValue; } public T getLastValue() { return lastValue; } public T getInitValue() { return initValue; } public Pair( Integer idx, T initValue, T lastValue ) { this.idx = idx; this.lastValue = lastValue; this.initValue = initValue; } } /** * Query object is needed to retrive info about query text, parameters etc. */ private QueryObject query; public QueryObject getQuery() { return query; } public void setQuery( QueryObject query ) { this.query = query; } /** * Cached data retrieved from DB. * @return ObservableList. */ private ObservableList<T> data = FXCollections.observableArrayList(); public ObservableList<T> getData() { return data; } /** * It's used to listen new data coming. */ private final ReadOnlyObjectWrapper< ObservableList<T> > dataProperty; public ReadOnlyObjectProperty< ObservableList<T> > getDataProperty() { return dataProperty; } /** * Track removed items. */ private final List<T> removedItemsList = new ArrayList<>(); public List<T> getRemovedItemsList() { return removedItemsList; } /** * Track updated items. */ private final List<Pair<T>> updatedItemsList = new ArrayList<>(); public List<Pair<T>> getUpdatedItemsList() { return updatedItemsList; } /** * Track added items. */ private final List<T> addedItemsList = new ArrayList<>(); public List<T> getAddedItemsList() { return addedItemsList; } /** * Listener for cached data. */ private final WeakListChangeListener<T> listChangeListener = new WeakListChangeListener<>( ( c ) -> { if( skip ) return; while( c.next() ) { if( c.wasReplaced() ) { List<? extends T> removed = c.getRemoved(); List<? extends T> addedSubList = c.getAddedSubList(); int size = removed.size() > addedSubList.size() ? 
removed.size() : addedSubList.size(); //for each replaced items for( int i=0; i < size; i++ ) { T remItem = removed.get( i ); T addItem = addedSubList.get( i ); //No actions for null values and for equal items if( remItem == null || addItem == null || remItem.equals( addItem ) ) { continue; }; //if replace new items with another one if( addedItemsList.contains( remItem ) ) { addedItemsList.set( addedItemsList.indexOf( remItem ), addItem ); } else { //for new items boolean asNew = true; //traverse updatedItemList for( Pair<T> pair : updatedItemsList ) { //looking for match with lastValue //(initValue always point out to true DB data) if( pair.lastValue.equals( remItem ) ) { pair.setLastValue( addItem ); asNew = false; break; } } if( asNew ) { updatedItemsList.add( new Pair<>( c.getFrom() + i, remItem, addItem ) ); } } } } else if( c.wasRemoved() ) { //only if removed item was obtained from DB(including changed) c.getRemoved().forEach( ( item ) -> { if( !addedItemsList.contains( item ) ) { //it's not new item T inst = item; Pair remPair = null; //try to find this item in updated list //if found remove pair from updated list and add to removed for( Pair<T> pair : updatedItemsList ) { if( pair.getLastValue().equals( item ) ) { remPair = pair; inst = pair.getInitValue(); break; } } if( remPair != null ) updatedItemsList.remove( remPair ); removedItemsList.add( inst ); } else { addedItemsList.remove( item ); } }); } else if( c.wasAdded() ) { c.getAddedSubList().forEach( (item) -> { if( removedItemsList.contains( item ) ) { removedItemsList.remove( item ); } else addedItemsList.add( item ); }); } } } ); public BasicDAO() { this( null ); } public BasicDAO( QueryObject query ) { this.query = query; dataProperty = new ReadOnlyObjectWrapper<>( data ); } public ObservableList<T> open( Object[] params ) throws SQLException { if( query == null ) throw new RuntimeException( "Set QueryObject before calling open." 
); query.setParameters( params ); return open( query ); } /** * After reopening we need to rebind our listeners to new list. * @param query QueryObject that provide information about this query. * @return ObservableList * @throws SQLException */ public ObservableList<T> open( QueryObject query ) throws SQLException { List<T> newData = new ArrayList<>(); this.query = query; if( query != null && getWrapper() != null && query.getConnection() != null ) { final String QUERY_TEXT = query.getSelectText(); if( QUERY_TEXT != null && !QUERY_TEXT.isEmpty() ) { Connection con = query.getConnection(); try ( PreparedStatement ps = con.prepareStatement( QUERY_TEXT )) { int idx = 1; Object[] params = query.getParameters(); if( params != null ) for( Object param : query.getParameters() ) { ps.setObject( idx++, param ); } ResultSet rs = ps.executeQuery(); while( rs.next() ) { newData.add( ( T ) getWrapper().wrap( rs, getWrapperClass() ) ); } } } } data = FXCollections.observableArrayList( newData ); data.addListener( listChangeListener ); dataProperty.set( data ); return data; } abstract public Wrapper getWrapper(); abstract public Class<?> getWrapperClass(); /** * TODO: after cancel it's needed to restore init order. 
*/ public void cancel() { skip = true; try { updatedItemsList.forEach( ( pair ) -> { int i = pair.getIdx(); //if saved index not valid if( i < 0 || i >= data.size() || !data.get( i ).equals( pair.getLastValue() ) ) i = data.indexOf( pair.getLastValue() ); if( 0 <= i && i < data.size() ) data.set( i, pair.initValue ); } ); updatedItemsList.clear(); //ignore duplicates( list order may be change ) addedItemsList.forEach( ( item ) -> { data.remove( item ); }); addedItemsList.clear(); //restore items in not init order removedItemsList.forEach( ( item ) -> { data.add( item ); } ); removedItemsList.clear(); } finally { skip = false; } } public void commit() { for( Pair<T> pair : updatedItemsList ) { } } } <file_sep>/src/data/FirebirdConnection.java /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package data; import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; import java.util.Properties; /** * * @author P5 */ public class FirebirdConnection { /** * "jdbc:firebirdsql://p6/c:/data/clients.fdb?encoding=unicode_fss"; */ private static final String URL_PREFIX = "jdbc:firebirdsql://"; public static Connection getConnection( String url, String user, String passw ) throws SQLException { Properties props = new Properties(); props.setProperty( "user", user); props.setProperty( "password", <PASSWORD>); props.setProperty( "encoding", "unicode_fss"); String connectionURL = URL_PREFIX + url; return DriverManager.getConnection( connectionURL, props ); } } <file_sep>/src/data/wrappers/DBPropertyReference.java package data.wrappers; import data.entities.IEntity; import data.entities.IField; import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; /** * * @author BelkinSergei * @param <T> */ public class DBPropertyReference<T> implements IDBPropertyReference<T> { 
private final Field field; private final Class<?> cls; private IField dbFieldInfo; private Method readMethod; private Method writeMethod; public Class<?> getCls() { return cls; } public IField getDbFieldInfo() { return dbFieldInfo; } @Override public boolean isReadable() { return readMethod != null; } @Override public boolean isWritable() { return writeMethod != null; } @Override public boolean isPersistable() { return dbFieldInfo != null; } private void initClassProperty() { String fieldName = field.getName(); String methodFieldName = fieldName.substring( 0, 1 ).toUpperCase() + fieldName.substring( 1 ); String setMethodName = "set" + methodFieldName; String getMethodName = "get" + methodFieldName; String isMethodName = "is" + methodFieldName; try { writeMethod = cls.getMethod( setMethodName, field.getType() ); try { readMethod = cls.getMethod( getMethodName, ( Class<?>[] ) null ); } catch( NoSuchMethodException ex ) { readMethod = cls.getMethod( isMethodName, ( Class<?>[] ) null ); } } catch( NoSuchMethodException ex ) { throw new RuntimeException( ex ); } } public DBPropertyReference( Field field, Class<?> cls ) { if( field == null || cls == null ) { throw new NullPointerException( "Parameters can't be null." ); } this.field = field; this.cls = cls; initClassProperty(); } public void initDBProperty( IEntity inst ) { if( inst == null ) { throw new NullPointerException( "IEntity must be set." 
); } String fieldPropertyName = "FIELD_NAME_" + field.getName().toUpperCase(); for( IField e : ( ( IEntity ) inst ).getFieldNames() ) { String enumName = e.toString(); String enumValue = e.getDBFieldName(); if( fieldPropertyName.equals( enumName ) && enumValue != null && !enumValue.trim().isEmpty() ) { dbFieldInfo = e; break; } } } @Override public void set( Object inst, T arg ) { if( isWritable() ) { try { writeMethod.invoke( inst, arg ); } catch( IllegalAccessException | IllegalArgumentException | InvocationTargetException ex ) { throw new RuntimeException( ex ); } } } @Override public T get( Object inst ) { T result = null; if( isReadable() ) { try { result = ( T ) readMethod.invoke( inst, ( Object[] ) null ); } catch( IllegalAccessException | IllegalArgumentException | InvocationTargetException ex ) { throw new RuntimeException( ex ); } } return result; } @Override public String getDBFieldName() { String res = null; if( isPersistable() ) res = dbFieldInfo.getDBFieldName(); return res; } @Override public Class<?> getType() { return field.getType(); } @Override public String getName() { return field.getName(); } } <file_sep>/src/data/wrappers/IPropertyReference.java package data.wrappers; import javafx.beans.property.Property; /** * * @author <NAME> */ public interface IPropertyReference<T> { boolean isWritable(); boolean isReadable(); void set( Object inst, T arg ); T get( Object inst ); Class<?> getType(); String getName(); } <file_sep>/README.md # FXDPL Library that provides convinient access to data for FX controls. BasicDAO is needed to cache data. In this class cached data represented by javafx.collections.ObservableList. BasicDAO is intended for manipulation data, it holds information about how to retrive and save data through ObjectQuery. DataSource ties FX controls and BasicDAO through DataSource.RowObject and DataSource.RowObjectProperties. Add listener to these properties we can reflect data changes, cursor moving etc. 
<file_sep>/src/annotations/PersistedEntity.java package annotations; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * Class that marked with this annotations will be eligable for persistence * * @author <NAME> */ @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.TYPE) public @interface PersistedEntity { String value(); } <file_sep>/src/data/wrappers/Wrapper.java package data.wrappers; import data.entities.IEntity; import java.sql.ResultSet; import java.util.List; /** * TODO: it's needed to rid of type {@code C} as parameterization of this interface. * This wrapper is needed when you use IEntity interface to provide info about DB fields. * * @author <NAME> * @param <T> type that reflect structure of retrieved data. * @param <C> class that implements IEntity and T interface. */ public interface Wrapper<T, C extends IEntity> { T wrap( ResultSet rs, Class<C> cls ); List<IDBPropertyReference> getProperties(); boolean hasProperty( String name ); IDBPropertyReference getPropertyByName( String name ); IDBPropertyReference getPropertyByFieldName( String fieldName ); } <file_sep>/src/data/entities/IField.java package data.entities; /** * Return {@code String} name of a Field that corresponds name of a field in DB. * * @author <NAME> */ public interface IField { String getDBFieldName(); } <file_sep>/src/data/wrappers/IDBPropertyReference.java package data.wrappers; /** * * @author <NAME> * @param <T> */ public interface IDBPropertyReference<T> extends IPropertyReference<T> { boolean isPersistable(); String getDBFieldName(); } <file_sep>/src/annotations/PersistedField.java package annotations; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * By means of this annotation you make your fields elegiable to persistence. 
* * @author <NAME> */ @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.FIELD) public @interface PersistedField { String value(); } <file_sep>/src/data/wrappers/DBPropertyReferenceAnnotation.java package data.wrappers; import annotations.PersistedEntity; import annotations.PersistedField; import java.lang.annotation.Annotation; import java.lang.reflect.Field; /** * This implementation uses anotations to determine persistence. * * @author <NAME>. */ public class DBPropertyReferenceAnnotation<T> extends DBPropertyReferenceProps<T>{ private boolean init = false; private String dbFieldName = null; public String getDbFieldName() { return dbFieldName; } public DBPropertyReferenceAnnotation( String name, Class<?> cls ) { super( name, cls ); //alow non persistable fields // if( !isReadable() || !isWritable() || !isPersistable() ) throw new IllegalArgumentException( // "Property " + propertyRef.getName() + " is not persistable." ); } @Override public boolean isPersistable() { initDBProperty(); return dbFieldName != null; } protected void initDBProperty() { if( !init ) { init = true; Class<?> cls = propertyRef.getContainingClass(); for( Annotation a : cls.getAnnotations() ) { if( a instanceof PersistedEntity && !( (PersistedEntity) a).value().trim().isEmpty() ) { try { Field field = cls.getDeclaredField( propertyRef.getName() ); for( Annotation af : field.getAnnotations() ) { if( af instanceof PersistedField ) { dbFieldName = ((PersistedField) af).value(); if( dbFieldName == null || dbFieldName.trim().isEmpty() ) throw new NoSuchFieldException( "Annotation value is empty" ); } } } catch(NoSuchFieldException | SecurityException ex) { ex.printStackTrace(); dbFieldName = null; } break; } } } } @Override public String getDBFieldName() { String res = null; if( isPersistable() ) res = dbFieldName; return res; } } <file_sep>/src/data/wrappers/DefaultWrapper.java package data.wrappers; import data.entities.IEntity; import java.lang.reflect.Field; import java.sql.Date; import 
java.sql.ResultSet; import java.sql.Timestamp; import java.util.ArrayList; import java.util.List; /** * Basic implementation of {@code Wrapper}. * * @author <NAME>. * @param <T> returning type. * @param <C> type of class that will be instatianted. */ public class DefaultWrapper<T, C extends IEntity> implements Wrapper<T, C> { private boolean needInitProperties = true; private final List<IDBPropertyReference> properties = new ArrayList<>(); @Override public List<IDBPropertyReference> getProperties() { return properties; } protected T getInstance( Class<C> cls ) throws InstantiationException, IllegalAccessException { return ( T ) cls.newInstance(); } protected IDBPropertyReference getPropertyRef( T result, Field field, Class<?> cls ) { DBPropertyReference res = new DBPropertyReference( field, cls ); res.initDBProperty( ( IEntity ) result ); return res; } protected void handleClassFields(T result, ResultSet rs, Class<?> classLoop) { Field[] fields = classLoop.getDeclaredFields(); for( Field field : fields ) { IDBPropertyReference propertyRef = null; try { propertyRef = getPropertyRef( result, field, classLoop ); if( propertyRef.isReadable() && propertyRef.isWritable() && propertyRef.isPersistable() ) { if( needInitProperties ) properties.add( propertyRef ); String fieldType = propertyRef.getType().getName(); String fieldName = propertyRef.getDBFieldName(); switch( fieldType ) { case "java.lang.Short": propertyRef.set( result, rs.getShort( fieldName ) ); break; case "java.lang.Integer": propertyRef.set( result, rs.getInt( fieldName ) ); break; case "java.lang.Long": propertyRef.set( result, rs.getLong( fieldName ) ); break; case "java.lang.Double": propertyRef.set( result, rs.getDouble( fieldName ) ); break; case "java.lang.Boolean": propertyRef.set( result, rs.getBoolean( fieldName ) ); break; case "java.math.BigDecimal": propertyRef.set( result, rs.getBigDecimal( fieldName ) ); break; case "java.time.LocalDate": { Date date = rs.getDate( fieldName ); propertyRef.set( 
result, date == null ? null : date.toLocalDate() ); break; } case "java.time.LocalDateTime": { Timestamp date = rs.getTimestamp( fieldName ); propertyRef.set( result, date == null ? null : date.toLocalDateTime() ); break; } default: propertyRef.set( result, rs.getString( fieldName ) ); } } } catch( Exception ex) { /** * Skip bad fields. */ String msg = "Error while handling field: "; if( propertyRef != null ) { msg += propertyRef.toString(); } else { msg += "couldn't obtain propertyRef."; } System.err.println( msg ); ex.printStackTrace(); } } } /** * This implementation does not throw any exceptions. * * @param rs {@code ResultSet} instance from which data will be extracted. * @param cls {@code Class} that will be instatiated. * @return <T> It is allowed to return {@code null} value. */ @Override public T wrap( ResultSet rs, Class<C> cls ) { T result = null; try { result = getInstance( cls ); Class<?> classLoop = cls; /** * Because {@code Class.getDeclaredFields()) provides all fields * that declared only in given class we need to obtain all super classes fields * that this one inherits. */ while( classLoop != Object.class ) { try { handleClassFields( result, rs, classLoop ); } catch( Exception e ) { /** * If something went wrong we skip that portion of class. 
*/ e.printStackTrace(); } classLoop = classLoop.getSuperclass(); } //init properties list only for first time needInitProperties = false; } catch( Exception ex ) { ex.printStackTrace(); } return result; } @Override public boolean hasProperty( String name ) { boolean res = false; if( properties != null && !properties.isEmpty() ) { res = properties.stream().filter( c -> c.getName().equals( name ) ).count() > 0; } return res; } protected IDBPropertyReference getProperty( String name, String fieldName ) { IDBPropertyReference res = null; if( properties != null && !properties.isEmpty() ) { res = properties.stream().filter( ( p ) -> { return p.getName().equals( name ) || p.getDBFieldName().equals( fieldName ); } ).findFirst().orElse( null ); } return res; } @Override public IDBPropertyReference getPropertyByName( String name ) { return getProperty( name, null ); } @Override public IDBPropertyReference getPropertyByFieldName( String fieldName ) { return getProperty( null, fieldName ); } }
d829d305d835e8ef036aaf68f1e07e95c3cd09e4
[ "Markdown", "Java" ]
12
Java
SpaceBaloon/FXDPL
88d9a0bfb76d4fd7c7e60c4ef9a8b1124baf93ed
69362e54a7680890ebab97a3358ec1638b323139
refs/heads/master
<repo_name>InDIOS/trebor-tools<file_sep>/types.d.ts type AttrTypes = string | number | RegExp | null | boolean; type AttrParams = string[] | ObjectLike<AttrDefinition>; type DirectiveDefinition = (inst: Component, options: DirectiveOptions, node: HTMLElement) => void | DirectiveDefObject; type TemplateFn = (component: Component) => ComponentTemplate; type IterateKey<T> = T extends any[] ? number : string; type IterateValue<T> = T extends any[] ? T[number] : T[keyof T]; type PluginFn = (this: Component, ctor: ComponentConstructor, pluginOptions?: ObjectLike<any>) => void; interface ObjectLike<T> { [key: string]: T; } interface DirectiveDefObject { $init?(inst: Component, options: DirectiveOptions, node: HTMLElement): void; $inserted?(inst: Component, options: DirectiveOptions, node: HTMLElement): void; $update(inst: Component, options: DirectiveOptions, node: HTMLElement): void; $destroy?(inst: Component, options: DirectiveOptions, node: HTMLElement): void; } interface AttrDefinition { required?: boolean; type: string | Function; validator?(value: any): boolean; default?: AttrTypes | (() => AttrTypes | Object); } interface DirectiveOptions { value: any; expression: string; modifiers: ObjectLike<boolean>; } interface ComponentOptions extends ComponentHooks { model?: ObjectLike<any>; attrs?: string[] | ObjectLike<AttrDefinition>; filters?: ObjectLike<(...args: any[]) => any>; children?: ObjectLike<ComponentConstructor>; directives?: ObjectLike<DirectiveDefinition>; } interface ComponentTemplate { $create(): void; $mount(parent: string | Element, sibling?: string | boolean | Element): void; $update(state: Component, ...args: any[]): void; $unmount(): void; $destroy(): void; } interface ComponentHooks { willCreate?(this: Component): void; willMount?(this: Component): void; willUpdate?(this: Component): void; willUnmount?(this: Component): void; willDestroy?(this: Component): void; didCreate?(this: Component): void; didMount?(this: Component): void; didUpdate?(this: 
Component): void; didUnmount?(this: Component): void; didDestroy?(this: Component): void; } interface Component extends ComponentTemplate { $parent: Component; $parentEl: HTMLElement; $siblingEl: HTMLElement; readonly $refs: ObjectLike<HTMLElement[]>; readonly $slots: ObjectLike<DocumentFragment>; readonly $filters: ObjectLike<(...args: any[]) => any>; readonly $options: ComponentOptions; readonly $children: Component[]; readonly $directives: ObjectLike<DirectiveDefinition>; $get<T>(path: string): T; $set<T>(path: string, value: T): void; $update(): void; $on(event: string, handler: (data?: any) => void): { $off(): void }; $once(event: string, handler: (data?: any) => void): void; $fire(event: string, data?: any): void; $notify(key: string): void; $observe(key: string | string[], handler: () => void): { $unobserve(): void }; $watch(key: string, handler: (oldValue?: any, newValue?: any) => void): { $unwatch(): void }; [key: string]: any; } interface ComponentConstructor { new <T extends Component>(attrs?: string[] | ObjectLike<AttrDefinition>, parent?: Component): T; plugin(fn: PluginFn, options?: ObjectLike<any>): void; prototype: Component; }<file_sep>/src/dom.ts import { _$List } from './list'; import { PROP_MAP } from './constants'; import { _$toString, _$isString, _$isType, _$isValueAttr, _$hasProp } from './utilities'; export function _$select(selector: string | Element, parent?: Element): HTMLElement { return _$isString(selector) ? 
(parent || document).querySelector(<string>selector) : <HTMLElement>selector; } export function _$docFragment() { return document.createDocumentFragment(); } export function _$append(parent: Element, child: Element, sibling?: boolean | Element) { if (_$isType(sibling, 'boolean') && sibling) parent.parentElement.replaceChild(child, parent); else if (!sibling) parent.appendChild(child); else parent.insertBefore(child, <Element>sibling); } export function _$assignEl(source: Element, dest: Element) { const { childNodes, attributes } = source; for (let i = 0; i < childNodes.length; i++) { _$append(dest, <Element>childNodes[i]); } for (let i = 0; i < attributes.length; i++) { const attr = attributes[i]; dest.setAttributeNS(source.namespaceURI, attr.name, attr.value); } source.parentElement.replaceChild(dest, source); return dest; } export function _$removeEl(el: Element, parent: Element) { let root = parent || el.parentElement; if (root) root.removeChild(el); } export function _$el<T extends keyof HTMLElementTagNameMap>(tagName?: T) { return document.createElement(tagName || 'div'); } export function _$svg<T extends keyof SVGElementTagNameMap>(tagName?: T) { return document.createElementNS('http://www.w3.org/2000/svg', tagName || 'svg'); } export function _$text(content?: string) { return document.createTextNode(content || ''); } export function _$comment(content?: string) { return document.createComment(content || ''); } export function _$setAttr(el: Element & { _value?: any }, attrAndValue: [string, any]) { let [attr, value] = attrAndValue; el.setAttribute(attr, _$toString(value)); if (_$isValueAttr(attr) && !_$isString(value)) el[PROP_MAP._] = value; } export function _$getAttr(el: Element, attr: string) { return _$isValueAttr(attr) ? _$getValue(<HTMLInputElement>el) : el.getAttribute(attr); } export function _$getValue(el: (HTMLInputElement | HTMLSelectElement | HTMLTextAreaElement | HTMLOptionElement) & { _value?: any }) { return _$hasProp(el, PROP_MAP._) ? 
el[PROP_MAP._] : el[PROP_MAP.v]; } export function _$addListener(el: HTMLElement, event: string, handler: EventListenerOrEventListenerObject) { el.addEventListener(event, handler, false); } export function _$updateListener(el: HTMLElement, event: string, oldHandler: EventListenerOrEventListenerObject, newHandler: EventListenerOrEventListenerObject) { _$removeListener(el, event, oldHandler); _$addListener(el, event, oldHandler = newHandler); return oldHandler; } export function _$removeListener(el: HTMLElement, event: string, handler: EventListenerOrEventListenerObject) { el.removeEventListener(event, handler, false); } export function _$bindGroup(input: HTMLInputElement, selection: string[]) { let _value = _$getValue(input); let _$index = selection.indexOf(_value); input.checked && !~_$index ? selection.push(_value) : selection.splice(_$index, 1); } export function _$bindMultiSelect(select: HTMLSelectElement, selections: any[]) { if (!selections.length) return; let { options } = select; for (let i = 0; i < options.length; i++) { options[i].selected = !!~selections.indexOf(_$getValue(options[i])); } } export function _$updateMultiSelect(select: HTMLSelectElement, obj: Component, prop: string) { let items = []; let selection = obj[prop]; let { selectedOptions } = select; for (let i = 0; i < selectedOptions.length; i++) { items.push(_$getValue(selectedOptions[i])); } obj[prop] = new _$List(items, selection['_root'], selection['_key']); obj.$update(); } export function _$insertStyle(id: string, css: string) { let isNew = false; let style = _$select(`#${id}`, document.head); if (!style) { isNew = true; style = _$el('style'); style.id = id; _$setAttr(style, ['refs', 1]); } if (style.textContent !== css) { style.textContent = css; } if (isNew) { _$append(document.head, style); } else { let count = +_$getAttr(style, 'refs'); _$setAttr(style, ['refs', ++count]); } } export function _$removeStyle(id: string) { let style = _$select(`#${id}`, document.head); if (style) { let 
count = +_$getAttr(style, 'refs'); if (--count === 0) { _$removeEl(style, document.head); } else { _$setAttr(style, ['refs', count]); } } }<file_sep>/rollup.config.js const path = require('path'); const input = path.join(__dirname, './src/index.js'); const output = [{ format: 'es', file: path.join(__dirname, 'index.js'), sourcemap: true }]; module.exports = { input, output }; <file_sep>/src/baseComp.ts import { _$List } from './list'; import { _$getValue } from './dom'; import { PROPS, PROP_MAP, TPS } from './constants'; import { _$each, _$define, _$assign, _$isType, _$isString, _$isFunction, _$hasProp, _$toType, _$directive, _$isArray, _$toPlainObject, _$accesor, _$subscribers, _$isValueAttr, _$toString, _$extends } from './utilities'; function _$BaseComponent(attrs: AttrParams, template: TemplateFn, options: ComponentOptions, parent: Component) { const self = this; const _$set = (prop: string, value: any) => { _$define(self, prop, { value, writable: true }); }; if (!attrs) attrs = {}; _$each(PROPS, prop => { _$define(self, prop, { value: {} }); }); _$set('$parent', parent || null); _$set('$children', []); _$set(PROP_MAP.s, {}); _$set('$options', options); const opts: ComponentOptions = self.$options; if (!opts.attrs) opts.attrs = {}; if (!opts.children) opts.children = {}; _$each(TPS, (plugin) => { plugin.fn.call(self, _$BaseComponent, plugin.options); }); if (opts.filters) _$assign(self.$filters, opts.filters); if (opts.directives) _$each(opts.directives, (drt, k) => { self.$directives[k] = _$directive(drt); }); _$each(opts.attrs, (attrOps, key) => { _$define(self, <string>(_$isType(key, 'number') ? attrOps : key), { get() { if (_$isString(attrOps)) { let value = attrs[<string>attrOps]; return _$isFunction(value) ? value() : value; } else { if (!_$hasProp(attrs, <string>key) && (<AttrDefinition>attrOps).required) { return console.error(`Attribute '${key}' is required.`); } else { let value = _$isFunction(attrs[key]) ? 
attrs[key]() : attrs[key]; if (value === void 0 && _$hasProp(attrOps, 'default')) { const def = (<AttrDefinition>attrOps).default; value = _$isFunction(def) ? (<Function>def)() : def; } const typ = (<AttrDefinition>attrOps).type; if (typ && !_$isType(value, typ) && (<AttrDefinition>attrOps).required) { return console.error(`Attribute '${key}' must be type '${typ}'.`); } value = _$toType(value, value === void 0 ? 'undefined' : typ, self, <string>key); if (value !== void 0 && _$hasProp(attrOps, 'validator')) { const validator = (<AttrDefinition>attrOps).validator; if (_$isFunction(validator) && !validator(value)) { return console.error(`Assigment '${key}'='${JSON.stringify(value)}' invalid.`); } } return value; } } }, set() { console.error(`'${key}' is read only.`); }, enumerable: true, configurable: true }); }); let data = opts.model || {}; for (const key in data) { if (_$hasProp(data, key)) { const desc = Object.getOwnPropertyDescriptor(data, key); if (desc.value && _$isArray(desc.value)) { desc.value = new _$List(desc.value, self, key); } else { if (desc.get) { let getter = desc.get; desc.get = function () { let value = getter.call(self); if (_$isArray(value)) value = new _$List(value, self, key); return value; }; } if (desc.set) { let setter = desc.set; desc.set = function (v: any) { if (_$isArray(v)) v = new _$List(v, self, key); setter.call(self, v); }; } } _$define(self, key, desc); } } const tpl = template(self); _$each(tpl, (value, key) => { _$define(self, key, { value: (function (key) { const hook = key[1].toUpperCase() + key.slice(2); const bhook = opts[`will${hook}`]; const ahook = opts[`did${hook}`]; return function () { bhook && bhook.call(this); key === '$update' ? 
value.call(this, this) : value.apply(this, arguments); ahook && ahook.call(this); }; })(key) }); }); _$define(self, '$data', { get() { return _$toPlainObject(this); } }); } _$assign(_$BaseComponent[PROP_MAP.h], { $get(path: string) { return _$accesor(this, path); }, $set(path: string, value: any) { _$accesor(this, path, value); }, $on(event: string, handler: Function) { if (!this[PROP_MAP.e][event]) { this[PROP_MAP.e][event] = []; } const i = this[PROP_MAP.e][event].push(handler); return { $off: () => { this[PROP_MAP.e][event].splice(i - 1, 1); } }; }, $once(event: string, handler: Function) { const e = this.$on(event, args => { handler(args); e.$off(); }); }, $fire(event: string, data: any) { if (this[PROP_MAP.e][event]) { _$each(this[PROP_MAP.e][event], handler => { handler(data); }); } }, $notify(key: string) { if (this[PROP_MAP.s][key]) { _$each(this[PROP_MAP.s][key], suscriber => { suscriber(); }); } }, $observe(deps: string | string[], listener: Function) { const subs: { sub: string, i: number }[] = []; if (_$isArray(deps)) { _$each(<string[]>deps, dep => { subs.push({ sub: dep, i: _$subscribers.call(this, dep, listener) }); }); } else { subs.push({ sub: <string>deps, i: _$subscribers.call(this, deps, listener) }); } return { $unobserve: () => { _$each(subs, sub => { this[PROP_MAP.s][sub.sub].splice(sub.i, 1); }); } }; }, $watch(key: string, watcher: Function) { if (!this[PROP_MAP.w][key]) { this[PROP_MAP.w][key] = []; } const i = this[PROP_MAP.w][key].push(watcher.bind(this)); return { $unwatch: () => { this[PROP_MAP.w][key].splice(i - 1, 1); } }; } }); export function _$Ctor(tpl: Function, options: Object) { const ctor: ComponentConstructor = <any>function (_$attrs, _$parent) { _$BaseComponent.call(this, _$attrs, tpl, options, _$parent); !_$parent && this.$create(); }; ctor.plugin = (fn: PluginFn, options?: ObjectLike<any>) => { TPS.push({ options, fn }); }; _$extends(ctor, _$BaseComponent); return ctor; }<file_sep>/src/index.ts import { _$Ctor } from 
'./baseComp'; import { _$removeChild, _$isType, _$noop, _$componentUpdate, _$bindClasses, _$bindStyle, _$forLoop, _$destroyComponent, _$setElements, _$conditionalUpdate, _$emptyElse, _$htmlUpdate, _$addChild, _$each, _$bindBooleanAttr, _$setReference, _$isKey, _$textUpdate, _$tagUpdate, _$removeReference, _$toString, _$bindUpdate, _$emptySlot, _$appendToSlot, _$declareSlots } from './utilities'; import { _$select, _$docFragment, _$append, _$assignEl, _$removeEl, _$getAttr, _$addListener, _$updateListener, _$removeListener, _$insertStyle, _$bindGroup, _$updateMultiSelect, _$el, _$svg, _$text, _$comment, _$setAttr, _$removeStyle, _$getValue, _$bindMultiSelect, } from './dom'; export { _$bindUpdate, _$comment, _$setElements, _$emptySlot, _$appendToSlot, _$declareSlots, _$updateMultiSelect, _$componentUpdate, _$htmlUpdate, _$tagUpdate, _$bindBooleanAttr, _$removeReference, _$addChild, _$textUpdate, _$getValue, _$text, _$conditionalUpdate, _$noop, _$toString, _$setReference, _$isType, _$isKey, _$select, _$docFragment, _$append, _$removeChild, _$bindGroup, _$emptyElse, _$Ctor, _$bindMultiSelect, _$setAttr, _$removeEl, _$assignEl, _$el, _$bindStyle, _$forLoop, _$each, _$insertStyle, _$removeStyle, _$getAttr, _$addListener, _$updateListener, _$removeListener, _$bindClasses, _$destroyComponent, _$svg, };<file_sep>/src/list.ts import { PROP_MAP } from './constants'; import { _$isType, _$isArray, _$define, _$assign, _$extends, _$dispatch } from './utilities'; const array = Array[PROP_MAP.h]; export function _$toArgs(args: IArguments, start: number = 0): any[] { return array.slice.call(args, start); } function _$arrayValues(list, value: any[], root: Component, key: string) { array.push.apply(list, value.map((v, i) => { if (list.length !== 0) i += list.length; return !(_$isType(v, _$List)) && _$isArray(v) ? 
new _$List(v, root, `${key}.${i}`) : v; })); } export function _$List(value: any[], root: Component, key: string) { let self = this; Array.apply(self, [value.length]); let desc = { writable: false, configurable: false, enumerable: false }; _$define(self, '_key', _$assign({ value: key }, desc)); _$define(self, '_root', _$assign({ value: root }, desc)); _$arrayValues(self, value, root, key); desc.writable = true; _$define(self, 'length', _$assign({ value: self.length }, desc)); } _$extends(_$List, Array); ['pop', 'push', 'reverse', 'shift', 'sort', 'fill', 'unshift', 'splice'].forEach(method => { _$List[PROP_MAP.h][method] = function () { let self = this; const old = self.slice(); let result; if (method === 'push') { _$arrayValues(self, _$toArgs(arguments), self._root, self._key); result = self.length; } else { result = array[method].apply(self, arguments); } _$dispatch(self._root, self._key, old, self.slice()); return result; }; }); _$List[PROP_MAP.h].pull = function (index: number) { let self = this; let items = _$toArgs(arguments, 1); let length = self.length; if (index > length) { length = index + 1; const pull = new Array(index - self.length); pull.push.apply(pull, items); for (let i = 0; i < length; i++) { if (i === index) { self.push.apply(self, pull); } } } else { self.splice.apply(self, [index, 1].concat(items)); } };<file_sep>/src/constants.ts export const PROPS = ['$slots', '$refs', '$filters', '$directives', '_events', '_watchers']; export const PROP_MAP = { p: '__TP__', v: 'value', _: '_value', s: '_subscribers', e: '_events', w: '_watchers', h: 'prototype' }; export const TPS: { options: ObjectLike<any>, fn: PluginFn }[] = window[PROP_MAP.p] || (window[PROP_MAP.p] = []); <file_sep>/src/utilities.ts import { PROP_MAP } from './constants'; import { _$toArgs, _$List } from './list'; import { _$el, _$getAttr, _$setAttr, _$select, _$assignEl, _$removeEl, _$docFragment, _$append } from './dom'; function _$toLowerCase(str: string) { return str.toLowerCase(); } 
export function devlog(type: 'info' | 'warn' | 'error', ...msgs: any[]) { console[type](...msgs); } export const _$assign = Object['assign'] || function (t: Object) { for (let s, i = 1, n = arguments.length; i < n; i++) { s = arguments[i]; for (const p in s) if (_$hasProp(s, p)) t[p] = s[p]; } return t; }; export function _$apply(callee: Function, args: any[], globs: any[], thisArg: any = null) { return callee.apply(thisArg, args.concat(globs)); } export function _$isValueAttr(attr: string) { return attr === 'value'; } export function _$subscribers(dep: string, listener: Function) { if (!this[PROP_MAP.s][dep]) { this[PROP_MAP.s][dep] = []; } return this[PROP_MAP.s][dep].push(listener.bind(this)) - 1; } export function _$define(obj: Object, key: string, desc: PropertyDescriptor) { Object.defineProperty(obj, key, desc); } export function _$dispatch(root: Component, key: string, oldVal, value) { root.$notify(key); if (root[PROP_MAP.w][key]) { _$each(root[PROP_MAP.w][key], watcher => { watcher(oldVal, value); }); } root.$update(); } export function _$extends(ctor: Function, exts: Function) { ctor[PROP_MAP.h] = Object.create(exts[PROP_MAP.h]); ctor[PROP_MAP.h].constructor = ctor; } export function _$isType(value: any, type: string | Function) { return _$type(type) === 'string' ? (<string>type).split('\|').some(t => t.trim() === _$type(value)) : value instanceof <Function>type; } export function _$isObject(obj) { return _$isType(obj, 'object'); } export function _$isArray(obj) { return Array.isArray ? Array.isArray(obj) : _$isType(obj, 'array'); } export function _$isFunction(obj) { return _$isType(obj, 'function'); } export function _$isString(obj) { return _$isType(obj, 'string'); } export function _$toType(value, type, root: Component, key: string) { switch (type) { case 'date': return new Date(value); case 'string': return _$toString(value); case 'number': return +value; case 'boolean': return _$isString(value) && !value ? 
true : !!value; case 'array': return _$isType(value, _$List) ? value : new _$List(value, root, key); default: return value; } } function _$type(obj: any) { return _$toLowerCase(/ (\w+)/.exec({}.toString.call(obj))[1]); } export function _$hasProp(obj: Object, prop: string) { return obj.hasOwnProperty(prop); } export function _$directive(dd: DirectiveDefinition): DirectiveDefObject { const hasProp = (prop, instance, options, element) => _$isObject(dd) && dd[prop] && dd[prop](instance, options, element); return { $init(instance, options, element) { hasProp('$init', instance, options, element); }, $inserted(instance, options, element) { hasProp('$inserted', instance, options, element); }, $update(instance, options, element) { if (_$isFunction(dd)) { dd(instance, options, element); } else { hasProp('$update', instance, options, element); } }, $destroy(instance, options, element) { hasProp('$destroy', instance, options, element); } }; } export function _$noop() { } export function _$addChild(inst: Component, Child: ComponentConstructor, attrs: string[] | ObjectLike<AttrDefinition>) { let child: Component = null; if (Child) { child = new Child(attrs, inst); inst.$children.push(child); } return child; } export function _$removeChild(inst: Component, child: Component) { let index = inst.$children.indexOf(child); index >= 0 && inst.$children.splice(index, 1); } export function _$toString(obj: any): string { const str: string = _$type(obj); return !/null|undefined/.test(str) ? obj.toString() : str; } export function _$toPlainObject(obj: Component) { const data: ObjectLike<any> = {}; _$each(_$isObject(obj) ? obj : {}, (_v, k) => { if (k[0] !== '$' && !_$isFunction(obj[k])) { if (_$isType(obj[k], _$List)) { data[k] = obj[k].map(_$toPlainObject); } else if (_$isObject(obj[k])) { data[k] = _$toPlainObject(obj[k]); } else { data[k] = obj[k]; } } }); return _$isObject(obj) ? 
data : obj; } export function _$setReference(refs: Object, prop: string, node: HTMLElement) { if (!_$hasProp(refs, prop)) { const value = []; _$define(refs, prop, { get: () => value.length <= 1 ? value[0] : value, set: val => { val && !~value.indexOf(val) && value.push(val); }, enumerable: true, configurable: true }); } refs[prop] = node; } export function _$accesor(object: Component, path: string, value?: any) { return path.split('.').reduce((obj, key, i, arr) => { if (_$isType(value, 'undefined')) { if (obj == null) { arr.splice(0, arr.length); return i > 0 && obj === null ? obj : undefined; } } else { if (i === arr.length - 1) { if (_$isType(obj, _$List) && _$toString(+key) === key) { obj.pull(+key, value); } else { let oldVal = obj[key]; obj[key] = !_$isType(value, _$List) && _$isArray(value) ? new _$List(value, object, key) : value; _$dispatch(object, path, oldVal, obj[key]); } } else if (!_$isObject(obj[key])) { obj[key] = {}; } } return obj ? obj[key] : null; }, object); } export function _$emptyElse() { return { type: 'empty-else', $create: _$noop, $mount: _$noop, $update: _$noop, $destroy: _$noop }; } export function _$isKey(event: KeyboardEvent, key: string) { return _$toLowerCase(event.key) === key || !!event[`${key}Key`]; } export function _$emptySlot(inst: Component, slot: string) { let slots = inst.$slots; return slots[slot] && !slots[slot].hasChildNodes() ? 
(slots[slot] = _$docFragment()) : null; } export function _$appendToSlot(slots: ObjectLike<DocumentFragment>, slot: string, el: HTMLElement) { !slots[slot].firstChild && _$append(<any>slots[slot], el); } export function _$declareSlots($slots: ObjectLike<DocumentFragment>, slots: string[]) { _$each(slots, slot => { $slots[slot] = _$docFragment(); }); } export function _$bindClasses(value: string | ObjectLike<boolean> | (string | ObjectLike<boolean>)[]) { let classes = ''; if (_$isString(value)) { classes += ` ${value}`; } else if (_$isArray(value)) { classes = (<any[]>value).map(_$bindClasses).join(' '); } else if (_$isObject(value)) { for (let key in <Object>value) if (_$hasProp(value, key) && value[key]) classes += ` ${key}`; } return classes.trim(); } export function _$bindStyle(value: string | ObjectLike<any>) { let el = _$el(); if (_$isObject(value)) { const { style } = <HTMLElement>el; _$each(value, (val, prop) => { if (val !== style[prop]) style[prop] = val; }); return style.cssText; } else if (_$isString(value)) { return value; } else { return ''; } } export function _$conditionalUpdate(block: { type: string } & ComponentTemplate, condition: Function, parent: Element, anchor: Element, inst: Component) { let globs = _$toArgs(arguments, 5); if (block && block.type === _$apply(condition, [inst], globs).type) { _$apply(block.$update, [inst], globs, block); } else { block && block.$destroy(); block = _$apply(condition, [inst], globs); block.$create(); block.$mount(parent || inst.$parentEl, anchor); } return block; } export function _$bindUpdate(el: (HTMLInputElement | HTMLSelectElement | HTMLTextAreaElement) & { _value: any }, binding: [string, any]) { let [attr, value] = binding; let _value: string = _$toString(value); if (_$isValueAttr(attr)) { if (el[attr] !== _value) el[attr] = _value; el[PROP_MAP._] = value; } else if (_$getAttr(el, attr) !== _value) { _$setAttr(el, [attr, _value]); } } export function _$bindBooleanAttr(el: HTMLElement, attrAndValue: 
[string, any]) { let [attr, value] = attrAndValue; el[attr] = value == null || value === false ? (el.removeAttribute(attr), false) : (_$setAttr(el, [attr, '']), true); } export function _$textUpdate(text: Text, value: string) { if (text.data !== (value = _$toString(value))) text.data = value; } export function _$tagUpdate<T extends keyof HTMLElementTagNameMap>(node: HTMLElement, tag: T) { return _$toLowerCase(tag) !== _$toLowerCase(node.tagName) ? _$assignEl(node, _$el(tag)) : node; } export function _$removeReference(refs: Object, prop: string, node: HTMLElement) { let nodes = refs[prop]; _$isArray(nodes) ? refs[prop].splice(nodes.indexOf(node), 1) : (delete refs[prop]); } export function _$htmlUpdate(node: HTMLElement, value: string) { if (node.innerHTML !== (value = _$toString(value))) node.innerHTML = value; } export function _$componentUpdate(parent: Component, Ctor: ComponentConstructor, inst: Component, value: ComponentConstructor, attrs: AttrParams, el: HTMLElement, sibling: HTMLElement) { if (value === Ctor) { inst && inst.$update(); } else { Ctor = value; if (inst) { inst.$destroy(); _$removeChild(parent, inst); } if (inst) { inst = _$addChild(parent, Ctor, attrs); inst.$create(); inst.$mount(el || parent.$parentEl, sibling); } } return [inst, Ctor]; } export function _$destroyComponent(component: Component) { component.$unmount(); component.$parent = null; component.$parentEl = null; component.$siblingEl = null; component.$children.splice(0, component.$children.length); } export function _$setElements(component: Component, parent: HTMLElement, sibling?: HTMLElement) { let brother = _$select(sibling); component.$siblingEl = brother; component.$parentEl = sibling && brother.parentElement || _$select(parent); } export function _$forLoop(root: Component, obj: any[], loop: (...args: any[]) => ComponentTemplate) { let items: ObjectLike<ComponentTemplate> = {}, loopParent: Element, loopSibling: Element; let globs = _$toArgs(arguments, 3); _$each(obj, (item, i, 
index) => { items[i] = _$apply(loop, [root, item, i, index], globs); }); return { $create() { _$each(items, item => { item.$create(); }); }, $mount(parent, sibling) { loopParent = _$select(parent); loopSibling = _$select(sibling); _$each(items, item => { item.$mount(loopParent, loopSibling); }); }, $update(root: Component, obj: any[]) { let globs = _$toArgs(arguments, 2); _$each(items, (item, i, index) => { if (obj[i]) { _$apply(item.$update, [root, obj[i], i, index], globs, item); } else { item.$destroy(); delete items[i]; } }); _$each(obj, (item, i, index) => { if (!items[i]) { items[i] = _$apply(loop, [root, item, i, index], globs); items[i].$create(); items[i].$mount(loopParent, loopSibling); } }); }, $destroy() { _$each(items, item => { item.$destroy(); }); } }; } export function _$each<T>(obj: T, cb: (value: IterateValue<T>, key: IterateKey<T>, index?: number) => void) { let i = 0; for (const key in obj) { if (_$hasProp(obj, key)) { cb(<any>obj[key], <any>(isNaN(+key) ? key : +key), i++); } } }<file_sep>/README.md # trebor-tools [![NPM Version][npm-image]][npm-url] [![NPM Downloads][downloads-image]][downloads-url] [![Release][github-img]][github-url] [![Build Status][travis-image]][travis-url] A set of internal tools that is used in TreborJS [npm-image]: https://img.shields.io/npm/v/trebor-tools.svg [npm-url]: https://npmjs.org/package/trebor-tools [downloads-image]: https://img.shields.io/npm/dm/trebor-tools.svg [downloads-url]: https://npmjs.org/package/trebor-tools [github-img]: https://img.shields.io/github/release/InDIOS/trebor-tools.svg [github-url]: https://github.com/InDIOS/trebor-tools/releases/latest [travis-image]: https://img.shields.io/travis/InDIOS/trebor-tools/master.svg?label=build [travis-url]: https://travis-ci.org/InDIOS/trebor-tools
26c5d73c9f3dfe6c3c7fc3dd95b64a935def448e
[ "JavaScript", "TypeScript", "Markdown" ]
9
TypeScript
InDIOS/trebor-tools
c1ed2b77a079ec12dd32dc75e327599554355bab
a31f3bc4429ee720c22d9d77613de09fba22e243
refs/heads/master
<file_sep>// <NAME>, CS 145, Spring 2017, Section A, #2723 // Programming Assignment #4, 5/4/17 // Object Class - GrammarSolver // // Hosted Javadocs => http://jlandowski.greenrivertech.net/Javadocs/IT145/assn4/ import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.Set; import java.util.TreeSet; import java.util.Random; import java.util.Arrays; // DEBUGGING /** * This object is utilized by GrammarMain.java and accepts an immutable * list of grammar rules, seperated by "::=" and "|" then parses it and * returns randomly generated sentences from it when calling generate(). * * @author <NAME> * @version %I% %G% * @since 1.0 */ public class GrammarSolver { Map<String, String[][]> ruleMap = new TreeMap<String, String[][]>(); Random rand; // GLOBAL RAND TO AVOID RE-SEEDS /** * Constructor, takes an unmodifiable list of grammar lines and creates a rule map * out of it, initializes GrammarSolver's random object. * * @param rules The list of grammar lines used to populate the ruleMap * @throws IllegalArgumentException if list is null or list length &lt; 1 * @pre List elements must be seperate lines of grammar rules, starting * with the non-terminal follow by ::= and its terminals, each AND * terminal must be seperated by whitespace and each OR terminal * seperated by pipes (|). If non-terminal references itself as a * terminal, must include a way to end the loop eventually, such as * giving another terminal option with a pipe (|). * @pre List must not be empty * @pre List must not be null * @post Instantiates a GrammarSolver object with initialized rule map */ public GrammarSolver(List<String> rules) { rand = new Random(); populateMap(rules); // throws IllegalArgumentException if duplicate NonTerminal if(rules == null || rules.size() < 1) throw new IllegalArgumentException(); // DEBUG // printMapDEBUG(); } /** * Checks to see if the entered symbol exists in the rule map, returns true if so. 
* * @param symbol The non-terminal string entered by the user * @throws IllegalArgumentException if symbol is null or list length &lt; 1 * @return True if symbol is a Non-Terminal, false otherwise * @pre String must not be empty * @pre String must not be null */ public boolean contains(String symbol) { if(symbol == null || symbol.length() < 1) throw new IllegalArgumentException(); return ruleMap.containsKey(symbol); } /** * Returns a sorted set of Non-Terminals available in the rule map. * * @return A set of non-terminal keys to generate phrases from */ public Set<String> getSymbols() { return ruleMap.keySet(); // TreeMap sorts it. So.... yay? } /** * Generates a phrase of words from the given symbol using the rule map. * * @param symbol The non-terminal string entered by the user * @throws IllegalArgumentException if symbol is null or list length &lt; 1 * @return The symbol given if it is not a non-terminal, otherwise a string of words * randomly picked from the terminals of the symbol given. * @pre Symbol given must exist as a non-terminal. * @pre String must not be empty. * @pre String must not be null. * @post Will return a string of random terminals from the given * non-terminal, assuming the symbol exists as a non-terminal, * it contains terminal values to pick from, the syntax rules * were followed, and a non-terminal referencing itself does * not create an infinite loop. 
*/ public String generate(String symbol) { // IF SYMBOL IS NOT KEY RETURN IT, SHOULDNT HAPPEN THOUGH B/C OF CONTAINS() if(symbol == null || symbol.length() < 1) throw new IllegalArgumentException(); else if(!ruleMap.containsKey(symbol)) return symbol; return pickRandomWord(ruleMap.get(symbol)); } //================================================================= //---------------------------HELPERS------------------------------- //================================================================= private void populateMap(List<String> rules) { for(String line : rules) { // SPLIT ON ::= SURROUNDED BY ANY SPACE // EX: "<derp> ::= durr | hurr durr | <blah> <dah>" // EX: ["<derp>", "durr | hurr durr | <blah> <dah>"] String[] firstSplit = line.split("[ \t]*::=[ \t]*"); // EX: "<derp>" String nonTerm = firstSplit[0].trim(); if(ruleMap.containsKey(nonTerm)) throw new IllegalArgumentException(); // EX: "durr | hurr durr | <blah> <dah>" String terms = firstSplit[1]; // SPLIT ON PIPE "|" WITH ANY SPACE // EX: ["durr", "hurr durr", "<blah> <dah>"] String[] firstTermSplit = terms.split("[ \t]*[|][ \t]*"); // CREATE EMPTY 2D STRING SIZE OF TOTAL TERMS // ["durr", "hurr durr", "<blah> <dah>"] == 3 TERMS String[][] fullTerminals = new String[firstTermSplit.length][]; // BREAK UP EACH TERM INTO ANOTHER ARRAY for(int i = 0; i < firstTermSplit.length; i++) { String term = firstTermSplit[i]; // SPLIT SEPERATED WORDS/TOKENS // EX: "<blah> <dah>" => ["<blah>", "<dah>"] fullTerminals[i] = term.trim().split("[ \t]+"); } // PLACE PROCESSED LINE IN MAP ruleMap.put(nonTerm, fullTerminals); } } private String pickRandomWord(String[][] terminals) { String phrase = ""; int num = rand.nextInt(terminals.length); String[] term = terminals[num]; // CHOOSE RANDOM TERMINAL SECTION for(int i = 0; i < term.length; i++) // LOOP THROUGH WORDS IF MORE THAN ONE { if(ruleMap.containsKey(term[i])) // IF KEY IN MAP, RECURSIVE { phrase += pickRandomWord(ruleMap.get(term[i])); } else { phrase += term[i] + " "; 
// IF NOT KEY JUST APPEND } } return phrase; } //================================================================= //-----------------------------DEBUG------------------------------- //================================================================= private void printMapDEBUG() { for(String key : ruleMap.keySet()) { System.out.print(key + " : "); String[][] arr = ruleMap.get(key); for(int i = 0; i < arr.length; i++) { System.out.print(Arrays.toString(arr[i])); } System.out.println(); } } }<file_sep>/** * This class instantiates a basic Animal object with a name, x/y coordinate values, * and a max speed. It is used in the AnimalMain class * * @author <NAME> * @version %I% %G% * @since 1.0 * * * * Hosted Javadocs => http://jlandowski.greenrivertech.net/Javadocs/IT145/assn3/ */ import java.util.*; public class HangmanManager { private Set<String> curSet; private SortedSet<Character> guessed; private String curPattern; private int guessesRemaining; /** * Default constructor that takes a List of words to use for hangman, * an int length to decide which length words to pick from, and int max * to track how many guesses are remaining. * * @param dictionary the list of words to create the initial set of words * @param length the length of the words to intially pick from * @param max the remaining amount of guesses to start with * @throws IllegalArgumentException if length &lt; 1 or max &lt; 0 * @pre List must not be empty. * @pre length &gt; 0 * @pre max &gt;= 0 */ public HangmanManager(List<String> dictionary, int length, int max) { if(length < 1 || max < 0) throw new IllegalArgumentException(); guessed = new TreeSet<Character>(); guessesRemaining = max; initializePattern(length); initializeSet(dictionary, length); } /** * Returns the current set of words being used for guessing. * * @return curSet * @post list.size() &gt; 0 */ public Set<String> words() { return curSet; } /** * Returns the current guesses remaining for the player. 
* * @return guessesRemaining * @post guessesRemaining &gt;= 0 */ public int guessesLeft() { return guessesRemaining; } /** * Returns a sorted set of characters already guessed by the player. * * @return guessed */ public SortedSet<Character> guesses() { return guessed; } /** * Returns the pattern string with white space seperating each character ex: "- e - -" instead of "-e--". * * @throws IllegalStateException if current set of words is empty (size &lt; 0) * @return current pattern string with characters seperated by whitespace * @post String characters are space seperated with no beginning or trailing space */ public String pattern() { errIfEmpty(); return expandedPattern(); } /** * Takes a guessed character, archives it in the guessed sorted set, checks if already guessed, * then chooses the new set of words to use and returns the number of occurences of the guessed * character, either 0 or more. * * @param guess the character guessed by the player * @throws IllegalArgumentException if char guessed is was already guessed * @return the number of occurences of the guessed character (int 0 or more) * @pre Character must be lower-case * @post returned occurences &gt;= 0 */ public int record(char guess) { errIfSameGuess(guess); archiveGuess(guess); int occurences = chooseNextSet(guess); if(occurences < 1) guessesRemaining--; return occurences; } //================================================================= //-----------------------PRIVATE HELPERS--------------------------- //================================================================= // 1 : CREATE MAP WITH ALL PATTERNS AND THEIR NUMBER OF MATCHED WORDS // 2 : FIND MOST POPULAR PATTERN // 3 : UPDATE CURRENT DISPLAY PATTERN // 4 : UPDATE CURRENT SET WITH WORDS MATCHING NEW PATTERN // 5 : RETURN NUMBER OF OCCURENCES OF NEW PATTERN private int chooseNextSet(char guess) { Map<String, Integer> patterns = new TreeMap<String, Integer>(); populatePatternsMap(patterns, guess); String newPattern = 
findLargestPattern(patterns); mergePatterns(newPattern); updateCurrentSet(newPattern, guess); return numOccurences(newPattern, guess); } private int numOccurences(String newPattern, char guess) { int count = 0; for(int i = 0; i < newPattern.length(); i++) { if(newPattern.charAt(i) == guess) count++; } return count; } // IF WORDS IN SET MATCH NEW PATTERN ADD TO NEW SET private void updateCurrentSet(String newPattern, char guess) { Set<String> newSet = new TreeSet<String>(); for(String word : curSet) { if(strip(word, guess).equals(newPattern)) newSet.add(word); } curSet = newSet; } // UPDATE CLASS PATTERN STRING WITH OLD + NEW MERGED private void mergePatterns(String newPattern) { String temp = ""; for(int i = 0; i < curPattern.length(); i++) { char current = curPattern.charAt(i); char other = newPattern.charAt(i); boolean bothDash = (current == '-' && other == '-'); boolean onlyCur = (current != '-' && other == '-'); if(bothDash) temp += '-'; else if(onlyCur) temp += current; else temp += other; } // SET CURRENT CLASS STRING PATTERN TO NEW ONE curPattern = temp; } // RETURN PATTERN WITH LARGEST COUNT VALUE I.E. 
MOST WORDS private String findLargestPattern(Map<String, Integer> patterns) { int largest = 0; String biggestPattern = ""; for(String pat : patterns.keySet()) { int value = patterns.get(pat); if(value > largest) { largest = value; biggestPattern = pat; } } return biggestPattern; } // STRIPS WORDS TO MAKE PATTERNS AND ADDS TO MAP // IF ALREADY THERE INCREMENT ITS VALUE private void populatePatternsMap(Map<String, Integer> patterns, char guess) { for(String word : curSet) { String pat = strip(word, guess); if(patterns.containsKey(pat)) patterns.put(pat, patterns.get(pat) + 1); else patterns.put(pat, 1); } } // STRIPS EVERYTHING BUT THE GUESSED CHAR AND RETURNS IT // PLACES DASHES WHERE EMPTY private String strip(String word, char guess) { String temp = ""; for(int i = 0; i < word.length(); i++) { if(word.charAt(i) == guess) temp += guess; else temp += '-'; } return temp; } private void initializeSet(List<String> list, int length) { curSet = new TreeSet<String>(); for(int i = 0; i < list.size(); i++) { // ADD WORDS TO SET OF SPECIFIED LENGTH String word = list.get(i); if(word.length() == length) curSet.add(word); } } // SET INITIAL PATTERN STRING TO ALL DASHES private void initializePattern(int length) { curPattern = ""; for(int i = 0; i < length; i++) { curPattern += '-'; } } // RETURN STRING WITH WHITESPACE BETWEEN CHARS private String expandedPattern() { int len = curPattern.length(); String temp = ""; // APPEND CHARS ADD SPACE EXCEPT LAST for(int i = 0; i < len-1; i++) { temp += curPattern.charAt(i); temp += ' '; } // APPEND LAST CHAR temp += curPattern.charAt(len-1); return temp; } private void archiveGuess(char guess) { guessed.add(guess); } private void errIfSameGuess(char guess) { if(guessed.contains(guess)) throw new IllegalArgumentException(); } private void errIfEmpty() { if(curSet.size() < 1) throw new IllegalStateException(); } }<file_sep><!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage 
--> <html lang="en"> <head> <!-- Generated by javadoc (1.8.0_121) on Sun May 21 16:45:26 PDT 2017 --> <title>Index</title> <meta name="date" content="2017-05-21"> <link rel="stylesheet" type="text/css" href="stylesheet.css" title="Style"> <script type="text/javascript" src="script.js"></script> </head> <body> <script type="text/javascript"><!-- try { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Index"; } } catch(err) { } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar.top"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.top.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="package-summary.html">Package</a></li> <li>Class</li> <li><a href="overview-tree.html">Tree</a></li> <li><a href="deprecated-list.html">Deprecated</a></li> <li class="navBarCell1Rev">Index</li> <li><a href="help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="index.html?index-all.html" target="_top">Frames</a></li> <li><a href="index-all.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="contentContainer"><a href="#I:A">A</a>&nbsp;<a href="#I:B">B</a>&nbsp;<a href="#I:C">C</a>&nbsp;<a href="#I:G">G</a>&nbsp;<a href="#I:I">I</a>&nbsp;<a href="#I:M">M</a>&nbsp;<a 
href="#I:P">P</a>&nbsp;<a href="#I:R">R</a>&nbsp;<a href="#I:S">S</a>&nbsp;<a href="#I:T">T</a>&nbsp;<a href="#I:W">W</a>&nbsp;<a name="I:A"> <!-- --> </a> <h2 class="title">A</h2> <dl> <dt><span class="memberNameLink"><a href="CardArrayList.html#add-Card-">add(Card)</a></span> - Method in class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Adds a Card object to the end of the CardArrayList</div> </dd> <dt><span class="memberNameLink"><a href="CardArrayList.html#add-int-Card-">add(int, Card)</a></span> - Method in class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Inserts the given Card object at the specified index location, shifting over all Card objects.</div> </dd> </dl> <a name="I:B"> <!-- --> </a> <h2 class="title">B</h2> <dl> <dt><span class="memberNameLink"><a href="Card.html#boost--">boost()</a></span> - Method in class <a href="Card.html" title="class in &lt;Unnamed&gt;">Card</a></dt> <dd> <div class="block">Permanently increases this card's current power and toughness by 10%, rounded.</div> </dd> </dl> <a name="I:C"> <!-- --> </a> <h2 class="title">C</h2> <dl> <dt><a href="Card.html" title="class in &lt;Unnamed&gt;"><span class="typeNameLink">Card</span></a> - Class in <a href="package-summary.html">&lt;Unnamed&gt;</a></dt> <dd> <div class="block">..</div> </dd> <dt><span class="memberNameLink"><a href="Card.html#Card--">Card()</a></span> - Constructor for class <a href="Card.html" title="class in &lt;Unnamed&gt;">Card</a></dt> <dd> <div class="block">Default Constructor, sets card power and toughness to random values between MIN and MAX contraints.</div> </dd> <dt><span class="memberNameLink"><a href="Card.html#Card-int-">Card(int)</a></span> - Constructor for class <a href="Card.html" title="class in &lt;Unnamed&gt;">Card</a></dt> <dd> <div class="block">Constructor, takes an int argument that sets both power and toughness to 
that argument.</div> </dd> <dt><span class="memberNameLink"><a href="Card.html#Card-int-int-">Card(int, int)</a></span> - Constructor for class <a href="Card.html" title="class in &lt;Unnamed&gt;">Card</a></dt> <dd> <div class="block">Constructor, takes 2 int arguments to set both power and toughness values seperately.</div> </dd> <dt><a href="CardArrayList.html" title="class in &lt;Unnamed&gt;"><span class="typeNameLink">CardArrayList</span></a> - Class in <a href="package-summary.html">&lt;Unnamed&gt;</a></dt> <dd> <div class="block">..</div> </dd> <dt><span class="memberNameLink"><a href="CardArrayList.html#CardArrayList--">CardArrayList()</a></span> - Constructor for class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Default Constructor, sets initial capacity of CardArrayList to 10 and size to 0.</div> </dd> <dt><span class="memberNameLink"><a href="CardArrayList.html#CardArrayList-int-">CardArrayList(int)</a></span> - Constructor for class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Constructor, sets initial capacity of CardArrayList to argument passed and size to 0.</div> </dd> <dt><a href="CardArrayMaster.html" title="class in &lt;Unnamed&gt;"><span class="typeNameLink">CardArrayMaster</span></a> - Class in <a href="package-summary.html">&lt;Unnamed&gt;</a></dt> <dd>&nbsp;</dd> <dt><span class="memberNameLink"><a href="CardArrayMaster.html#CardArrayMaster--">CardArrayMaster()</a></span> - Constructor for class <a href="CardArrayMaster.html" title="class in &lt;Unnamed&gt;">CardArrayMaster</a></dt> <dd>&nbsp;</dd> <dt><span class="memberNameLink"><a href="CardArrayList.html#clear--">clear()</a></span> - Method in class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Clear the CardArrayList, setting size to 0 and capacity to 10.</div> </dd> <dt><span 
class="memberNameLink"><a href="Card.html#compareTo-Card-">compareTo(Card)</a></span> - Method in class <a href="Card.html" title="class in &lt;Unnamed&gt;">Card</a></dt> <dd> <div class="block">Compares by cost first, then power, then toughness</div> </dd> </dl> <a name="I:G"> <!-- --> </a> <h2 class="title">G</h2> <dl> <dt><span class="memberNameLink"><a href="CardArrayList.html#get-int-">get(int)</a></span> - Method in class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Returns the Card object found at given index</div> </dd> <dt><span class="memberNameLink"><a href="Card.html#getCost--">getCost()</a></span> - Method in class <a href="Card.html" title="class in &lt;Unnamed&gt;">Card</a></dt> <dd> <div class="block">Getter, returns the card's cost based on its power and toughness</div> </dd> <dt><span class="memberNameLink"><a href="Card.html#getPower--">getPower()</a></span> - Method in class <a href="Card.html" title="class in &lt;Unnamed&gt;">Card</a></dt> <dd> <div class="block">Getter, returns the card's power level</div> </dd> <dt><span class="memberNameLink"><a href="Card.html#getToughness--">getToughness()</a></span> - Method in class <a href="Card.html" title="class in &lt;Unnamed&gt;">Card</a></dt> <dd> <div class="block">Getter, returns the card's toughness level</div> </dd> </dl> <a name="I:I"> <!-- --> </a> <h2 class="title">I</h2> <dl> <dt><span class="memberNameLink"><a href="CardArrayList.html#indexOf-Card-">indexOf(Card)</a></span> - Method in class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Returns the index of the first Card object equal to the Card given</div> </dd> </dl> <a name="I:M"> <!-- --> </a> <h2 class="title">M</h2> <dl> <dt><span class="memberNameLink"><a href="CardArrayMaster.html#main-java.lang.String:A-">main(String[])</a></span> - Static method in class <a href="CardArrayMaster.html" title="class in 
&lt;Unnamed&gt;">CardArrayMaster</a></dt> <dd>&nbsp;</dd> </dl> <a name="I:P"> <!-- --> </a> <h2 class="title">P</h2> <dl> <dt><a href="PremiumCard.html" title="class in &lt;Unnamed&gt;"><span class="typeNameLink">PremiumCard</span></a> - Class in <a href="package-summary.html">&lt;Unnamed&gt;</a></dt> <dd> <div class="block">This class represents a PremiumCard which is just a Card object that has a different toString display.</div> </dd> <dt><span class="memberNameLink"><a href="PremiumCard.html#PremiumCard--">PremiumCard()</a></span> - Constructor for class <a href="PremiumCard.html" title="class in &lt;Unnamed&gt;">PremiumCard</a></dt> <dt><span class="memberNameLink"><a href="PremiumCard.html#PremiumCard-int-">PremiumCard(int)</a></span> - Constructor for class <a href="PremiumCard.html" title="class in &lt;Unnamed&gt;">PremiumCard</a></dt> <dt><span class="memberNameLink"><a href="PremiumCard.html#PremiumCard-int-int-">PremiumCard(int, int)</a></span> - Constructor for class <a href="PremiumCard.html" title="class in &lt;Unnamed&gt;">PremiumCard</a></dt> </dl> <a name="I:R"> <!-- --> </a> <h2 class="title">R</h2> <dl> <dt><span class="memberNameLink"><a href="Card.html#randomInt-int-int-">randomInt(int, int)</a></span> - Static method in class <a href="Card.html" title="class in &lt;Unnamed&gt;">Card</a></dt> <dd> <div class="block">Returns a random int between min and max inclusive.</div> </dd> <dt><span class="memberNameLink"><a href="CardArrayList.html#remove--">remove()</a></span> - Method in class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Removes and returns the last Card object in the CardArrayList</div> </dd> <dt><span class="memberNameLink"><a href="CardArrayList.html#remove-int-">remove(int)</a></span> - Method in class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Removes and returns the Card object at the given index, and 
shifts all other Cards down.</div> </dd> </dl> <a name="I:S"> <!-- --> </a> <h2 class="title">S</h2> <dl> <dt><span class="memberNameLink"><a href="CardArrayList.html#shuffle--">shuffle()</a></span> - Method in class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Randomly shuffles the Cards in the CardArrayList.</div> </dd> <dt><span class="memberNameLink"><a href="CardArrayList.html#size--">size()</a></span> - Method in class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Returns the current number of CardArrayList elements.</div> </dd> <dt><span class="memberNameLink"><a href="CardArrayList.html#sort--">sort()</a></span> - Method in class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Sorts the Card objects in descending order.</div> </dd> </dl> <a name="I:T"> <!-- --> </a> <h2 class="title">T</h2> <dl> <dt><span class="memberNameLink"><a href="Card.html#toString--">toString()</a></span> - Method in class <a href="Card.html" title="class in &lt;Unnamed&gt;">Card</a></dt> <dd> <div class="block">Returns the card's power and toughness in string form</div> </dd> <dt><span class="memberNameLink"><a href="CardArrayList.html#toString--">toString()</a></span> - Method in class <a href="CardArrayList.html" title="class in &lt;Unnamed&gt;">CardArrayList</a></dt> <dd> <div class="block">Returns a String representation of the CardArrayList prefixed with 0 and postfixed with current size.</div> </dd> <dt><span class="memberNameLink"><a href="PremiumCard.html#toString--">toString()</a></span> - Method in class <a href="PremiumCard.html" title="class in &lt;Unnamed&gt;">PremiumCard</a></dt> <dd> <div class="block">Returns the card's power and toughness in string form</div> </dd> </dl> <a name="I:W"> <!-- --> </a> <h2 class="title">W</h2> <dl> <dt><span class="memberNameLink"><a 
href="Card.html#weaken--">weaken()</a></span> - Method in class <a href="Card.html" title="class in &lt;Unnamed&gt;">Card</a></dt> <dd> <div class="block">Permanently lowers this card's current power and toughness by 10%, rounded.</div> </dd> </dl> <a href="#I:A">A</a>&nbsp;<a href="#I:B">B</a>&nbsp;<a href="#I:C">C</a>&nbsp;<a href="#I:G">G</a>&nbsp;<a href="#I:I">I</a>&nbsp;<a href="#I:M">M</a>&nbsp;<a href="#I:P">P</a>&nbsp;<a href="#I:R">R</a>&nbsp;<a href="#I:S">S</a>&nbsp;<a href="#I:T">T</a>&nbsp;<a href="#I:W">W</a>&nbsp;</div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar.bottom"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.bottom.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="package-summary.html">Package</a></li> <li>Class</li> <li><a href="overview-tree.html">Tree</a></li> <li><a href="deprecated-list.html">Deprecated</a></li> <li class="navBarCell1Rev">Index</li> <li><a href="help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="index.html?index-all.html" target="_top">Frames</a></li> <li><a href="index-all.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> </body> </html> <file_sep>// <NAME>, CS145, Spring 2017, Section 2723 // Programming Assignment #2, 4/13/17 // This class insantiates a Lion critter to be rendered on live on 
CritterFrame. import java.awt.*; import java.util.Random; /** * This class instantiates a Lion critter that always INFECTS if it can, * otherwise turns LEFT if a wall is to the front or right, otherwise * turns RIGHT if a fellow lion is in front, otherwise just HOPs forward. * * @author <NAME> * @version %I% %G% * @since 1.0 */ public class Lion extends Critter { private Color lionColor; private Random rand; private int colorLoop, lastColor; /** * Only constructor for class Lion. */ public Lion() { rand = new Random(); lastColor = -1; lastColor = randRGB(); colorLoop = 0; } private int randRGB() { int rgb = -1; while(rgb == lastColor) // IF OLD COLOR PICK DIFFERENT { rgb = rand.nextInt(2); } if (rgb == 0) lionColor = Color.RED; else if(rgb == 1) lionColor = Color.GREEN; else lionColor = Color.BLUE; return rgb; } /** * Accepts info about current lion status, and will change color every * 3 times this method is called. * * Returns an action based on conditions : * <ul> * <li>If enemy in front : <b>INFECT ENEMY</b></li> * <li>Otherwise if front/right is a wall : <b>TURN LEFT</b></li> * <li>Otherwise if front is fellow Lion : <b>TURN RIGHT</b></li> * <li>Otherwise : <b>HOP FORWARD</b></li> * </ul> * * @param info object containing information about critter's current status * @return Action.INFECT if enemy in front, else Action.LEFT if front/right * is a wall, else Action.RIGHT if front is fellow lion, Action.HOP * if nothing else valid. */ public Action getMove(CritterInfo info) { if(colorLoop > 2) { colorLoop = 0; lastColor = randRGB(); } colorLoop++; if (info.getFront() == Neighbor.OTHER) return Action.INFECT; else if(info.getFront() == Neighbor.WALL || info.getRight() == Neighbor.WALL) return Action.LEFT; else if(info.getFront() == Neighbor.SAME) return Action.RIGHT; else return Action.HOP; } /** * Returns the color of the lion. * * @return lionColor, holding either Color.RED, Color.GREEN or Color.BLUE. 
*/ public Color getColor() { return lionColor; } /** * Returns the string that is displayed as the label for the lion. * * @return L */ public String toString() { return "L"; } } <file_sep>// <NAME>, CS145, Spring 2017, Section 2723 // Programming Assignment #2, 4/13/17 // This class insantiates a Giant critter to be rendered on live on CritterFrame. // Failed code is at the bottom. // // I initially tried to make gator with out making it // copy the super overpowered Bear and Giant critters by following Flytrap's strategy // of gathering together and just spinning while sometimes moving, this failed miserably. // // I then tried implementing an IN DANGER method that would detect if an enemy was near and // force a HOP, I initially combined this with the turtling stategy before, also useless. // // I then implemented a stalking that the gator would start when hitting a wall. An array // held a chain of movements to fire in sequence when hitting a wall, causing it to turn // around, move back one, and turn to face the wall again, then wait, attemping to infect // anything that would run by. I tried this because the bear/giants tend to circle the map, // usually on the boundary, I figured, despite the accuracy penalty to INFECT for not moving, // that this would still be beneficial as a gator could potentially snag 1-2 enemies for free, // that move by, this failed as well. // // Based on the rules and experiments I found that moving a lot is just too OP, so anything // that involved halting movement ended up being extremely detrimental. So my final strategy // that ended up working (mostly) was combining both moving as much as possible, with the first // strategy, which was grouping together. So now, the gators when running into each other, will // mimic the direction of the gator they ran into, thus following them side by side, but turning // randomly if 2 gators hit each other so they don't get stuck. 
Otherwise, they just keep moving // and turn randomly when hitting a wall. Turning left everytime copies bears and lets bears win, // turning right everytime copies giants and lets giants win, so instead gators just random this // turn when hitting walls, otherwise they tend to gang up and win. // // It's not perfect as variance still lets gators win about 43% of the time, bears and fly traps // tend to tie, giants won half as much, and lions win never. Lions suck. Even when bears are // dominating, the grouping behavior of gators lets them SOMETIMES make a comeback, or at the // very least, put up a good fight and last quite a while. // // Tries: 51 //------------ // gator: 22 // trap : 11 // bear : 12 // giant: 5 // stalemate[flytrap vs giant] : 1 import java.awt.*; import java.util.Random; /** * This class instantiates a Gator critter that always INFECTS if it can, * otherwise if it runs into a fellow Gator, it turns to face the same Direction * as that Gator, otherwise tries to HOP forward, otherwise turns LEFT or RIGHT. * * @author <NAME> * @version %I% %G% * @since 1.0 */ public class Gator extends Critter { private Random rand; private String body; private boolean repeat; private Action echoAction; // NEEDED TO REPEAT A SPECIFIED ACTION /** * Only constructor for class Gator. */ public Gator() { rand = new Random(); repeat = false; echoAction = Action.HOP; body = "g"; } /** * Accepts info about current gator status. * * Returns an action based on conditions : * <ul> * <li>If enemy in front : <b>INFECT ENEMY</b></li> * <li>If friend in front : <b>TURN LEFT/RIGHT</b> to copy their direction</li> * <li>Otherwise if able : <b>HOP FORWARD</b></li> * <li>Otherwise : <b>TURN LEFT/RIGHT</b> randomly</li> * </ul> * * @param info object containing information about critter's current status * @return Action.INFECT if enemy in front, else Action.LEFT/Action.RIGHT if friend in front, * else Action.HOP if front empty, else Action.LEFT/Action.RIGHT randomly. 
*/ public Action getMove(CritterInfo info) { if(info.getFront() == Neighbor.OTHER) { body = "G"; return Action.INFECT; } else if(repeat) return echoAction; else if(foundFriend(info)) { if(!friendFacingMe(info)) { switch(info.getFrontDirection()) { case NORTH: return turnNorth(info); case SOUTH: return turnSouth(info); case EAST: return turnEast(info); case WEST: return turnWest(info); } } } else if(info.getFront() == Neighbor.EMPTY) { body = "g"; return Action.HOP; } return randTurn(); } /** * Returns the color of the giant. * * @return Color rgb of [50, 200, 50], darker green. */ public Color getColor() { return new Color(50, 200, 50); } /** * Returns the string that is displayed as the label for the gator. * * @return g or G if gator is infecting */ public String toString() { return body; } //======================================================================================= //------------------------------------PRIVATE HELPERS------------------------------------ //======================================================================================= private Action randTurn() { if( rand.nextBoolean() ) return Action.LEFT; else return Action.RIGHT; } private boolean friendFacingMe(CritterInfo info) { Direction meFacing = info.getDirection(); Direction themFacing = info.getFrontDirection(); switch(meFacing) { case NORTH: return themFacing == Direction.SOUTH; case SOUTH: return themFacing == Direction.NORTH; case EAST: return themFacing == Direction.WEST; case WEST: return themFacing == Direction.EAST; default: return false; } } private boolean foundFriend(CritterInfo info) { return info.getFront() == Neighbor.SAME; } private Action turnWest(CritterInfo info) { if(info.getDirection() == Direction.NORTH) return Action.LEFT; else if(info.getDirection() == Direction.SOUTH) return Action.RIGHT; else if(info.getDirection() == Direction.EAST) { Action act = randTurn(); repeat = true; echoAction = act; return act; } else return Action.HOP; } private Action 
turnNorth(CritterInfo info) { if(info.getDirection() == Direction.EAST) return Action.LEFT; else if(info.getDirection() == Direction.WEST) return Action.RIGHT; else if(info.getDirection() == Direction.SOUTH) { Action act = randTurn(); repeat = true; echoAction = act; return act; } else return Action.HOP; } private Action turnEast(CritterInfo info) { if(info.getDirection() == Direction.NORTH) return Action.RIGHT; else if(info.getDirection() == Direction.SOUTH) return Action.LEFT; else if(info.getDirection() == Direction.WEST) { Action act = randTurn(); repeat = true; echoAction = act; return act; } else return Action.HOP; } private Action turnSouth(CritterInfo info) { if(info.getDirection() == Direction.EAST) return Action.RIGHT; else if(info.getDirection() == Direction.WEST) return Action.LEFT; else if(info.getDirection() == Direction.NORTH) { Action act = randTurn(); repeat = true; echoAction = act; return act; } else return Action.HOP; } } //======================================================================================= //---------------------------------FAILED EXPERIMENTS------------------------------------ //======================================================================================= // stalking = false; // stalktime = stalkStage = 0; /*private final Action[] stalkActions = { Action.LEFT, Action.LEFT, Action.HOP, Action.RIGHT, Action.RIGHT, Action.INFECT, Action.INFECT, Action.INFECT }; */ //private int stalktime, stalkStage; //private boolean stalking; //private final Action[] stalkActions = { Action.LEFT, Action.LEFT }; /*private Action stalk() { Action act = stalkActions[stalkStage]; stalkStage++; if(stalkStage > stalkActions.length - 1) { stalkStage = 0; stalking = false; } return act; }*/ /*public Action getMove(CritterInfo info) { if(info.getFront() == Neighbor.WALL) stalking = true; if(info.getFront() == Neighbor.OTHER) { body = "G"; return Action.INFECT; } else if(stalking) { if( isMoving(info) ) return Action.INFECT; else return 
stalk(); } else if(info.getFront() != Neighbor.EMPTY) { body = "g"; return randTurn(); } else return Action.HOP; } private boolean isMoving(CritterInfo info) { return stalkActions[stalkStage] == Action.HOP && info.getFront() != Neighbor.EMPTY; }*/ /*private boolean huggingFriend(CritterInfo info) { Neighbor friend = Neighbor.SAME; if(info.getLeft() == friend || info.getRight() == friend || info.getFront() == friend || info.getBack() == friend) { return true; } else return false; }*/ /*private boolean inDanger(CritterInfo info) { switch(info.getDirection()) { case NORTH: if(info.getLeft() == Neighbor.OTHER && info.getLeftDirection() == Direction.EAST) return true; else if(info.getRight() == Neighbor.OTHER && info.getRightDirection() == Direction.WEST) return true; else if(info.getBack() == Neighbor.OTHER && info.getBackDirection() == Direction.NORTH) return true; break; case SOUTH: if(info.getLeft() == Neighbor.OTHER && info.getLeftDirection() == Direction.WEST) return true; else if(info.getRight() == Neighbor.OTHER && info.getRightDirection() == Direction.EAST) return true; else if(info.getBack() == Neighbor.OTHER && info.getBackDirection() == Direction.SOUTH) return true; break; case EAST: if(info.getLeft() == Neighbor.OTHER && info.getLeftDirection() == Direction.SOUTH) return true; else if(info.getRight() == Neighbor.OTHER && info.getRightDirection() == Direction.NORTH) return true; else if(info.getBack() == Neighbor.OTHER && info.getBackDirection() == Direction.EAST) return true; break; case WEST: if(info.getLeft() == Neighbor.OTHER && info.getLeftDirection() == Direction.NORTH) return true; else if(info.getRight() == Neighbor.OTHER && info.getRightDirection() == Direction.SOUTH) return true; else if(info.getBack() == Neighbor.OTHER && info.getBackDirection() == Direction.WEST) return true; break; } return false; }*/ <file_sep>// <NAME>, CS 145, Spring 2017, Section A, #2723 // Programming Assignment #7, 5/28/17 // Class => QuestionTree // Used By => 
QuestionMain, VaderMain // Needs => QuestionNode(included) // // Hosted Javadocs => http://jlandowski.greenrivertech.net/Javadocs/IT145/assn7/ import java.io.PrintStream; import java.util.Scanner; /** * This class instantiates a Binary Tree of Questions and Answers, as well as contains * its helper class QuestionNode. The QuestionTree will maintain a Binary Tree of questions * either initialized with a default answer or loaded from a saved file, then populates the * tree during runtime as the user adds new questions and answers. * * @author <NAME> * @version %I% %G% * @since 1.0 */ public class QuestionTree { private UserInterface ui; private QuestionNode root; private int games, wins; //================================================================= //-------------------------CONSTRUCTORS---------------------------- //================================================================= /** * Initializes the question Binary Tree with a single default answer and stores the * given UserInterface. * * @param ui The UserInterface the QuestionTree will use * @throws IllegalArgumentException if ui is null * @pre UserInterface must exist and not be null, must contain the * following methods: nextBoolean(), nextLine(), println(), print() * @post Populates a tree with a default answer and stores the given ui */ public QuestionTree (UserInterface ui) { if(ui == null) throw new IllegalArgumentException("QuestionTree needs a UserInterface."); this.ui = ui; this.root = new QuestionNode("Computer"); } //================================================================= //----------------------------GENERAL------------------------------ //================================================================= /** * Starts a round of 20 Questions, will continue guessing until the computer has * reached an answer, if that answer is wrong the computer will prompt the user for * a new answer and a corresponding question and handle the data changes internally. 
*/ public void play() { games++; root = findAnswer(root); } /** * Archives the questions and answers in the current tree to a file of specified name. * * @param output The PrintStream pointing to the file * @throws IllegalArgumentException if the PrintStream is null * @pre PrintStream must contain a file and not be null * @post Saves the tree data to the text file */ public void save(PrintStream output) { if(output == null) throw new IllegalArgumentException("QuestionTree needs a PrintStream"); archiveNodeText(root, output); } /** * Takes a file of questions and answers and creates a Binary Tree out of it. * * @param input The Scanner containing the file * @throws IllegalArgumentException if Scanner is null or if missing a line * @pre Scanner must contain a file and not be null or missing lines * @post Prepares a tree of the questions and answers */ public void load(Scanner input) { if(input == null) throw new IllegalArgumentException("QuestionTree needs a Scanner."); root = createNode(input); } //================================================================= //---------------------------GETTERS------------------------------- //================================================================= /** * Returns a int representing the number of games played. * * @return The current number of games played */ public int totalGames() { return games; } /** * Returns a int representing the number of computer wins. 
* * @return The current number of computer wins */ public int gamesWon() { return wins; } //================================================================= //------------------------PRIVATE-HELPERS-------------------------- //================================================================= private QuestionNode findAnswer(QuestionNode node) { // IS ANSWER if(node.left == null || node.right == null) node = checkAnswer(node); else { // IS QUESTION SO ASK ui.print(node.text); if ( ui.nextBoolean() ) node.left = findAnswer(node.left); else node.right = findAnswer(node.right); } return node; } private QuestionNode checkAnswer(QuestionNode answer) { ui.print("\nWould your object happen to be " + answer.text + "?"); if( ui.nextBoolean() ) { wins++; ui.println("I win!"); return answer; } ui.print("\nI lose. What is your object?"); QuestionNode newAnswer = new QuestionNode( ui.nextLine() ); ui.print("\nType a yes/no question to distinguish your item from " + answer.text); QuestionNode question = new QuestionNode( ui.nextLine() ); ui.print("\nAnd what is the answer for your object?"); boolean yes = ui.nextBoolean(); question.left = (yes) ? newAnswer : answer; question.right = (yes) ? answer : newAnswer; return question; } // this is my favorite one, i love recursion private QuestionNode createNode(Scanner lines) { if( !lines.hasNextLine() ) throw new IllegalArgumentException("Missing line in Scanner"); String text = lines.nextLine(); QuestionNode node = new QuestionNode(text.substring(2)); if( text.startsWith("Q:") ) { node.left = createNode(lines); node.right = createNode(lines); } return node; } private void archiveNodeText(QuestionNode node, PrintStream file) { boolean isQuestion = (node.left != null && node.right != null); file.println( (isQuestion ? 
"Q:" : "A:") + node.text); if(isQuestion) { archiveNodeText(node.left, file); archiveNodeText(node.right, file); } } //================================================================= //-----------------------PRIVATE-CLASSES--------------------------- //================================================================= private class QuestionNode { String text; QuestionNode left, right; QuestionNode() { throw new IllegalStateException("Node must be given a Question or Answer"); } QuestionNode(String text) { this.text = text; } } // END CLASS //================================================================= //-----------------------JAVA-UTILITIES---------------------------- //================================================================= //================================================================= //-------------------------STATIC-HELPERS-------------------------- //================================================================= //================================================================= //-----------------------------DEBUG------------------------------- //================================================================= } // END CLASS<file_sep>/** * <NAME> * CS145 : Spring 2017 : Section 2723 * Inclass Assignment WeEK #5 : 5/2/17 * * This program searches a string recursively and tests each character * pair by pair to see if it is truly a palindrome. The method will take * a word, cut off and test the first and last characters, if they match * it will pass the remaining word to itself and continue, as soon as any * it finds any mismatch it kills the loop, if it reaches the end with no * mismatches it will return true up the chain. 
* */ public class RecursiveTestMain { public static void main(String[] args) { System.out.println("madam : " + isPalindrome("madam")); System.out.println("racecar : " + isPalindrome("racecar")); System.out.println("step on no pets : " + isPalindrome("step on no pets")); System.out.println("able was I ere I saw elba : " + isPalindrome("able was I ere I saw elba")); System.out.println("Java : " + isPalindrome("Java")); System.out.println("rotater : " + isPalindrome("rotater")); System.out.println("byebye : " + isPalindrome("byebye")); System.out.println("notion : " + isPalindrome("notion")); } public static boolean isPalindrome(String word) { // GRAB LENGTH int len = word.length(); if(len < 2) { // IF REMAINING STRING IS 1 OR 0 CHARS ITS A PALINDROME return true; } else if(front.equals(back)) // GET FIRST CHAR String front = word.substring(0, 1); // GET LAST CHAR String back = word.substring(len - 1); { // RETURN WORD WITH FIRST AND LAST CHAR SLICED OFF return isPalindrome(word.substring(1, len - 1)); } else { // AS SOON AS FIRST/LAST CHAR MISMATCH KILL RECURSION return false; } } }<file_sep>// <NAME>, CS 145, Spring 2017, Section A, #2723 // Programming Assignment #5, 5/12/17 // Object Class => AnagramManager // Client Class => AnagramMain // Dependency => Word // // Hosted Javadocs => http://jlandowski.greenrivertech.net/Javadocs/IT145/assn5/ import java.util.Arrays; import java.util.Random; import java.util.List; import java.util.Set; import java.util.TreeSet; import java.util.Map; import java.util.TreeMap; /** * This class manages the anagram data structure for a dictionary of words, as well as serving * requests for anagrams. * * @author <NAME> * @version %I% %G% * @since 1.0 */ public class AnagramManager { Word[] words; Map<String,Set<String>> anagramMap; Random rand; /** * Constructor, takes an unmodifiable list of words and instantiates a Word object * for each, stored in an array. Initializes a map of canonical words to sets of * their matching anagrams. 
Initializes a random object. * * @param listOfWords A List of strings passed to AnagramManager * @throws IllegalArgumentException if list is null or list length &lt; 1 * @pre List must not be empty * @pre List must not be null * @post Instantiates an AnagramManager object with initialized Word array * and anagram map */ public AnagramManager(List<String> listOfWords) { if(listOfWords == null || listOfWords.size() < 1) throw new IllegalArgumentException(); rand = new Random(); words = new Word[ listOfWords.size() ]; anagramMap = new TreeMap<String,Set<String>>(); populateWords(listOfWords); populateAnagrams(); } /** * Sorts the internal array of Word objects by their original alphabetical ordering. */ public void sortByWord() { // SET COMPARE TO ORIGINAL WORD VALUE for(Word obj : words) { obj.compareOriginal(); } Arrays.sort(words); } /** * Sorts the internal array of Word objects by their canonical ordering. */ public void sortByForm() { // SET COMPARE TO CANONICAL WORD VALUE for(Word obj : words) { obj.compareCanonical(); } Arrays.sort(words); } /** * Picks a random word a canonical form equal to canonical form of the word given.<br> * Ex: [hurrdurr : hdrrrruu] == [durrhurr : hdrrrruu] * * @param word The word used to find an anagram * @return Random word of matching canonical form, if no match returns * empty string */ public String getAnagram(String word) { String canon = Word.canonicalize(word); if(anagramMap.containsKey(canon)) { Set<String> set = anagramMap.get(canon); String[] matches = set.toArray(new String[ set.size() ]); return matches[ rand.nextInt(matches.length) ]; } else return ""; } /** * Finds and returns a set of all matching anagrams of the word given. * * @param word The word used to find anagrams * @return A sorted set of all anagrams of the word given */ public Set<String> getAnagrams(String word) { String canon = Word.canonicalize(word); boolean hasAnagram = anagramMap.containsKey(canon); Set<String> anagrams = (hasAnagram) ? 
anagramMap.get(canon) : new TreeSet<String>(); return anagrams; } /** * Returns up to 5 of the first and last Words held in the internal array, displaying * their normal and canonical forms.<br> * * @return A string containing the first and last 5 internal Words */ public String toString() { String printedWords = ""; int len = words.length; if(len < 1) printedWords += "[]"; else { int cap = (len < 5) ? len : 5; for(int i = 0; i < cap; i++) printedWords += words[i]; printedWords += "[...]"; for(int i = len-cap; i < len; i++) printedWords += words[i]; } return printedWords; } //================================================================= //---------------------------HELPERS------------------------------- //================================================================= private void populateWords(List<String> list) { int index = 0; for(String originalWord : list) { words[index] = new Word(originalWord); index++; } } private void populateAnagrams() { for(Word obj : words) { String canon = obj.getForm(); String orig = obj.getWord(); if( !anagramMap.containsKey(canon) ) { // ADD KEY MAPPED TO NEW SET Set<String> set = new TreeSet<String>(); set.add(orig); anagramMap.put(canon, set); } else { // GRAB SET AND ADD NORMAL WORD TO IT anagramMap.get(canon).add(orig); } } } //================================================================= //-----------------------------DEBUG------------------------------- //================================================================= }<file_sep><!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (1.8.0_121) on Sun May 14 20:26:16 PDT 2017 --> <title>Index</title> <meta name="date" content="2017-05-14"> <link rel="stylesheet" type="text/css" href="stylesheet.css" title="Style"> <script type="text/javascript" src="script.js"></script> </head> <body> <script type="text/javascript"><!-- try { if 
(location.href.indexOf('is-external=true') == -1) { parent.document.title="Index"; } } catch(err) { } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar.top"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.top.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="package-summary.html">Package</a></li> <li>Class</li> <li><a href="overview-tree.html">Tree</a></li> <li><a href="deprecated-list.html">Deprecated</a></li> <li class="navBarCell1Rev">Index</li> <li><a href="help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="index.html?index-all.html" target="_top">Frames</a></li> <li><a href="index-all.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="contentContainer"><a href="#I:A">A</a>&nbsp;<a href="#I:C">C</a>&nbsp;<a href="#I:D">D</a>&nbsp;<a href="#I:G">G</a>&nbsp;<a href="#I:M">M</a>&nbsp;<a href="#I:S">S</a>&nbsp;<a href="#I:T">T</a>&nbsp;<a href="#I:W">W</a>&nbsp;<a name="I:A"> <!-- --> </a> <h2 class="title">A</h2> <dl> <dt><a href="AnagramMain.html" title="class in &lt;Unnamed&gt;"><span class="typeNameLink">AnagramMain</span></a> - Class in <a href="package-summary.html">&lt;Unnamed&gt;</a></dt> <dd>&nbsp;</dd> <dt><span class="memberNameLink"><a 
href="AnagramMain.html#AnagramMain--">AnagramMain()</a></span> - Constructor for class <a href="AnagramMain.html" title="class in &lt;Unnamed&gt;">AnagramMain</a></dt> <dd>&nbsp;</dd> <dt><a href="AnagramManager.html" title="class in &lt;Unnamed&gt;"><span class="typeNameLink">AnagramManager</span></a> - Class in <a href="package-summary.html">&lt;Unnamed&gt;</a></dt> <dd> <div class="block">This class manages the anagram data structure for a dictionary of words, as well as serving requests for anagrams.</div> </dd> <dt><span class="memberNameLink"><a href="AnagramManager.html#AnagramManager-java.util.List-">AnagramManager(List&lt;String&gt;)</a></span> - Constructor for class <a href="AnagramManager.html" title="class in &lt;Unnamed&gt;">AnagramManager</a></dt> <dd> <div class="block">Constructor, takes an unmodifiable list of words and instantiates a Word object for each, stored in an array.</div> </dd> </dl> <a name="I:C"> <!-- --> </a> <h2 class="title">C</h2> <dl> <dt><span class="memberNameLink"><a href="Word.html#canonicalize-java.lang.String-">canonicalize(String)</a></span> - Static method in class <a href="Word.html" title="class in &lt;Unnamed&gt;">Word</a></dt> <dd> <div class="block">Takes a word and returns a letter-sorted (canonical) version of that word.</div> </dd> <dt><span class="memberNameLink"><a href="Word.html#compareCanonical--">compareCanonical()</a></span> - Method in class <a href="Word.html" title="class in &lt;Unnamed&gt;">Word</a></dt> <dd> <div class="block">Sets the Word to be compared based on its canonical form.</div> </dd> <dt><span class="memberNameLink"><a href="Word.html#compareOriginal--">compareOriginal()</a></span> - Method in class <a href="Word.html" title="class in &lt;Unnamed&gt;">Word</a></dt> <dd> <div class="block">Sets the Word to be compared based on its original form.</div> </dd> <dt><span class="memberNameLink"><a href="Word.html#compareTo-Word-">compareTo(Word)</a></span> - Method in class <a href="Word.html" 
title="class in &lt;Unnamed&gt;">Word</a></dt> <dd> <div class="block">Compares another Word to this Word based on whichever form is currently set to be compared to.</div> </dd> </dl> <a name="I:D"> <!-- --> </a> <h2 class="title">D</h2> <dl> <dt><span class="memberNameLink"><a href="AnagramMain.html#DICTIONARY_FILE">DICTIONARY_FILE</a></span> - Static variable in class <a href="AnagramMain.html" title="class in &lt;Unnamed&gt;">AnagramMain</a></dt> <dd>&nbsp;</dd> </dl> <a name="I:G"> <!-- --> </a> <h2 class="title">G</h2> <dl> <dt><span class="memberNameLink"><a href="AnagramMain.html#getAnagram-java.util.Scanner-AnagramManager-">getAnagram(Scanner, AnagramManager)</a></span> - Static method in class <a href="AnagramMain.html" title="class in &lt;Unnamed&gt;">AnagramMain</a></dt> <dd>&nbsp;</dd> <dt><span class="memberNameLink"><a href="AnagramManager.html#getAnagram-java.lang.String-">getAnagram(String)</a></span> - Method in class <a href="AnagramManager.html" title="class in &lt;Unnamed&gt;">AnagramManager</a></dt> <dd> <div class="block">Picks a random word a canonical form equal to canonical form of the word given.<br> Ex: [hurrdurr : hdrrrruu] == [durrhurr : hdrrrruu]</div> </dd> <dt><span class="memberNameLink"><a href="AnagramManager.html#getAnagrams-java.lang.String-">getAnagrams(String)</a></span> - Method in class <a href="AnagramManager.html" title="class in &lt;Unnamed&gt;">AnagramManager</a></dt> <dd> <div class="block">Finds and returns a set of all matching anagrams of the word given.</div> </dd> <dt><span class="memberNameLink"><a href="Word.html#getCompareForm--">getCompareForm()</a></span> - Method in class <a href="Word.html" title="class in &lt;Unnamed&gt;">Word</a></dt> <dd> <div class="block">Returns the current form of the word being used for comparison.</div> </dd> <dt><span class="memberNameLink"><a href="Word.html#getForm--">getForm()</a></span> - Method in class <a href="Word.html" title="class in &lt;Unnamed&gt;">Word</a></dt> <dd> 
<div class="block">Returns the canonical version of the word.</div> </dd> <dt><span class="memberNameLink"><a href="Word.html#getWord--">getWord()</a></span> - Method in class <a href="Word.html" title="class in &lt;Unnamed&gt;">Word</a></dt> <dd> <div class="block">Returns the original form of the word.</div> </dd> </dl> <a name="I:M"> <!-- --> </a> <h2 class="title">M</h2> <dl> <dt><span class="memberNameLink"><a href="AnagramMain.html#main-java.lang.String:A-">main(String[])</a></span> - Static method in class <a href="AnagramMain.html" title="class in &lt;Unnamed&gt;">AnagramMain</a></dt> <dd>&nbsp;</dd> </dl> <a name="I:S"> <!-- --> </a> <h2 class="title">S</h2> <dl> <dt><span class="memberNameLink"><a href="AnagramManager.html#sortByForm--">sortByForm()</a></span> - Method in class <a href="AnagramManager.html" title="class in &lt;Unnamed&gt;">AnagramManager</a></dt> <dd> <div class="block">Sorts the internal array of Word objects by their canonical ordering.</div> </dd> <dt><span class="memberNameLink"><a href="AnagramManager.html#sortByWord--">sortByWord()</a></span> - Method in class <a href="AnagramManager.html" title="class in &lt;Unnamed&gt;">AnagramManager</a></dt> <dd> <div class="block">Sorts the internal array of Word objects by their original alphabetical ordering.</div> </dd> </dl> <a name="I:T"> <!-- --> </a> <h2 class="title">T</h2> <dl> <dt><span class="memberNameLink"><a href="AnagramManager.html#toString--">toString()</a></span> - Method in class <a href="AnagramManager.html" title="class in &lt;Unnamed&gt;">AnagramManager</a></dt> <dd> <div class="block">Returns up to 5 of the first and last Words held in the internal array, displaying their normal and canonical forms.<br></div> </dd> <dt><span class="memberNameLink"><a href="Word.html#toString--">toString()</a></span> - Method in class <a href="Word.html" title="class in &lt;Unnamed&gt;">Word</a></dt> <dd> <div class="block">Returns the original and canonical word in this form : 
[blah=abhl]</div> </dd> </dl> <a name="I:W"> <!-- --> </a> <h2 class="title">W</h2> <dl> <dt><a href="Word.html" title="class in &lt;Unnamed&gt;"><span class="typeNameLink">Word</span></a> - Class in <a href="package-summary.html">&lt;Unnamed&gt;</a></dt> <dd> <div class="block">This class manages both the original and canonical forms of a single word, as well as managing its comparable state for sorting.</div> </dd> <dt><span class="memberNameLink"><a href="Word.html#Word-java.lang.String-">Word(String)</a></span> - Constructor for class <a href="Word.html" title="class in &lt;Unnamed&gt;">Word</a></dt> <dd> <div class="block">Constructor, takes a String and initializes internal strings representing both the original form of the string and the canonical version.</div> </dd> </dl> <a href="#I:A">A</a>&nbsp;<a href="#I:C">C</a>&nbsp;<a href="#I:D">D</a>&nbsp;<a href="#I:G">G</a>&nbsp;<a href="#I:M">M</a>&nbsp;<a href="#I:S">S</a>&nbsp;<a href="#I:T">T</a>&nbsp;<a href="#I:W">W</a>&nbsp;</div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar.bottom"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.bottom.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="package-summary.html">Package</a></li> <li>Class</li> <li><a href="overview-tree.html">Tree</a></li> <li><a href="deprecated-list.html">Deprecated</a></li> <li class="navBarCell1Rev">Index</li> <li><a href="help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="index.html?index-all.html" target="_top">Frames</a></li> <li><a href="index-all.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = 
document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> </body> </html> <file_sep>/** * <NAME> * CS145 : Spring 2017 : Section 2723 * Inclass Assignment WeeK #6 : 5/2/17 * Stacks and Queues * * This program hsa 2 methods to modify a queue of values. * Stutter method makes a copy of each value in a queue : * [1, 2, 3] => [1, 1, 2, 2, 3, 3] * * Mirror method appends a reversed copy of a queue to itself : * [a, b, c] => [a, b, c, c, b, a] * */ import java.util.LinkedList; import java.util.Queue; import java.util.Stack; public class StackQueuePractice { public static void main(String[] args) { Queue<Integer> nums = new LinkedList<Integer>(); nums.add(1); nums.add(2); nums.add(3); System.out.println("Stutter:"); System.out.println("Old queue : " + nums); stutter(nums); System.out.println("New queue : " + nums); System.out.println("===================================="); Queue<String> words = new LinkedList<String>(); words.add("a"); words.add("b"); words.add("c"); System.out.println("Mirror:"); System.out.println("Old queue : " + words); mirror(words); System.out.println("New queue : " + words); } private static void stutter(Queue<Integer> nums) { // ISOLATE LENGTH NECESSARY int len = nums.size(); for(int i = 0; i < len; i++) { // ADD COPY OF FRONT TO BACK // [1, 2, 3] => [1, 2, 3, 1] nums.add( nums.peek() ); // MOVE FRONT TO BACK // [1, 2, 3, 1] => [2, 3, 1, 1] nums.add( nums.remove() ); } } private static void mirror(Queue<String> words) { // ISOLATE LENGTH NECESSARY int len = words.size(); Stack<String> tempStack = new Stack<String>(); Queue<String> tempQueue = new LinkedList<String>(); // COPY FRONT TO STACK | MOVE FRONT TO NEW QUEUE // [a, b] => stack[a] queue[a] // [b] => stack[a, b] queue[a, b] // [] => stack[a, b, c] queue[a, b, c] for(int i 
= 0; i < len; i++) { tempStack.push( words.peek() ); tempQueue.add( words.remove() ); } // DUMP TEMP QUEUE TO MAIN QUEUE // temp[a, b, c] => [a, b, c] for(int i = 0; i < len; i++) { words.add( tempQueue.remove() ); } // DUMP TEMP STACK TO MAIN QUEUE // temp[a, b, c] => [a, b, c, c, b, a] for(int i = 0; i < len; i++) { words.add( tempStack.pop() ); } } }<file_sep>// <NAME>, CS 145, Spring 2017, Section A, #2723 // Programming Assignment #6, 5/20/17 // Class => Card // Used By => CardArrayMaster, CardArrayList // Needs => // // Hosted Javadocs => http://jlandowski.greenrivertech.net/Javadocs/IT145/assn6/ import java.util.Random; /** * .. * * @author <NAME> * @version %I% %G% * @since 1.0 */ public class Card implements Comparable<Card> { private static final int MIN_POWER = 1; // STATIC FOR SHARED EFFICIENCY private static final int MAX_POWER = 1000; private static final int MIN_TOUGHNESS = 1; private static final int MAX_TOUGHNESS = 1000; private static Random rand = new Random(); // STATIC FOR TRUE RANDOM private int power; private int toughness; //================================================================= //-------------------------CONSTRUCTORS---------------------------- //================================================================= /** * Default Constructor, sets card power and toughness to random values between MIN and MAX * contraints. (Default is 1 - 1000 inclusive for both). */ public Card() { // GET RANDOM FROM MIN TO MAX INCLUSIVE power = randomInt(MIN_POWER, MAX_POWER); toughness = randomInt(MIN_TOUGHNESS, MAX_TOUGHNESS); } /** * Constructor, takes an int argument that sets both power and toughness to that argument. 
* * @param num The value for both card power and toughness * @throws IllegalArgumentException if num &lt; MIN or num &gt; MAX * @pre Num needs to be within MIN-MAX constraints for both power and toughness * @post Sets internal power and toughness values to num value */ public Card(int num) { this(num, num); } /** * Constructor, takes 2 int arguments to set both power and toughness values seperately. * * @param power The value for card power * @param toughness The value for card toughness * @throws IllegalArgumentException if power &lt; MIN or power &gt; MAX * @throws IllegalArgumentException if toughness &lt; MIN or toughness &gt; MAX * @pre Power needs to be within MIN-MAX constraints for power * @pre Toughness needs to be within MIN-MAX constraints for toughness * @post Sets internal power and toughness values */ public Card(int power, int toughness) { if(power < MIN_POWER || power > MAX_POWER) { throw new IllegalArgumentException("Input power is out of bounds for constructor"); } else if(toughness < MIN_TOUGHNESS || toughness > MAX_TOUGHNESS) { throw new IllegalArgumentException("Input toughness is out of bounds for constructor"); } this.power = power; this.toughness = toughness; } //================================================================= //---------------------------GETTERS------------------------------- //================================================================= /** * Getter, returns the card's power level * * @return The card's power level */ public int getPower() { return power; } /** * Getter, returns the card's toughness level * * @return The card's toughness level */ public int getToughness() { return toughness; } /** * Getter, returns the card's cost based on its power and toughness * * @return ceiling( square root( power * 1.5 + toughness * 0.9 ) ) */ public int getCost() { return (int)Math.ceil ( Math.sqrt ( power * 1.5 + toughness * 0.9 ) ); } //================================================================= 
//-----------------------JAVA-UTILITIES---------------------------- //================================================================= /** * Returns the card's power and toughness in string form * * @return String in form of "[power/toughness]" */ public String toString() { return "[" + power + "/" + toughness + "]"; } /** * Compares by cost first, then power, then toughness * * @return String in form of "[power/toughness]" */ public int compareTo(Card other) { int costDiff = getCost() - other.getCost(); if(costDiff == 0) { int powerDiff = getPower() - other.getPower(); if(powerDiff == 0) return getToughness() - other.getToughness(); return powerDiff; } return costDiff; } //================================================================= //----------------------------GENERAL------------------------------ //================================================================= /** * Permanently lowers this card's current power and toughness by 10%, rounded. */ public void weaken() // Documentation doesn't specify if should be rounded up or down. { power = (int) Math.round(power * 0.9); toughness = (int) Math.round(toughness * 0.9); } /** * Permanently increases this card's current power and toughness by 10%, rounded. */ public void boost() { power = (int) Math.round(power * 1.1); toughness = (int) Math.round(toughness * 1.1); } //================================================================= //------------------------PRIVATE-HELPERS-------------------------- //================================================================= //================================================================= //-------------------------STATIC-HELPERS-------------------------- //================================================================= /** * Returns a random int between min and max inclusive. 
* * @param min Minimum int value * @param max Maximum int value * @throws IllegalArgumentException if min &gt; max * @return An integer between min and max inclusive * @pre Min must be less or equal to Max * @post Returns an int between min and max inclusive */ public static int randomInt(int min, int max) { if(min > max) throw new IllegalArgumentException("Min cannot be greater than max"); return rand.nextInt(max - (min - 1)) + min; } //================================================================= //-----------------------------DEBUG------------------------------- //================================================================= } // END CLASS
361a2791824e6dccf5c68f389ddecdd2b074d3bd
[ "Java", "HTML" ]
11
Java
JakeLandowski/JavaWork2
3421eee24cf3ce5488ee928c709fe9e8ac918da9
b6aabbfded49d58d42cb9c5c711a280433942858
refs/heads/master
<file_sep>import { StandardTemplateRoomProvider } from "./providers/StandardTemplateRoomProvider"; import { ClassicTileProvider } from "./providers/ClassicTileProvider"; import { DunGen, ETiles } from "./map/DunGen"; import { WorldMap } from "./map/WorldMap"; import { Player } from "./actors/Player"; export class World extends PIXI.Container { public map: WorldMap; public player: Player; constructor() { super(); } public async init() { await StandardTemplateRoomProvider.ready(); await ClassicTileProvider.ready(); let dungeon = DunGen(StandardTemplateRoomProvider.templates, { width: 100, height: 100 } ); this.map = new WorldMap(this, dungeon, ClassicTileProvider); this.addChild(this.map); this.player = new Player(this); this.addChild(this.player.sprite); breakpoint: for (let j = 0; j < this.map.dungeon.height; j ++) { for (let i = 0; i < this.map.dungeon.width; i ++) { if (this.map.dungeon.tiles.get(i, j) === ETiles.EMPTY) { this.player.x = i * 32 + 2; this.player.y = j * 32 + 2; break breakpoint; } } } this.player.syncVisuals(); } public update() { this.player.update(); this.player.syncVisuals(); this.x += (-this.player.x + 1600 / 2 - this.x) / 12; this.y += (-this.player.y + 900 / 2 - this.y) / 12; } }<file_sep>import {opts} from "./common/dungeonOpts"; import {DunGen} from "./common/DunGen"; import {networkManager, mouse} from "./root"; import {World} from "./World"; let world = new World(); world.renderer.setSize(1920, 1080); world.renderer.domElement.style.width = "100vw"; world.renderer.domElement.style.height = "100vh"; document.body.appendChild(world.renderer.domElement); mouse.setTarget(world.renderer.domElement); let fps = 0; let now = Date.now(); function render() { fps ++; let ticktime = Date.now(); if (ticktime - now > 1000) { console.log(fps); fps = 0; now = ticktime; } world.update(); world.render(); requestAnimationFrame(render); } requestAnimationFrame(render); networkManager.init(); networkManager.socket.once("init", (m: any) => { let dungeon = 
DunGen(opts, m.map); world.buildDungeon(dungeon); } ); <file_sep>import {Game} from "./Game"; export class Network { public sockets: SocketIO.Socket[] = []; public game?: Game; public addConnection(socket: SocketIO.Socket) { if (!this.game) this.game = new Game(); this.sockets.push(socket); console.log("connection added " + socket.id); this.game.addPlayer(socket.id); socket.emit("init", { id: socket.id, map: this.game.dungeon.seed, } ); socket.on("disconnect", () => { this.sockets.splice(this.sockets.indexOf(socket), 1); socket.removeAllListeners(); if (this.game && this.game.removePlayer(socket.id) === 0) this.game = undefined; console.log("connection removed " + socket.id); } ); } }<file_sep>import {MersenneTwister} from "./MersenneTwister"; export interface IDunGenConfig { width: number; height: number; roomAttempts?: number; roomSize?: number; turnRate?: number; roomSizeRange?: number; extraConnectionRate?: number; maxSectionConnections?: number; seed?: number; } const adjacencies = [[-1, 0], [1, 0], [0, -1], [0, 1]]; export class Map2d<V> { public changeLog: [number, number, V][] = []; public data: V[] = []; constructor(public width: number, public height: number, initializer: V | ((x: number, y: number) => V)) { for (let i = 0; i < width * height; i ++) { if (typeof initializer === "function") { this.data[i] = initializer(i % width, Math.floor(i / width)); } else { this.data[i] = initializer; } } } public get(x: number, y: number) { if (x < 0 || x >= this.width || y < 0 || y >= this.height) throw new Error("Index out of bounds"); return this.data[x + y * this.width]; } public set(x: number, y: number, val: V) { if (x < 0 || x >= this.width || y < 0 || y >= this.height) throw new Error("Index out of bounds"); this.changeLog.push([x, y, val]); return this.data[x + y * this.width] = val; } public toString(strfn?: (d: V) => string) { if (!strfn) strfn = (d) => d.toString(); return this.data.map(strfn).map( (str, i) => (i + 1) % this.width === 0 ? 
(str + "\n") : (str + " ") ).join(""); } } export function DunGen(config: IDunGenConfig, seed?: number) { let width = config.width; let height = config.height; let roomAttempts = config.roomAttempts || 50; let roomSize = config.roomSize || Math.min(width, height) / 5; let turnRate = config.turnRate || 0.15; let roomSizeRange = config.roomSizeRange || 0.25; let extraConnectionRate = config.extraConnectionRate !== undefined ? config.extraConnectionRate : 0.1; let maxSectionConnections = config.maxSectionConnections || 3; let sectionLabel = 1; let rooms: { x: number, y: number, width: number, height: number }[] = []; if (seed === undefined) seed = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER); let mt = new MersenneTwister(seed); let maxRoomId = 0; let dungeon = new Map2d(width, height, 0); roomLoop: for (let i = 0; i < roomAttempts; i ++) { let roomWidth = Math.max(5, Math.abs(Math.round(roomSize + (mt.random() - 0.5) * 2 * roomSize * roomSizeRange))); if (roomWidth % 2 === 0) roomWidth ++; let roomHeight = Math.max(5, Math.abs(Math.round(roomSize + (mt.random() - 0.5) * 2 * roomSize * roomSizeRange))); if (roomHeight % 2 === 0) roomHeight ++; let x = Math.floor(mt.random() * (width - roomWidth) / 2) * 2 + 1; let y = Math.floor(mt.random() * (height - roomHeight) / 2) * 2 + 1; for (let u = x; u < x + roomWidth; u ++) { for (let v = y; v < y + roomHeight; v ++) { if (dungeon.get(u, v) !== 0) { continue roomLoop; } } } for (let u = x; u < x + roomWidth; u ++) { for (let v = y; v < y + roomHeight; v ++) { dungeon.set(u, v, sectionLabel); } } rooms.push({ x, y, width: roomWidth, height: roomHeight }); maxRoomId = sectionLabel; sectionLabel ++; } function depthFirstMaze(x: number, y: number, direction: 0 | 1 | 2 | 3) { dungeon.set(x, y, sectionLabel); let tryDirection = direction; if (direction === 0 && mt.random() < turnRate) { tryDirection = mt.random() < 0.5 ? 1 : 3; } else if (direction === 1 && mt.random() < turnRate) { tryDirection = mt.random() < 0.5 ? 
0 : 2; } else if (direction === 2 && mt.random() < turnRate) { tryDirection = mt.random() < 0.5 ? 1 : 3; } else if (direction === 3 && mt.random() < turnRate) { tryDirection = mt.random() < 0.5 ? 0 : 2; } let flipflop = Math.floor(mt.random() * 2) * 2; let attemps = <(0 | 1 | 2 | 3)[]>[tryDirection, (tryDirection + 2) % 4, (tryDirection + 1 + flipflop) % 4, (tryDirection + 3 + flipflop) % 4]; for (let dir of attemps) { if (dir === (direction + 2) % 4) continue; let px = x; let py = y; let hx = x; let hy = y; switch(dir) { case 0: hx ++; px += 2; break; case 1: hy ++; py += 2; break; case 2: hx --; px -= 2; break; case 3: hy --; py -= 2; break; } if (px < 0 || px >= dungeon.width || py < 0 || py >= dungeon.height || dungeon.get(px, py) !== 0) continue; dungeon.set(hx, hy, sectionLabel); depthFirstMaze(px, py, dir); } } let offx = Math.floor(mt.random() * dungeon.width); let offy = Math.floor(mt.random() * dungeon.height); for (let i = 1; i < dungeon.width; i += 2) { for (let j = 1; j < dungeon.height; j += 2) { let x = (i + offx) % (dungeon.width - 1); let y = (j + offy) % (dungeon.height - 1); if (x % 2 === 0) x ++; if (y % 2 === 0) y ++; if (dungeon.get(x, y) === 0) { depthFirstMaze(x, y, mt.random() < 0.5 ? 0 : 1); sectionLabel ++; } } } let buckets: { [idxMatch: string]: [number, number][] } = {}; for (let i = 1; i < dungeon.width - 1; i ++) { for (let j = (i % 2 === 0) ? 
1 : 2; j < dungeon.height - 1; j += 2) { let a = dungeon.get(i - 1, j) || dungeon.get(i, j - 1); let b = dungeon.get(i + 1, j) || dungeon.get(i, j + 1); if (a !== 0 && b !== 0 && a !== b) { let label = Math.min(a, b) + "," + Math.max(a, b); buckets[label] = buckets[label] || []; buckets[label].push([i, j]); } } } for (let label in buckets) { let hallway = parseInt(label.split(",")[1]) > maxRoomId; let entrances: number; if (hallway) { entrances = Math.floor(Math.pow(mt.random(), 2) * maxSectionConnections) + 1; } else { entrances = Math.round(mt.random()); } for (let i = 0; i < entrances; i ++) { let e = buckets[label][Math.floor(mt.random() * buckets[label].length)]; dungeon.set(e[0], e[1], sectionLabel); } sectionLabel ++; } function cullDeadEnd(x: number, y: number, recurse = true) { let paths = 0; if (dungeon.get(x - 1, y) !== 0) paths ++; if (dungeon.get(x + 1, y) !== 0) paths += 2; if (dungeon.get(x, y - 1) !== 0) paths += 4; if (dungeon.get(x, y + 1) !== 0) paths += 8; if (paths === 1 || paths === 2 || paths === 4 || paths === 8) dungeon.set(x, y, 0); if (!recurse) return; switch(paths) { case 1: { dungeon.set(x - 1, y, 0); cullDeadEnd(x - 2, y); break; } case 2: { dungeon.set(x + 1, y, 0); cullDeadEnd(x + 2, y); break; } case 4: { dungeon.set(x, y - 1, 0); cullDeadEnd(x, y - 2); break; } case 8: { dungeon.set(x, y + 1, 0); cullDeadEnd(x, y + 2); break; } } } for (let i = 1; i < dungeon.width; i += 2) { for (let j = 1; j < dungeon.height; j += 2) { if (dungeon.get(i, j) !== 0) cullDeadEnd(i, j); } } for (let i = 1; i < dungeon.width - 1; i ++) { for (let j = (i % 2 === 0) ? 
1 : 2; j < dungeon.height - 1; j += 2) { if (dungeon.get(i, j) !== 0) continue; let a = dungeon.get(i - 1, j) || dungeon.get(i, j - 1); let b = dungeon.get(i + 1, j) || dungeon.get(i, j + 1); if (a !== 0 && b !== 0 && a === b && mt.random() < extraConnectionRate) { dungeon.set(i, j, a); } } } for (let i = 1; i < dungeon.width - 1; i ++) { for (let j = 1; j < dungeon.height - 1; j ++) { if (dungeon.get(i, j) === 0) dungeon.set(i, j, -1); } } function seedSectionCull(x: number, y: number, boundingSquares: [number, number][], sectionBounds = 0): number { if (x === 1 || x === dungeon.width - 2 || y === 1 || y === dungeon.height - 2) sectionBounds = -1; dungeon.set(x, y, 0); for (let off of [[-1, 0], [1, 0], [0, -1], [0, 1], [-1, -1], [1, -1], [-1, 1], [1, 1]]) { let tile = dungeon.get(x + off[0], y + off[1]); if (tile === -1) { let bounds = seedSectionCull(x + off[0], y + off[1], boundingSquares, sectionBounds); if (bounds < 0 || sectionBounds < 0 || (sectionBounds && bounds !== sectionBounds)) sectionBounds = -1; else sectionBounds = bounds; } else if (tile > 0) { if ((sectionBounds && sectionBounds !== tile) || sectionBounds < 0) sectionBounds = -1; else { sectionBounds = tile; boundingSquares.push([x + off[0], y + off[1]]); } } } return sectionBounds!; } let overall: [number, number][] = []; for (let i = 0; i < dungeon.width; i ++) { for (let j = 0; j < dungeon.height; j ++) { if (dungeon.get(i, j) === -1) { let outline: [number, number][] = []; if (seedSectionCull(i, j, outline) > 0) { overall = overall.concat(outline); } } } } for (let point of overall) { dungeon.set(point[0], point[1], -2); } function adjacentToCarved(x: number, y: number) { if (dungeon.get(x - 1, y) > 0) return true; if (dungeon.get(x + 1, y) > 0) return true; if (dungeon.get(x, y - 1) > 0) return true; if (dungeon.get(x, y + 1) > 0) return true; return false; } function connectsSelf(x: number, y: number, ownId: number) { let selfConnections = 0; if (Math.abs(dungeon.get(x - 1, y)) === 
Math.abs(ownId)) selfConnections ++; if (Math.abs(dungeon.get(x + 1, y)) === Math.abs(ownId)) selfConnections ++; if (Math.abs(dungeon.get(x, y - 1)) === Math.abs(ownId)) selfConnections ++; if (Math.abs(dungeon.get(x, y + 1)) === Math.abs(ownId)) selfConnections ++; return selfConnections > 1; } let openList: [number, number, number][] = []; let openLabel = -sectionLabel; for (let point of overall) { if (dungeon.get(point[0], point[1]) < -2) continue; if (adjacentToCarved(point[0], point[1])) { openList.push([point[0], point[1], openLabel]); dungeon.set(point[0], point[1], openLabel); openLabel --; } } function paintBack(x: number, y: number, newId: number) { let oldId = dungeon.get(x, y); if (oldId === newId) return; dungeon.set(x, y, newId); if (dungeon.get(x - 1, y) === oldId) paintBack(x - 1, y, newId); if (dungeon.get(x + 1, y) === oldId) paintBack(x + 1, y, newId); if (dungeon.get(x, y - 1) === oldId) paintBack(x, y - 1, newId); if (dungeon.get(x, y + 1) === oldId) paintBack(x, y + 1, newId); } while (openList.length > 0) { let cell = openList.shift()!; let tile = dungeon.get(cell[0], cell[1]); if (tile === cell[2]) { if (connectsSelf(cell[0], cell[1], cell[2])) { dungeon.set(cell[0], cell[1], 0); } else { dungeon.set(cell[0], cell[1], -cell[2]); for (let off of adjacencies) { if (dungeon.get(cell[0] + off[0], cell[1] + off[1]) < -1) { dungeon.set(cell[0] + off[0], cell[1] + off[1], cell[2]); openList.push([cell[0] + off[0], cell[1] + off[1], cell[2]]); } } } } else if (tile < -2) { dungeon.set(cell[0], cell[1], -tile); paintBack(cell[0], cell[1], -cell[2]); for (let openCell of openList) { if (openCell[2] === tile) { if (dungeon.get(openCell[0], openCell[1]) === openCell[2]) { dungeon.set(openCell[0], openCell[1], cell[2]); } openCell[2] = cell[2]; } } for (let off of adjacencies) { if (dungeon.get(cell[0] + off[0], cell[1] + off[1]) < -1) { dungeon.set(cell[0] + off[0], cell[1] + off[1], cell[2]); openList.push([cell[0] + off[0], cell[1] + off[1], 
cell[2]]); } } } } for (let i = 1; i < dungeon.width - 1; i ++) { for (let j = 1; j < dungeon.height - 1; j ++) { if (dungeon.get(i, j) < 0) dungeon.set(i, j, 0); else if ((i % 2 === 0 || j % 2 === 0) && dungeon.get(i, j) !== 0) cullDeadEnd(i, j, false); } } for (let i = 1; i < dungeon.width; i += 2) { for (let j = 1; j < dungeon.height; j += 2) { if (dungeon.get(i, j) !== 0) cullDeadEnd(i, j); } } return { map: dungeon, rooms, seed }; } let _dummmy = null! && DunGen({} as any); export type DunGenPack = typeof _dummmy;<file_sep>import {DungeonGeometry} from "./objects/DungeonGeometry"; import {DunGenPack} from "./common/DunGen"; import * as Key from "./Key"; import {keyboard, mouse} from "./root"; import {Player} from "./objects/Player"; const groundPlane = new THREE.Plane(v3(0, 1, 0), 0); const speed = 0.4; export class World { public camera = new THREE.PerspectiveCamera(75, 16/9, 0.1, 1000); public scene = new THREE.Scene(); public player: Player; public renderer: THREE.WebGLRenderer; private overhead: THREE.DirectionalLight; constructor() { this.renderer = new THREE.WebGLRenderer(); this.renderer.shadowMap.enabled = true; this.renderer.shadowMap.type = THREE.PCFSoftShadowMap; let ambience = new THREE.AmbientLight(0xFFFFFF, 0.4); this.scene.add(ambience); let overhead = this.overhead = new THREE.DirectionalLight(0xFFFFFF, 0.4); overhead.position.set(-24, 30, 12); this.scene.add(overhead); this.player = new Player(); this.scene.add(this.player); } public update() { this.player.update(); this.camera.position.set(this.player.position.x, this.player.position.y + 70, this.player.position.z + 35); this.camera.lookAt(this.player.position); } public render() { this.renderer.render(this.scene, this.camera); } public buildDungeon(dungeon: DunGenPack) { let geom = new DungeonGeometry(dungeon); geom.position.x = 50; geom.position.z = 50; this.scene.add(geom); let startingRoom = dungeon.rooms[0]; this.player.position.x = startingRoom.x * 30 + startingRoom.width * 30 / 2; 
this.player.position.z = startingRoom.y * 30 + startingRoom.height * 30 / 2; } }<file_sep>import {DunGenPack} from "../common/DunGen"; let tex = new THREE.TextureLoader().load("/textures/stone_texture.jpg"); tex.wrapS = THREE.RepeatWrapping; tex.wrapT = THREE.RepeatWrapping; let mat1 = new THREE.MeshLambertMaterial( { map: tex, side: THREE.FrontSide } ); let mat2 = new THREE.MeshLambertMaterial( { color: 0xFF0000, side: THREE.FrontSide } ); export class DungeonGeometry extends THREE.Group { constructor(dungeon: DunGenPack) { super(); this.receiveShadow = true; for (let i = 0; i < dungeon.map.width; i ++) { for (let j = 0; j < dungeon.map.height; j ++) { if (dungeon.map.get(i, j) === 0) { let wall = new THREE.Mesh(new THREE.CubeGeometry(30, 20, 30), mat1); wall.receiveShadow = true; wall.castShadow = true; wall.position.x = i * 30; wall.position.y = 10; wall.position.z = j * 30; this.add(wall); } else { let floor = new THREE.Mesh(new THREE.PlaneGeometry(30, 30, 3, 3), mat2); floor.rotateX(-Math.PI / 2); floor.receiveShadow = true; floor.position.x = i * 30; floor.position.z = j * 30; this.add(floor); } } } } }<file_sep>// tslint:disable max-classes-per-file import * as Key from "./Key"; class Juggler { private enterFrameFunctions: [(() => void), any][] = []; private schedule: number; private interFrameTime: number; constructor(private fps: number) { this.interFrameTime = 1000 / fps; this.schedule = Date.now() + this.interFrameTime; let tick = () => { this.enterFrameFunctions.forEach( ([fn, ctx]) => fn.call(ctx) ); this.schedule += this.interFrameTime; let timeout = this.schedule - Date.now(); if (timeout < 2) { timeout = 2; this.schedule = Date.now() + this.interFrameTime; } setTimeout(tick, timeout); }; setTimeout(tick, this.interFrameTime); } public add(fn: () => void, context?: any) { if (this.has(fn, context) < 0) { this.enterFrameFunctions.push([fn, context]); } } public remove(fn: () => void, context?: any) { let idx = this.has(fn, context); if (idx >= 0) { 
this.enterFrameFunctions.splice(idx, 1); } } public has(fn: () => void, context?: any) { for (let [i, oef] of enumerate(this.enterFrameFunctions)) { if (oef[0] === fn && oef[1] === context) return i; } return -1; } public afterFrames(numFrames: number, fn: () => void, context?: any) { let wrapper = () => { numFrames --; if (numFrames <= 0) { fn.call(context); this.remove(wrapper); } }; this.add(wrapper); } } // export let juggler = new Juggler(60); class Keyboard { private keys: boolean[] = []; constructor() { window.addEventListener("keydown", (e) => this.keys[e.keyCode] = true ); window.addEventListener("keyup", (e) => this.keys[e.keyCode] = false ); } public isKeyDown(keycode: number) { return this.keys[keycode] || false; } } export let keyboard = new Keyboard(); class SoundManager { public static GLOBAL_VOLUME = 0.4; private music: { [songName: string]: { song: HTMLAudioElement, fade: number } } = {}; private tags: { [tag: string]: boolean } = {}; constructor() { // juggler.add(() => this.tags = {}); } public playSound(name: string, volume = 1, tag?: string) { if (tag) { if (!this.tags[tag]) { this.tags[tag] = true; } else { return; } } let audio = new Audio(name); audio.volume = volume * SoundManager.GLOBAL_VOLUME; audio.play(); audio.onended = () => audio.remove(); } public playMusic(name: string, volume = 1) { if (this.music.hasOwnProperty(name)) { if (!isNaN(this.music[name].fade)) window.clearInterval(this.music[name].fade); this.music[name].song.volume = volume * SoundManager.GLOBAL_VOLUME; return; } let audio = new Audio(name); audio.volume = volume * SoundManager.GLOBAL_VOLUME; audio.loop = true; audio.play(); this.music[name] = { song: audio, fade: NaN, }; } public fadeMusicOut(name: string) { if (!this.music.hasOwnProperty(name) || !isNaN(this.music[name].fade)) return; let fadeStart = this.music[name].song.volume; let fadeTime = 30; this.music[name].fade = window.setInterval(() => { fadeTime --; if (fadeTime <= 0) { this.music[name].song.pause(); 
this.music[name].song.remove(); window.clearInterval(this.music[name].fade); delete this.music[name]; } else { this.music[name].song.volume = fadeTime / 30 * fadeStart; } }, 16); } public setMusicVolume(name: string, volume: number) { if (!this.music.hasOwnProperty(name)) return; this.music[name].song.volume = volume * SoundManager.GLOBAL_VOLUME; } } export let soundManager = new SoundManager(); class NetworkManager { public socket: SocketIOClient.Socket; public init() { this.socket = io(); } } export let networkManager = new NetworkManager(); class Mouse { private target: HTMLElement = document.body; private mousePosition = { x: 0, y: 0 }; private mouseDown = false; constructor() { window.addEventListener("mousemove", (e) => { let rect = this.target.getBoundingClientRect(); let relX = e.clientX - rect.left; let relY = e.clientY - rect.top; this.mousePosition = { x: (relX / rect.width - 0.5) * 2, y: -(relY / rect.height - 0.5) * 2 }; } ); window.addEventListener("mousedown", () => this.mouseDown = true ); window.addEventListener("mouseup", () => this.mouseDown = false ); } public position() { return this.mousePosition; } public setTarget(target: HTMLElement) { this.target = target; } public isMouseDown() { return this.mouseDown; } } export let mouse = new Mouse();<file_sep>import {opts} from "./common/dungeonOpts"; import {DunGen} from "./common/DunGen"; import {Player} from "./Player"; export class Game { public numPlayers = 0; public players: { [id: string]: Player } = {}; public dungeon = DunGen(opts); public addPlayer(id: string): number { if (this.players[id] !== undefined) throw "Duplicate player"; let startRoom = this.dungeon.rooms[0]; let player = new Player(startRoom.x + startRoom.width, startRoom.y + startRoom.height); this.players[id] = player; this.numPlayers ++; return this.numPlayers; } public removePlayer(id: string) { if (this.players[id] === undefined) throw "Cannot remove missing player"; delete this.players[id]; this.numPlayers --; return 
this.numPlayers; } }<file_sep>import * as _THREE from "three"; declare global { function v2(x: number, y: number): THREE.Vector2; function v3(x: number, y: number, z: number): THREE.Vector3; function v3(xy: { x: number, y: number }, z: number): THREE.Vector3; const THREE: typeof _THREE; class VolumetricFire { public static texturePath: string; public mesh: THREE.Mesh; constructor(width: number, height: number, depth: number, sliceSpacing: number, renderCamera: THREE.Camera); public update(totalElapsed: number): void; } } <file_sep>import * as Key from "../Key"; import {keyboard, mouse} from "../root"; export class Player extends THREE.Group { private mesh: THREE.Mesh; private velocity = v2(0, 0); constructor() { super(); new THREE.JSONLoader().load("/models/stick.json", (geom) => { this.mesh = new THREE.Mesh(geom, new THREE.MeshLambertMaterial({ color: 0x666666 })); this.mesh.castShadow = true; this.add(this.mesh); } ); this.castShadow = true; } public update() { if (keyboard.isKeyDown(Key.W) && !keyboard.isKeyDown(Key.S)) { this.position.z --; } else if (keyboard.isKeyDown(Key.S) && !keyboard.isKeyDown(Key.W)) { this.position.z ++; } if (keyboard.isKeyDown(Key.A) && !keyboard.isKeyDown(Key.D)) { this.position.x --; } else if (keyboard.isKeyDown(Key.D) && !keyboard.isKeyDown(Key.A)) { this.position.x ++; } } }<file_sep>import * as express from "express"; import * as http from "http"; import * as socketIo from "socket.io"; import * as path from "path"; import {Network} from "./Network"; let app = express(); let server = new http.Server(app); let io = socketIo(server); app.use(express.static(path.join(__dirname, "..", "..", "cli", "dist"))); let network = new Network(); io.on('connection', (socket) => { network.addConnection(socket); } ); server.listen(8080, () => { console.log('listening on localhost:8080'); } ); <file_sep>import { Dungeon, ETiles } from "./map/DunGen"; import { root, juggler } from "./root"; import { World } from "./World"; function 
DungeonToString(dungeon: Dungeon): string { let repr = "\n"; for (let j = 0; j < dungeon.height; j ++) { for (let i = 0; i < dungeon.width; i ++) { switch (dungeon.tiles.get(i, j)) { case ETiles.SOLID: repr += "██"; break; case ETiles.EMPTY: repr += " "; break; case ETiles.WALL: repr += "██"; break; case ETiles.TOP_DOOR: repr += "^^"; break; case ETiles.BOTTOM_DOOR: repr += "vv"; break; case ETiles.LEFT_DOOR: repr += "<<"; break; case ETiles.RIGHT_DOOR: repr += ">>"; break; case ETiles.LADDER: repr += "TT"; break; default: repr += "??"; } } repr += "\n"; } return repr; } (window as any).DungeonToString = DungeonToString; async function main() { let app = new PIXI.Application( { width: 1600, height: 900, backgroundColor: 0x161616, } ); document.body.appendChild(app.view); root.setApp(app); let world = (window as any).world = new World(); app.stage.addChild(world); await world.init(); let fps = 60; let lastTick = 0; let fpsDisplay = new PIXI.Text("0", { align: "right", fontFamily: "Courier New", fontSize: 17, stroke: 0xFFFFFF, strokeThickness: 0.5 } ); fpsDisplay.anchor.set(1); fpsDisplay.x = app.view.width; fpsDisplay.y = app.view.height; app.stage.addChild(fpsDisplay); juggler.add( () => { world.update(); if (lastTick > 0) { let tick = Date.now(); if (!isFinite(fps)) { fps = 1000 / (tick - lastTick); } else { fps = fps * 0.99 + (1000 / (tick - lastTick)) * 0.01; } lastTick = tick; } else { lastTick = Date.now(); } fpsDisplay.text = fps.toFixed(1); } ); } window.addEventListener("load", main);<file_sep>import {IDunGenConfig} from "./DunGen"; export const opts: IDunGenConfig = { width: 41, height: 41, turnRate: 0.1, roomSize: 7, roomSizeRange: 0.6, roomAttempts: 30, maxSectionConnections: 2, extraConnectionRate: 0.8, };
99cf80e9b06159cc019153ea8d4dc5c6fede3e90
[ "TypeScript" ]
13
TypeScript
HypoLast/runner2
0aff1d375faf4f686963df103048b634373f8ca1
abee14fa8d3a316c34b18191b12e8b65184b0cd4
refs/heads/master
<file_sep>using System; class AgeAfte10years { static void Main(string[] args) { Console.Write("Enter your birth date: Day/Month/Year:"); DateTime Birthday=DateTime.Parse(Console.ReadLine()); int age=(int)((DateTime.Now-Birthday).TotalDays/365.242199); Console.WriteLine("You are"+age+"year(s) old"); Console.WriteLine("After ten years you will be at the age of "+(age+10)); } } <file_sep>using System; class PrintASequence { static void Main(string[] args) { int number1 = 2; int number2 = -3; int number3 = 4; int number4 = -5; int number5 = 6; int number6 = -7; int number7 = 8; int number8 = -9; int number9 = 10; int number10 = -11; Console.WriteLine("{0},{1},{2},{3},{4},{5},{6},{7},{8},{9}", number1, number2, number3, number4, number5, number6, number7, number8, number9, number10); } }
f30fb9e3728afe2aba5bef604173d42f50535486
[ "C#" ]
2
C#
metall22/Intro-Programming-CSharp
e558bef09cccf69b0901a3a1ffc47fb73bd20fe8
64b3c83e1d672ac1f7c98d5ac7af118e5da206e8
refs/heads/master
<file_sep>package main import ( "bufio" "fmt" "os" "strings" "regexp" "bytes" ) func main() { reader := bufio.NewReader(os.Stdin) fmt.Print("Enter trump suit (avaliable: S (Spades), H (Hearts), C (Clubs), D (Diamonds): ") inputText, _ := reader.ReadString('\n') inputText = strings.Replace(inputText, "\n", "", -1) if !isValid(inputText) { fmt.Fprintf(os.Stderr, "Input not valid !\n") os.Exit(1) } sortDeck := deckSorting(deck, inputText) result := formatting(sortDeck) fmt.Println("Sort deck:") fmt.Println(result) } func isValid(text string) bool { validInput, _ := regexp.MatchString("^[S|H|C|D]$", text) return validInput } func deckSorting(deck []Card, trump string) []Card { swap := true size := len(deck) for swap { swap = false for i := 0; i < size - 1; i++ { if isGreater(deck[i], deck[i + 1], trump) { deck[i], deck[i + 1] = deck[i + 1], deck[i] swap = true } } size -= 1 } return deck } func isGreater(card1 Card, card2 Card, trump string) bool { if card1.weight == 15 { return true } if card2.weight == 15 { return false } if card1.suit == trump && card2.suit != trump { return true } if card2.suit == trump && card1.suit != trump { return false } if card1.weight > card2.weight { return true } return false } func formatting(deck []Card) string { var buffer bytes.Buffer for _, card := range deck { cardName := fmt.Sprintf("%v%v ", card.suit, numbersRefs[card.weight]) buffer.WriteString(cardName) } return buffer.String() } type Card struct { suit string weight int } var deck = []Card{ Card{"S", 6}, Card{"S", 7}, Card{"S", 8}, Card{"S", 9}, Card{"S", 10}, Card{"S", 11}, Card{"S", 12}, Card{"S", 13}, Card{"S", 14}, Card{"H", 6}, Card{"H", 7}, Card{"H", 8}, Card{"H", 9}, Card{"H", 10}, Card{"H", 11}, Card{"H", 12}, Card{"H", 13}, Card{"H", 14}, Card{"C", 6}, Card{"C", 7}, Card{"C", 8}, Card{"C", 9}, Card{"C", 10}, Card{"C", 11}, Card{"C", 12}, Card{"C", 13}, Card{"C", 14}, Card{"D", 6}, Card{"D", 7}, Card{"D", 8}, Card{"D", 9}, Card{"D", 10}, Card{"D", 11}, Card{"D", 12}, 
Card{"D", 13}, Card{"D", 14}, Card{"Red", 15}, Card{"Black", 15}, } var numbersRefs = map[int]string{ 6: "6", 7: "7", 8: "8", 9: "9", 10: "10", 11: "J", 12: "Q", 13: "K", 14: "A", 15: "Jocker", }<file_sep>### This is test-task app on Go Реализовать объект "игральные карты" и на нём реализовать сортировку любым методом. Колода 36 карт + 2 джокера, джокеры всегда старше любой из мастей, старшинство масти всегда сильнее старшинства карты, козырной может быть только одна масть и её можно передать на входе. To get ``` go get github.com/Merff/cards_sort cd $GOPATH/src/github.com/Merff/cards_sort ``` To run ``` ./cards_sort ``` To test ``` go test ```<file_sep>package main import ( "testing" ) func TestDeckSorting(t *testing.T) { testValues := []string{ "S", "H", "C", "D", } trueResults := []string{ "H6 C6 D6 H7 C7 D7 H8 C8 D8 H9 C9 D9 H10 C10 D10 HJ CJ DJ HQ CQ DQ HK CK DK HA CA DA S6 S7 S8 S9 S10 SJ SQ SK SA BlackJocker RedJocker ", "C6 D6 S6 C7 D7 S7 C8 D8 S8 C9 D9 S9 C10 D10 S10 CJ DJ SJ CQ DQ SQ CK DK SK CA DA SA H6 H7 H8 H9 H10 HJ HQ HK HA RedJocker BlackJocker ", "D6 S6 H6 D7 S7 H7 D8 S8 H8 D9 S9 H9 D10 S10 H10 DJ SJ HJ DQ SQ HQ DK SK HK DA SA HA C6 C7 C8 C9 C10 CJ CQ CK CA BlackJocker RedJocker ", "S6 H6 C6 S7 H7 C7 S8 H8 C8 S9 H9 C9 S10 H10 C10 SJ HJ CJ SQ HQ CQ SK HK CK SA HA CA D6 D7 D8 D9 D10 DJ DQ DK DA RedJocker BlackJocker ", } var sortDeck []Card var got string var want string for i, value := range testValues { sortDeck = deckSorting(deck, value) got = formatting(sortDeck) want = trueResults[i] if got != want { t.Errorf("deckSorting(S)(%d) == %d, want %d", value, got, want) } } }
eff429dacf573c682c26fe42f9987dc37b4431ff
[ "Markdown", "Go" ]
3
Go
Merff/cards_sort
7b95304e5267620acfcd15638b95cf6780ce9bd4
d81320609b95fc0b20d4250001979dab98c5f0c8
refs/heads/master
<repo_name>Soumil491/first-react-app<file_sep>/src/pages/shop-page/shop-page.component.jsx import React from 'react'; import './shop-page.component.scss'; const ShopPage = () => { return( <div className="shop-page"> Shop Page </div> ) } export default ShopPage;<file_sep>/src/components/menu-item/menu-item.component.jsx import React from 'react'; import './menu-item.component.css' import { withRouter } from 'react-router-dom'; const MenuItem = ({ id, title, description, price, category, image, rating, history, match }) => { return ( <tr className="menu-item"> <td>{id}</td> <td> <img src={image} width="100px" alt={description}/> </td> <td>{title}</td> <td>{price}</td> <td>{description}</td> <td>{category}</td> <td>{rating.rate}</td> <td>{rating.count}</td> <td> <button onClick={() => {history.push(`${match.url} ${title}`)}}>Shop Now</button> </td> <td>{}</td> </tr> ) } export default withRouter(MenuItem);<file_sep>/src/pages/signin-page/signin.component.jsx import React from 'react'; import './signin.component.scss'; const SigninPage = () => { return ( <div className="signin-page"> Sign in page </div> ) } export default SigninPage;<file_sep>/src/pages/cart-page/cart-page.component.jsx import React from 'react'; import './cart-page.component.scss'; const CartPage = () => { return( <div className="cart-page"> Cart Page </div> ) } export default CartPage;<file_sep>/src/components/todoForm/todoForm.component.jsx import React from 'react'; import FormAdd from './formAdd.component'; import FormDelete from './formDelete.component'; const TodoForm = () => { return ( <div> <FormAdd /> <FormDelete /> </div> ) } export default TodoForm;<file_sep>/src/pages/home-page/home-page.component.jsx import React from 'react'; // import Directory from '../../components/directory/directory.component'; // import Navbar from '../../components/navbar/navbar.component'; import Todo from '../../components/todo/todo.component'; const HomePage = () => { return ( <div> <Todo /> </div> ) } export 
default HomePage;<file_sep>/src/testReduxWithUI.js import React from 'react'; import logo from './logo.svg'; //higher order component import { connect } from 'react-redux'; //import actions import { firstAction } from './actions/actionOne'; import { secondAction } from './actions/actionTwo'; import './rotatecss.css'; const mapStateToProps = state => ({ ...state }); const mapDispatchToProps = dispatch => ({ startAction: () => dispatch(firstAction), stopAction: () => dispatch(secondAction) }) class RotateFunction extends React.Component { render() { return ( <div className="RotateFunction"> <img src={logo} alt="logo" className={"App-logo" + (this.props.rotate ? "" : "App-logo-paused")} onClick={ this.props.rotate ? this.props.stopAction : this.props.startAction } /> </div> ) } } export default connect(mapStateToProps, mapDispatchToProps)(RotateFunction);<file_sep>/src/App.js import './App.css'; import HomePage from './pages/home-page/home-page.component.jsx'; import {Route} from 'react-router-dom'; import SignIn from './pages/signin-page/signin.component.jsx'; import Header from './components/header/header.component.jsx'; import Cart from './pages/cart-page/cart-page.component.jsx'; // import rotateFunction from '../src/redux/test-redux-withUI'; function App() { return ( <div className="App"> <Header/> <Route exact path="/cart" component={Cart}></Route> {/* <Route exact path="/redux" component={rotateFunction}></Route> */} <Route exact path="/" component={HomePage}></Route> <Route exact path="/signin" component={SignIn}></Route> </div> ); } export default App;
9724d1ab62f51f1f3bd52d47e1ad30f3d643c77d
[ "JavaScript" ]
8
JavaScript
Soumil491/first-react-app
4cfb8ee9b81e86fe9eabee92a2e84306286dba28
36463f6ece32617f1f0e358ab33c745087b6ede6
refs/heads/master
<repo_name>mapuchuz/zooJEE<file_sep>/src/main/java/org/zoo/javaee/EnclosBean.java package org.zoo.javaee; import java.util.ArrayList; import java.util.List; import javax.enterprise.inject.Model; import javax.inject.Inject; @Model public class EnclosBean { List<Enclos>enclos= new ArrayList<>(); @Inject EnclosServiceInterface service; public void getAllEnclos() { service.enclosServiceGetAll(); } public void refresh() { enclos= service.enclosServiceGetAll(); } public List<Enclos> getEnclos() { return enclos; } public void setEnclos(List<Enclos> enclos) { this.enclos = enclos; } public EnclosServiceInterface getService() { return service; } public void setService(EnclosServiceInterface service) { this.service = service; } } <file_sep>/src/main/java/org/zoo/javaee/Animal.java package org.zoo.javaee; public class Animal { private String nom; private String espece; private String description; private String photo; public String getNom() { return nom; } public Animal(String name, String espece) { this.nom= name; this.espece= espece; } public void setNom(String nom) { this.nom = nom; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } public String getPhoto() { return photo; } public void setPhoto(String photo) { this.photo = photo; } public String getEspece() { return espece; } } <file_sep>/src/main/java/org/zoo/javaee/Zoo.java package org.zoo.javaee; import java.io.Serializable; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.List; import javax.faces.bean.ApplicationScoped; import javax.faces.bean.ManagedBean; import javax.faces.bean.ManagedProperty; @ManagedBean( name = "zoo" ) @ApplicationScoped public class Zoo implements Serializable { private static final long serialVersionUID = 1L; @ManagedProperty( value = "#{enclos}") private static List<Enclos>enclos= new ArrayList<>(); private static List<Animal>animaux= new 
ArrayList<>(); private static List<Visite>visites= new ArrayList<>(); private static List<Affectation>affectations= new ArrayList<>(); private static Zoo zoo; public static Zoo getZoo() { if( zoo==null ) zoo= new Zoo(); return zoo; } private Zoo() { load(); } private void load() { loadEnclos(); loadAnimal(); loadAffectation(); try { loadVisite(); } catch (ParseException e) { // TODO Auto-generated catch block e.printStackTrace(); } } private void loadAffectation() { affectations.add( new Affectation( enclos.get(3), animaux.get(0)) ); affectations.add( new Affectation( enclos.get(0), animaux.get(1)) ); affectations.add( new Affectation( enclos.get(1), animaux.get(2)) ); affectations.add( new Affectation( enclos.get(1), animaux.get(6)) ); affectations.add( new Affectation( enclos.get(2), animaux.get(7)) ); affectations.add( new Affectation( enclos.get(2), animaux.get(3)) ); affectations.add( new Affectation( enclos.get(4), animaux.get(4)) ); affectations.add( new Affectation( enclos.get(4), animaux.get(9)) ); affectations.add( new Affectation( enclos.get(4), animaux.get(10)) ); affectations.add( new Affectation( enclos.get(3), animaux.get(5)) ); affectations.add( new Affectation( enclos.get(3), animaux.get(8)) ); } private void loadVisite() throws ParseException { SimpleDateFormat sDF= new SimpleDateFormat("dd/MM/yyyy"); visites.add( new Visite( sDF.parse( "10/01/2016" ), 1,2)); visites.add( new Visite( sDF.parse( "11/01/2016" ), 1,0)); visites.add( new Visite( sDF.parse( "12/01/2016" ), 2,0)); visites.add( new Visite( sDF.parse( "12/01/2016" ), 1,0)); visites.add( new Visite( sDF.parse( "12/01/2016" ), 2,5)); visites.add( new Visite( sDF.parse( "13/01/2016" ), 1,1)); visites.add( new Visite( sDF.parse( "15/01/2016" ), 1,0)); visites.add( new Visite( sDF.parse( "17/01/2016" ), 1,0)); visites.add( new Visite( sDF.parse( "17/01/2016" ), 1,1)); visites.add( new Visite( sDF.parse( "17/01/2016" ), 6,1)); visites.add( new Visite( sDF.parse( "17/01/2016" ), 3,2)); 
visites.add( new Visite( sDF.parse( "17/01/2016" ), 3,4)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 1,0)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 1,4)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 1,0)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 2,0)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 14,0)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 1,0)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 1,3)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 3,16)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 7,20)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 2,10)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 1,0)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 1,0)); visites.add( new Visite( sDF.parse( "18/01/2016" ), 2,0)); } private void loadAnimal() { animaux.add( new Animal("coco", "Ara")); animaux.add( new Animal("buouaf", "Buffle")); animaux.add( new Animal("léo", "Léopard")); animaux.add( new Animal("gor", "Gorille")); animaux.add( new Animal("kang", "Kangourou")); animaux.add( new Animal("cocoara", "Ara")); animaux.add( new Animal("buorrf", "Buffle")); animaux.add( new Animal("léopardo", "Léopard")); animaux.add( new Animal("grillos", "Gorille")); animaux.add( new Animal("ourou", "Kangourou")); animaux.add( new Animal("djili", "Kangourou")); } private void loadEnclos() { enclos.add( new Enclos("enclos1")); enclos.add( new Enclos("enclos2")); enclos.add( new Enclos("enclos3")); enclos.add( new Enclos("enclos4")); enclos.add( new Enclos("enclos5")); } public static List<Affectation> getAffectations() { return affectations; } public static void setAffectations(List<Affectation> affectations) { Zoo.affectations = affectations; } public static List<Animal> getAnimaux() { return animaux; } public static List<Visite> getVisites() { return visites; } public static List<Enclos> getEnclos() { return enclos; } } <file_sep>/src/main/java/org/zoo/javaee/Sandbox.java package 
org.zoo.javaee; public class Sandbox { public Sandbox() { } }
c56ee4dfaebd54322a980534f1ccae3dfa0513de
[ "Java" ]
4
Java
mapuchuz/zooJEE
d61c4489cb8cfa97dc9d5232f31d58fbe5983a97
783544c45abe96c64bd209272dbd9d1322abf52b
refs/heads/master
<repo_name>vishaldhanani/RemoveDuplicateChars<file_sep>/RemoveDuplicate/Program.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace RemoveDuplicate { class Program { public static void Main(string[] args) { RemoveDuplicateChars("Methodology"); } // A Linq approach: //public static string RemoveDuplicateChars(string input) //{ // return new string(input.ToCharArray().Distinct().ToArray()); //} public static string RemoveDuplicateChars(string key) { // Remove duplicate chars using string concats. // ... Store encountered letters in this string. string result = ""; foreach (char value in key) { // See if character is in the result already. if (result.IndexOf(value) == -1) { // Append to the result. result += value; } } return result; } //public static string RemoveDuplicateChars(string s) //{ // string newString = string.Empty; // List<char> found = new List<char>(); // foreach (char c in s) // { // if (found.Contains(c)) // continue; // newString += c.ToString(); // found.Add(c); // } // return newString; //} //static void removeDups(string arr, int n) //{ // // elements which has appeared previously. // Dictionary<char, bool> d = new Dictionary<char, bool>(); // for (int i = 0; i < n; i++) // { // // Print the element if it is not // // there in the hash map // if (d.ContainsKey(arr[i]) != true) // { // d.Add(arr[i], true); // } // } // foreach (KeyValuePair<char, bool> x in d) // { // Console.Write(x.Key); // } // Console.ReadLine(); //} //// Driver Code //public static void Main(String[] args) //{ // string s = "Methodology"; // int n = s.Length; // removeDups(s, n); //} } }
22f157e09116d52a277c8a5ea73d3b0579564508
[ "C#" ]
1
C#
vishaldhanani/RemoveDuplicateChars
efcd110886b9a73f5322ef2d43dfab4442586c4a
70b8952bd39ccbbfda3ded5c19441c0c3444253c
refs/heads/master
<file_sep>#ifndef __RECORD_H__ #define __RECORD_H__ #include <vector> #include <cstdint> #include <cstring> #include "../parser/stmt.h" class Record { private: void unpack(); void pack(); public: vector<unsigned char> buf; vector<record_value> values; vector<table_column *> *table_info; Record() { }; Record( vector<record_value> &t, vector<table_column *> *col ) : values(t), table_info(col) { pack(); } Record( const vector<unsigned char> &tmp, vector<table_column *> *col ) : buf(tmp), table_info(col) { unpack(); }; record_value get_value(table_column *t) { for(auto x = table_info->begin(); x != table_info->end(); x++) { if ((*x)->name == t->name) { return values[x-table_info->begin()]; } } // should never reached here. return record_value(0); } record_value get_value(const string t) { for(auto x = table_info->begin(); x != table_info->end(); x++) { if ((*x)->name == t) { return values[x-table_info->begin()]; } } // should never reached here. return record_value(0); } inline uint32_t size() { return buf.size(); } }; #endif <file_sep>#define _CRT_SECURE_NO_DEPRECATE #include "File.h" #include <cstdio> #include <iostream> #include <fstream> #include <stdexcept> File::File() { alive = false; stream = nullptr; } File::File(std::string filename) : name(filename) { File(); std::FILE *file; file = std::fopen(filename.c_str(), "r"); if (!file){ file = std::fopen(filename.c_str(), "w"); if (!file) { std::string s = "Can not create '" + filename + "'"; //throw std::exception(s.c_str()); throw std::invalid_argument(s); } } else { std::string s = "Can not create file '" + filename + "': file already exists"; //throw std::exception(s.c_str()); throw std::invalid_argument(s); } if (file) { alive = true; std::fclose(file); } } bool File::isAlive() { return alive; } File::~File() { } Block* File::read(int num) { // for test open(); //if (num >= size || num < 0) { // throw std::exception("Block number overflow"); //} Block* newOne = new Block; stream->seekg(BLOCKSIZE * num); 
stream->read(newOne->dataPointer(), BLOCKSIZE); close(); return newOne; } void File::write(int num, Block* update) { open(); if (num >= size) { size = num; } stream->seekp(BLOCKSIZE * num); //std::cout << "write at "; //std::cout << stream->tellp() << std::endl; stream->write(update->dataPointer(), BLOCKSIZE); stream->flush(); //stream->seekp(0, std::ios::end); //std::cout << stream->tellp() << std::endl; close(); // for test //std::cout << blocks.size() << std::endl; //if (num >= static_cast<int>(blocks.size())) // blocks.resize(num + 1); //blocks[num] = update; //std::cout << blocks.size() << std::endl; } void File::open() { if (!alive) { //throw std::exception(std::string("Error in opening file: '" + name + "'").c_str()); } stream = new std::fstream(name, std::ios::in | std::ios::out | std::ios::binary); //std::cout << "Open file "<< std::endl; } void File::close() { if (!alive || stream == nullptr){ //throw std::exception(std::string("Error in closing file: '" + name + "'").c_str()); } stream->close(); delete stream; } void File::load(std::string filename) { std::FILE *file; file = std::fopen(filename.c_str(), "r"); if (!file) { //throw std::exception(std::string("Can not load file '" + filename + "'").c_str()); throw std::invalid_argument("Can not load file '" + filename + "'"); } alive = true; name = filename; } <file_sep>all : main main : src/parser/parser.y src/parser/parser.l src/buffer/Block.cpp src/parser/stmt.cpp src/buffer/BufferManager.cpp src/buffer/File.cpp src/catalog/catalog.cpp src/evaluator/evaluator.cpp src/evaluator/main.cpp src/index/IndexManager.cpp src/record/RecordManager.cpp src/record/record.cpp parser.tab.c lex.yy.c bison -d src/parser/parser.y flex src/parser/parser.l clang++ -std=c++11 -g -x c++ src/parser/stmt.cpp src/buffer/Block.cpp src/buffer/BufferManager.cpp src/buffer/File.cpp src/catalog/catalog.cpp src/evaluator/evaluator.cpp src/evaluator/main.cpp src/index/IndexManager.cpp src/record/RecordManager.cpp src/record/record.cpp 
parser.tab.c lex.yy.c -o main -lfl -lreadline -luuid clean: rm -rf parser.tab.c parser.tab.h lex.yy.c a.out* main* *.o data <file_sep>#include "../catalog/catalog.h" #include "../parser/stmt.h" #include <queue> queue<pair<stmt_type, statement *>> stmt_queue; catalog_manager catm("data"); void xyzsql_emit_stmt(stmt_type t, statement *stmt) { stmt_queue.push({t, stmt}); } void xyzsql_batch() { cout << "batch!" << endl; } void xyzsql_exit() { cout << "now exit!" << endl; exit(0); } void xyzsql_process_create_table() { cout << "table created." << endl; auto s = dynamic_cast<create_table_stmt *>(stmt_queue.front().second); if ( catm.exist_relation(s->name) == NULL ) { catm.add_relation(s); catm.write_back(); } else cerr << "Table name already exists." << endl; } void xyzsql_process_create_index() { cout << "index created." << endl; } void xyzsql_process_select() { cout << "select parsed." << endl; } void xyzsql_process_drop_table() { cout << "table dropped." << endl; } void xyzsql_process_drop_index() { cout << "index dropped." << endl; } void xyzsql_process_transaction() { cout << "Transaction on." << endl; } void xyzsql_process_commit() { cout << "Transaction committed." << endl; } void xyzsql_process_rollback() { cout << "Transaction rollbacked." << endl; } void xyzsql_process_delete() { cout << "records deleted." << endl; } void xyzsql_process_insert() { cout << "record inserted." << endl; } void xyzsql_unknown_stmt() { cout << "unknown statement, check syntax again." 
<< endl; }
<file_sep>#ifndef __STMT_H__
#define __STMT_H__
#include <iostream>
#include <vector>
#include <cassert>
#include <sstream>
#include <cstring>
using namespace std;

/* stmt.h -- AST node types handed from the SQL parser to the evaluator. */

/* Tag identifying which concrete statement a queue entry carries. */
enum stmt_type { _create_table_stmt = 1, _create_index_stmt, _insert_stmt, _select_stmt, _delete_stmt, _drop_index_stmt, _drop_table_stmt, _transaction_stmt, _commit_stmt, _rollback_stmt, _quit_stmt, _exefile_stmt };

/* One column of a table schema: name, data type (INTTYPE/FLOATTYPE/CHARTYPE),
 * string length for CHAR columns, and a bitmask of the *_attr flags below. */
class table_column {
public:
    static const int INTTYPE = 0, FLOATTYPE = 1, CHARTYPE = 2;
    static const int primary_attr = 1, index_attr = 2, notnull_attr = 4, unique_attr = 8;
    string name;
    int data_type, str_len, flag;
    table_column(const char *_name, int _data_type, int _str_len, int _flag) : name(_name), data_type(_data_type), str_len(_str_len), flag(_flag) {}
};

string data_type_to_str(int data_type);

/* Qualified column reference; attribute_name is normalized in the ctor body
 * to the "relation.attribute" full name. */
class attribute {
public:
    string attribute_name, relation_name, full_name;
    attribute(const char *relt, const char *attr) : attribute_name(attr), relation_name(relt), full_name(relation_name + "." + attribute_name) {attribute_name = full_name;}
};

/* A single field value stored as a raw 32-bit pattern.  For CHARTYPE the
 * four bytes are the *pointer* to the heap string
 * (NOTE(review): memcpy of 4 bytes assumes 4-byte pointers -- verify on
 * 64-bit builds).
 * NOTE(review): the uint32_t constructor leaves data_type uninitialized. */
class record_value {
public:
    uint32_t value;
    int data_type;
    record_value(uint32_t _value) : value(_value) {}
    record_value(char * _value) : data_type(table_column::CHARTYPE) { memcpy(&value, &_value, 4); }
    record_value(int _value) : data_type(table_column::INTTYPE) { memcpy(&value, &_value, 4); }
    record_value(float _value) : data_type(table_column::FLOATTYPE) { memcpy(&value, &_value, 4); }
    float as_float() const ;
    char * as_str() const ;
    int as_int() const ;
    string to_str(int data_type) ;
    static int compare_as_int(const record_value &a, const record_value &b) ;
    static int compare_as_float(const record_value &a, const record_value &b) ;
    static int compare_as_str(const record_value &a, const record_value &b) ;
    static int compare(int data_type, const record_value &a, const record_value &b) ;
};

/* WHERE-clause predicate: attribute-op-attribute when flag is true,
 * attribute-op-constant (constant held in v) when flag is false. */
class condition {
public:
    attribute *left_attr = nullptr, *right_attr = nullptr;
    // string str;
    // float fnum;
    // int inum;
    record_value v;
    int op;
    bool flag;
    condition(attribute *l, attribute *r, int _op) : left_attr(l), right_attr(r), v(0), op(_op), flag(true) {}
    condition(attribute *l, float r, int _op) : left_attr(l), v(r), op(_op), flag(false) {}
    condition(attribute *l, int r, int _op) : left_attr(l), v(r), op(_op), flag(false) {}
    condition(attribute *l, char *r, int _op) : left_attr(l), v(r), op(_op), flag(false) {}
    bool calc(pair<table_column *, record_value>) ;
    bool calc(pair<table_column *, record_value>, pair<table_column *, record_value> ) ;
    static const int EQUALTO = 1, GREATERTHAN = 2, LESSTHAN = 3, GREATER_EQUAL = 4, LESS_EQUAL = 5, NOT_EQUAL = 6;
    ~condition();
};

/* Relational-algebra plan node; op is one of the constants below. */
class algbric_node {
public:
    int op;
    bool flag;
    algbric_node *left = nullptr, *right = nullptr;
    string table;
    vector<condition *> conditions;
    vector<attribute *> *projection_list;
    algbric_node(int _op) : op(_op) { flag = false; }
    static const int DIRECT = 0, PROJECTION = 1, SELECTION = 2, JOIN = 3, PRODUCTION = 4;
    ~algbric_node();
};

/* Polymorphic base for every parsed statement. */
class statement {
public:
    virtual ~statement() {};
};

/* SELECT <projection_list> FROM <table_list> WHERE <condition_list>. */
class select_stmt : public statement {
public:
    vector<attribute *> *projection_list;
    vector<string *> *table_list;
    vector<condition *> *condition_list;
    select_stmt(vector<attribute *> *pl, vector<string *> *tl, vector<condition *> *cl) : statement(), projection_list(pl), table_list(tl), condition_list(cl) {}
    ~select_stmt();
};

/* CREATE TABLE <name> (<cols>). */
class create_table_stmt : public statement {
public:
    string name;
    vector<table_column *> *cols;
    int get_col_size() { return cols->size(); }
    create_table_stmt(const char *_name, vector<table_column *> *_cols) : statement(), name(_name), cols(_cols) {}
    ~create_table_stmt();
};

/* CREATE INDEX on a single attribute. */
class create_index_stmt : public statement {
public:
    attribute *attr;
    create_index_stmt( attribute *_a ) : statement(), attr(_a) {}
    ~create_index_stmt();
};

/* INSERT INTO <table_name> VALUES (<values>). */
class insert_stmt : public statement {
public:
    string table_name;
    vector<record_value> *values;
    insert_stmt( const string &_table_name, vector<record_value> *_values ) : statement(), table_name(_table_name), values(_values) {}
    ~insert_stmt();
};

/* DROP TABLE <table_name>. */
class drop_table_stmt : public statement {
public:
    string table_name;
    drop_table_stmt( const string &_table_name ) : statement(), table_name(_table_name) {}
};

/* DROP INDEX on a single attribute. */
class drop_index_stmt : public statement {
public:
    attribute *attr;
    drop_index_stmt( attribute *_a ) : statement(), attr(_a) {}
    ~drop_index_stmt();
};

/* DELETE FROM <table_name> WHERE <condition_list>. */
class delete_stmt : public statement {
public:
    string table_name;
    vector<condition *> *condition_list;
    delete_stmt(const string _table_name, vector<condition *> *_condition_list) : statement(), table_name(_table_name), condition_list(_condition_list) {}
    ~delete_stmt();
};

/* EXECFILE <file_name>: run statements from a script file. */
class exefile_stmt : public statement {
public:
    string file_name;
    exefile_stmt( const string &_file_name ) : statement(), file_name(_file_name) {}
};
#endif
<file_sep>#ifndef __EVALUATOR_H__
#define __EVALUATOR_H__
#include <readline/readline.h>
#include <readline/history.h>
#include <iostream>
#include <cctype>
#include <set>
#include <cstdlib>
#include <uuid/uuid.h>
#include <sstream>
#include <exception>
#include <stdexcept>
#include <cstdio>
#include "../catalog/catalog.h"
#include "../parser/stmt.h"
#include "../record/record.h"
#include "../buffer/Block.h"
#include "../buffer/File.h"
#include "../buffer/BufferManager.h"
#include "../index/IndexManager.h"
#include "../record/RecordManager.h"
#include <queue>

/* evaluator.h -- the parser enqueues (type, stmt) pairs on stmt_queue;
 * main() dispatches each entry to the matching xyzsql_process_* handler. */
extern queue<pair<stmt_type, statement *> > stmt_queue;
extern catalog_manager catm;
void xyzsql_emit_stmt(stmt_type t, statement *stmt) ;
void xyzsql_batch() ;
void xyzsql_exit() ;
void xyzsql_process_create_table(create_table_stmt *s = NULL) ;
void xyzsql_process_create_index() ;
void xyzsql_process_select() ;
void xyzsql_process_drop_table() ;
void xyzsql_process_drop_index() ;
void xyzsql_process_transaction() ;
void xyzsql_process_commit() ;
void xyzsql_process_rollback() ;
void xyzsql_process_delete() ;
void xyzsql_process_insert(insert_stmt *s = NULL);
void xyzsql_unknown_stmt() ;
void xyzsql_finalize();
bool
verify_validation(vector<record_value> *r, vector<table_column *> *t); #endif <file_sep>#pragma once #ifndef __FILE_H__ #define __FILE_H__ #include "Block.h" #include <vector> #include <fstream> class File { public: File(); File(std::string filename); ~File(); Block* read(int); void write(int, Block*); bool isAlive(); void open(); void close(); void load(std::string); private: std::vector<Block> blocks; // for test only std::string name; bool alive; std::fstream* stream; int size; }; #endif<file_sep>#pragma once #ifndef __BLOCK_H__ #define __BLOCK_H__ #include <stdint.h> #define BLOCKSIZE 4096 class Block { public: Block(); char *dataPointer(); ~Block(); unsigned char getByte(int32_t i){return data[i];} void setByte(unsigned char c, int32_t i){data[i]=c;} void fillOne(); // for test void fillZero(); bool equal(Block &); private: unsigned char data[BLOCKSIZE]; }; #endif <file_sep>#include "record.h" #include <cassert> #include <cstring> void Record::unpack() { vector<record_value> &result = values; auto t = table_info; auto j = buf.begin(); for( auto i = t->begin(); i != t->end(); j += (*i)->str_len, i++) { int a; float b; char * c; switch((*i)->data_type) { case table_column::INTTYPE : a = (*(j+3) << 24) | (*(j+2) << 16) | (*(j+1) << 8) | (*(j)); result.push_back(record_value(a)); break; case table_column::FLOATTYPE : a = (*(j+3) << 24) | (*(j+2) << 16) | (*(j+1) << 8) | (*(j)); b = *((float *)&a); result.push_back(record_value(b)); break; case table_column::CHARTYPE : c = new char[(*i)->str_len]; memset(c, 0, sizeof(char) * (*i)->str_len); for(int k = 0; k < (*i)->str_len; k++) c[k] = *(j+k); result.push_back(record_value(c)); break; } } } void Record::pack() { // vector<unsigned char> result; string tmp; auto t = table_info; auto j = values.begin(); char * c; int ttt; for( auto i = t->begin(); i != t->end(); i++, j++) { switch((*i)->data_type) { case table_column::INTTYPE : ttt = j->as_int(); tmp.append( (char *)&(ttt), 4 ); break; case table_column::FLOATTYPE : 
ttt = j->as_int(); // raw 4-byte pattern of the float -- see record_value
                tmp.append( (char *)&(ttt), 4 );
                break;
            case table_column::CHARTYPE :
                c = new char[(*i)->str_len];
                memset(c, 0, sizeof(char) * (*i)->str_len);
                strcpy(c, j->as_str());
                tmp.append(c, (*i)->str_len);
                break;
        }
    }
    buf.assign(tmp.begin(), tmp.end());
}
<file_sep>#include <readline/readline.h>
#include <readline/history.h>
#include <iostream>
#include <cctype>
#include <sstream>
#include <ctime>
#include "../parser/stmt.h"
#include "../../parser.tab.h"
#include "evaluator.h"

/* Hand-written externs for the flex scanner's string-buffer API. */
typedef struct yy_buffer_state *YY_BUFFER_STATE;
extern YY_BUFFER_STATE yy_scan_string (const char *yy_str );
extern void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer );
extern void yy_delete_buffer (YY_BUFFER_STATE b );
extern int yyparse ();
using namespace std;

/* Global singletons.  Each variable shadows its own type name (legal C++,
 * but confusing -- later uses of these identifiers refer to the objects). */
BufferManager BufferManager;
IndexManager IndexManager(&BufferManager);
catalog_manager catm(".");
RecordManager RecordManager;
ifstream bat;            // open while a batch script (execfile) is running
string base_addr = ".";

/* Wire the record manager to the buffer, catalog and index managers. */
void system_init() {
    cout << "System Initialized!" << endl;
    RecordManager.Init(&BufferManager, &catm, &IndexManager);
}

/* REPL: read one line (interactive readline, or from the batch file when it
 * is open), feed it to the bison parser, then drain stmt_queue dispatching
 * each statement to its evaluator entry point.  Timing of the previous
 * command is printed before the next prompt. */
int main() {
    system_init();
    clock_t start_time = 0, end_time;
    string command;
    while(true) {
        const char *line;
        if ( !bat.is_open() ) {
            end_time = clock();
            if (start_time != 0 ) {
                cout << (end_time - start_time) / (float)CLOCKS_PER_SEC * 1000 << " ms used. 
" << end_time - start_time << endl;
            }
            line = readline(">>> ");
            add_history(line);
            start_time = clock();
        } else {
            getline(bat, command);
            if(bat.eof()) { bat.close(); continue; }
            line = command.c_str();
        }
        int len = strlen(line);
        char *tmp = new char[len + 2];
        strcpy(tmp, line);
        tmp[len + 1] = 0; // extra NUL after the one strcpy already wrote
        YY_BUFFER_STATE my_string_buffer = yy_scan_string(tmp);
        yy_switch_to_buffer( my_string_buffer );
        yyparse();
        yy_delete_buffer( my_string_buffer );
        delete[] tmp;
        try {
            while( !stmt_queue.empty() ) {
                switch( stmt_queue.front().first ) {
                    case stmt_type::_create_table_stmt: xyzsql_process_create_table(); break;
                    case stmt_type::_create_index_stmt: xyzsql_process_create_index(); break;
                    case stmt_type::_select_stmt: xyzsql_process_select(); break;
                    case stmt_type::_insert_stmt: xyzsql_process_insert(); break;
                    case stmt_type::_delete_stmt: xyzsql_process_delete(); break;
                    case stmt_type::_drop_table_stmt: xyzsql_process_drop_table(); break;
                    case stmt_type::_drop_index_stmt: xyzsql_process_drop_index(); break;
                    case stmt_type::_transaction_stmt: xyzsql_process_transaction(); break;
                    case stmt_type::_commit_stmt: xyzsql_process_commit(); break;
                    case stmt_type::_rollback_stmt: xyzsql_process_rollback(); break;
                    case stmt_type::_quit_stmt: xyzsql_exit(); break;
                    case stmt_type::_exefile_stmt: xyzsql_batch(); break;
                    default: xyzsql_unknown_stmt();
                }
                if ( stmt_queue.front().second != nullptr) delete stmt_queue.front().second;
                stmt_queue.pop();
            }
        } catch( exception &t ) {
            /* NOTE(review): the popped statement is not deleted on this path. */
            cout << t.what() << endl;
            stmt_queue.pop();
        }
    }
    /* Not reached in normal operation: the loop has no break;
     * _quit_stmt goes through xyzsql_exit() instead. */
    xyzsql_finalize();
    return 0;
}
<file_sep>#pragma once
#ifndef __BUFFER_MANAGER_H__
#define __BUFFER_MANAGER_H__
#include <string>
#include <vector>
#include <map>
#include "Block.h"
#include "File.h"
#include <iostream>
#define BUFSIZE 128
#define MAXTIME 0xffffffff
//#define MAXTIME 9 // for test only

/* One buffer-pool slot: which (file, block) it caches, its access
 * timestamp, and whether it must be written back (dirty). */
class BufferRecord {
public:
    BufferRecord() {
        blocknum = -1;
        accessTime = 0;
        block = nullptr;
        filename = "";
        dirty = false;
    }
    ~BufferRecord() {
        //if (block)
        //    delete block;
    }
    inline
bool equal(std::string filename, int num) { return this->filename == filename && this->blocknum == num; } int blocknum; std::string filename; unsigned int accessTime; Block * block; bool dirty; }; class BufferManager { public: static const std::string dataFileDir, trash; BufferManager(); void load(); ~BufferManager(); Block readBlock(std::string, int); void writeBlock(std::string, int, Block&); void createFile(std::string); BufferRecord* findQ(std::string, int); File* findFile(std::string); BufferRecord* insertQ(std::string, int, Block*); void flushQ(); void retimeQ(); // for test only void printQ() { std::cout << "=======================begin buffer =================\n"; for (auto i = bufQueue.begin(); i != bufQueue.end(); i++) { std::cout << i->dirty << " " << i->accessTime << " " << i->filename << " " << i->blocknum << std::endl; } std::cout << "=======================end buffer =================\n"; } void newTrashCan(); void appendTrashCan(int blocknum, int offset); bool fetchTrash(int&, int&); void beginFetchTrash(); void emptyTrashCan(); private: std::fstream *trashFile; std::map<std::string, File *> files; std::vector<BufferRecord> bufQueue; /*Block bufData[BUFSIZE];*/ unsigned int time; }; #endif <file_sep>/* * IndexManager.h * * Created on: 2014年11月7日 * Author: Admin */ #ifndef INDEXMANAGER_H_ #define INDEXMANAGER_H_ /*** * 1.选择操作 * 2.删除操作 * 3.插入数据操作 * 4.建立索引操作 * 5.删除索引操作 * * ******/ #include <iostream> #include <sstream> #include <stdint.h> #include <bitset> #include <queue> #include <string> #include "../buffer/BufferManager.h" #include "../buffer/Block.h" #define BLOCKSIZE 4096 //#define maxBTreeNode 100 //#define max(a,b) {if((a)>(b)) return (a); return (b);} using namespace std; class indexIterator; // class Block // { // public: // Block(){} // ~Block(){} // unsigned char getByte(int32_t i){return b[i];} // void setByte(unsigned char c, int32_t i){b[i]=c;} // void print(){// for test // for(int i=0;i<4096;i++) // { // cout<<(unsigned int)(b[i])<<" 
// "; (continuation of the commented-out debug print above)
//             if(i%16==15)
//                 cout<<endl;
//             if(i%256==255)
//                 cout<<"|||"<<i+1<<endl;
//         }
//     }
// } private:
//     unsigned char b[4096];
// };
//
//
// class BufferManager
// {
// public:
//     BufferManager(){}
//     ~BufferManager(){}
//     Block readBlock(string fileName,int32_t i){return blocks[i];}
//     void writeBlock(string fileName,int32_t i,Block &b){blocks[i]=b;}
//     void createFile(string fileName){}
//     void deleteFile(string fileName){}
// private:
//     Block blocks[100];
// };

/* blockAnalyzer: manages the free-block bitmap of one index file.  The
 * bitmap occupies blocks 0-1; within block 1, bytes 4088-4091 cache the
 * last allocation position (a) and bytes 4092-4095 hold the block number
 * of the B+-tree root. */
class blockAnalyzer {
public:
    blockAnalyzer(Block b1,Block b2,BufferManager* myBfMgr){
        b[0]=b1;b[1]=b2;
        a=(b[1].getByte(4088)<<24)+(b[1].getByte(4089)<<16)+(b[1].getByte(4090)<<8)+b[1].getByte(4091);
        myBufferManager=myBfMgr;
    }
    virtual ~blockAnalyzer(){}
    /* Scan the bitmap (starting from the cached byte position a) for a
     * clear bit, set it, and return the corresponding block number.
     * Byte 0 bits 7-6 (blocks 0-1, the bitmap itself) are never handed out. */
    int32_t getNextEmptyBlock() {
        int32_t i;
        unsigned char c;
        uint8_t d;
        for(i=a;;i=(i+1)%8180) {
            if(i>4095) c=b[1].getByte(i-4096);
            else c=b[0].getByte(i);
            bitset<8> bt(c);
            if(c!=0xFF) {
                if(i!=0){
                    if(!bt[7]) {c+=0x80;d=0; break;}
                    else if(!bt[6]) {c+=0x40;d=1; break;}
                }
                if(!bt[5]) {c+=0x20;d=2;break;}
                else if(!bt[4]) {c+=0x10;d=3;break;}
                else if(!bt[3]) {c+=0x08;d=4;break;}
                else if(!bt[2]) {c+=0x04;d=5;break;}
                else if(!bt[1]) {c+=0x02;d=6;break;}
                else if(!bt[0]) {c+=0x01;d=7;break;}
            }
        }
        if(i>4095) b[1].setByte(c,i-4096);
        else b[0].setByte(c,i);
        a=i;
        return (a<<3)+d; // byte index * 8 + bit index = block number
    }
    int32_t getRootPosition() {
        int32_t r;
        r=(b[1].getByte(4092)<<24)+(b[1].getByte(4093)<<16)+(b[1].getByte(4094)<<8)+b[1].getByte(4095);
        return r;
    }
    void changeRootPos(int32_t Position) {
        b[1].setByte(Position>>24,4092);
        b[1].setByte(Position>>16,4093);
        b[1].setByte(Position>>8,4094);
        b[1].setByte(Position,4095);
    }
    /* Clear the bitmap bit for blockPos, marking the block free. */
    void deleteBlock(int32_t blockPos) {
        uint8_t tmp=0;
        uint8_t tmp1=blockPos&0x7;
        unsigned char c;
        if(blockPos>32767) { blockPos-=32768; tmp=1; }
        c=b[tmp].getByte(blockPos>>3);
        bitset<8> bt(c);
        bt[7-tmp1]=0;
        c=(unsigned char)bt.to_ulong();
        b[tmp].setByte(c,blockPos>>3);
    }
    /* Persist the allocation cursor and both bitmap blocks. */
    void writeBack(string fileName) {
        b[1].setByte(a>>24,4088);
        b[1].setByte(a>>16,4089);
        b[1].setByte(a>>8,4090);
        b[1].setByte(a,4091);
        myBufferManager->writeBlock(fileName,0,b[0]);
        myBufferManager->writeBlock(fileName,1,b[1]);
    }
private:
    int32_t a;
    Block b[2];
    BufferManager* myBufferManager;
};

/* treeNode: one B+-tree node, (de)serialized to/from a 4 KB Block.  Keys
 * are int, float, or fixed-length char depending on columnType.  In leaves
 * every key i carries a record address (children[i], blockOffset[i]) and
 * children[maxNumber] links to the next leaf. */
class treeNode {
public:
    treeNode(BufferManager* bfmgr,string type,int32_t charLen,int32_t blockNum,bool root, bool leaf) {
        myBufferManager=bfmgr;
        nodeSize=BLOCKSIZE;
        Root=root;
        Leaf=leaf;
        columnType=type;
        this->charLen=charLen;
        if(type=="int") {
            maxNumber=nodeSize/12-5;
            keyInt=new int[maxNumber];
            keyFloat=NULL;
            keyStr=NULL;
        }
        if(type=="float") {
            maxNumber=nodeSize/12-5;
            keyFloat=new float[maxNumber];
            keyInt=NULL;
            keyStr=NULL;
        }
        if(type=="char") {
            maxNumber=nodeSize/(charLen+8)-5;
            keyStr=new string[maxNumber];
            keyInt=NULL;
            keyFloat=NULL;
        }
        children=new int32_t[maxNumber+1];
        if(Leaf) blockOffset=new int32_t[maxNumber+1];
        else blockOffset=NULL;
        blockNumber=blockNum;
        keyNumber=0;
        children[maxNumber]=0; // 0 terminates the leaf chain
    }
    treeNode(BufferManager* bfmgr,Block b);
    virtual ~treeNode() {
        delete[] children;
        delete[] blockOffset;
        if(keyInt) delete[] keyInt;
        if(keyFloat) delete[] keyFloat;
        if(keyStr) delete[] keyStr;
    }
    bool compare(string condition,int32_t i);
    int deleteElement(blockAnalyzer* analyzer,string fileName,string value,string &newKey);
    void deleteFromLeaf(int32_t i);
    void deleteFromNonLeaf(int32_t i);
    bool exist(string value);
    bool enough();
    bool enoughGive();
    int32_t next();
    int findPos(string condition);
    int32_t getBlock(string condition);
    int32_t getBlockNumber(){return blockNumber;}
    int32_t getBlockOffset(int32_t i){return blockOffset[i];}
    int32_t getCharLen(){return charLen;}
    int32_t getChild(int32_t i){return children[i];}
    string getColumnType(){return columnType;}
    string getKey(int32_t i);
    int32_t getKeyNumber(){return keyNumber;}
    int getLeaf(indexIterator &it,string fileName,string condition,int condType);
    int getLeftestLeaf(indexIterator &it,string fileName);
    int32_t getLinkedLeafNode();
    bool isLeaf(){return Leaf;}
    int insert(blockAnalyzer* analyzer,string fileName,string value,int32_t recordBlockNumber,int32_t recordBlockOffset, int32_t& newBlockNumber,string& newValue);
    void merge(treeNode* t1, treeNode*t2,string keyValue);
    void moveForward();
    void moveBackward();
    void moveKeyBackward(int32_t i);
    void moveKeyForward(int32_t i);
    void print(){//for test
        cout<<"nodeSize"<<nodeSize<<endl;
        cout<<"blockNumber"<<blockNumber<<endl;
        cout<<"maxNumber"<<maxNumber<<endl;
        cout<<"keyNumber"<<keyNumber<<endl;
        cout<<"columnType"<<columnType<<endl;
        cout<<"charLen"<<charLen<<endl;
        cout<<"Leaf"<<Leaf<<endl;
        cout<<"Root"<<Root<<endl;
    }
    void setBlockOffset(int32_t i,int32_t offset);
    void setChild(int32_t i,int32_t blockNum);
    void setKey(int32_t i,string value);
    void setKeyNumber(int32_t i){keyNumber=i;}
    void setLinkedLeafNode(int32_t blockNum);
    void setRoot(bool state){Root=state;}
    Block transform();
    void writeBack(string fileName);
private:
    int32_t nodeSize;
    int32_t blockNumber;
    int32_t maxNumber;
    int32_t keyNumber;
    string columnType;
    int32_t charLen;
    string *keyStr;
    int *keyInt;
    float *keyFloat;
    int32_t *children;
    int32_t *blockOffset;
    bool Leaf;
    bool Root;
    BufferManager* myBufferManager;
};

/* IndexManager: facade over the B+-tree index files; lazily (re)opens the
 * bitmap analyzer whenever the target index file changes (lastFile). */
class IndexManager {
public:
    IndexManager(BufferManager* bfmngr){
        myAnalyzer=NULL;
        myBufferManager=bfmngr;
    }
    virtual ~IndexManager(){delete myAnalyzer;}
    int selectNode(indexIterator &iterator,string fileName, int condType ,string condition);
    int insertNode(string fileName, string value, int32_t recordBlockNumber, int32_t recordBlockOffset);
    int deleteNode(string fileName, string value);
    void createIndex(string fileName,string colType,int32_t charLen,int32_t number,string value[], int32_t blockNumber[], int32_t blockOffset[]);
    void dropIndex(string fileName);
    /* Position `it` at the leftmost leaf (full index scan). */
    int getStarter(indexIterator &it,string fileName){
        string tmp="";
        selectNode(it,fileName,3,tmp);
        return 0;
    }
    /* Debug helper: breadth-first dump of the whole tree to stdout
     * (blocks on getchar()). */
    void print(string fileName){
        Block tmpBlock,tmpBlock2;
        queue<int32_t> myQueue;
        treeNode *tmpNode;
        if(lastFile!=fileName) {
            if(myAnalyzer!=NULL){
                myAnalyzer->writeBack(fileName);
                delete myAnalyzer;
            }
            tmpBlock=myBufferManager->readBlock(fileName,0);
            tmpBlock2=myBufferManager->readBlock(fileName,1);
            myAnalyzer=new blockAnalyzer(tmpBlock,tmpBlock2,myBufferManager);
            cout<<"maybe"<<endl;
            getchar();
        }
        int32_t blockPos=myAnalyzer->getRootPosition();
        myQueue.push(blockPos);
        while(myQueue.size()!=0) {
            int32_t tmp1=myQueue.front();
            myQueue.pop();
            tmpBlock=myBufferManager->readBlock(fileName,tmp1);
            tmpNode=new treeNode(myBufferManager,tmpBlock);
            for(int32_t i=0;i<tmpNode->getKeyNumber();i++) cout<<tmpNode->getChild(i)<<" "<<tmpNode->getKey(i)<<" ";
            if(tmpNode->isLeaf()) cout<<" "<<tmpNode->getLinkedLeafNode();
            else cout<<tmpNode->getChild(tmpNode->getKeyNumber());
            cout<<endl;
            if(!tmpNode->isLeaf()) for(int32_t j=0;j<=tmpNode->getKeyNumber();j++) myQueue.push(tmpNode->getChild(j));
            //cout<<"round"<<endl;
            //getchar();
        }
    }
    /* Fresh bitmap block 0: bit 0x20 of byte 0 marks block 2 (the initial
     * root) as allocated. */
    Block newIndexHead(){
        Block b;
        b.fillZero();
        b.setByte(0x20,0);
        return b;
    }
    /* Fresh bitmap block 1: root pointer (bytes 4092-4095) set to block 2. */
    Block newIndexHead2(){
        Block b;
        b.fillZero();
        b.setByte(0x02,4095);
        return b;
    }
    //void analyzeIndex(Block* tmpBlock,string& colType, int32_t& charLen,int32_t& blockPos);
private:
    string lastFile;
    blockAnalyzer* myAnalyzer;
    BufferManager *myBufferManager;
};

/* indexIterator: forward scan over leaf entries.  next() yields the
 * (record block number, record offset) pair for the current key and
 * follows the linked-leaf chain; returns -1 at the end of the index. */
class indexIterator {
public:
    indexIterator(){ node = nullptr; }
    ~indexIterator(){ if (node) delete node; }
    int next(int32_t& t1,int32_t& t2) {
        if(i<node->getKeyNumber()) {
            t1=node->getChild(i);
            t2=node->getBlockOffset(i);
            i++;
        } else {
            int32_t a=node->getLinkedLeafNode();
            if(a!=0) {
                delete node;
                node=new treeNode(myBufferManager,myBufferManager->readBlock(fileName,a));
                t1=node->getChild(0);
                t2=node->getBlockOffset(0);
                i=1;
            }
            else return -1;
        }
        return 0;
    }
    void set(string f,BufferManager* b,treeNode* t,int32_t it){
        node=t;
        fileName=f;
        myBufferManager=b;
        i=it;
    }
private:
    treeNode* node;
    int32_t i;
    string fileName;
    BufferManager* myBufferManager;
};
#endif /* INDEXMANAGER_H_ */
<file_sep>#pragma once
#ifndef __RECORD_MANAGER_H__
#define __RECORD_MANAGER_H__
#include "record.h"
#include "../catalog/catalog.h"
#include
"../parser/stmt.h"
#include <vector>
#include <cstdint>
#include <fstream>
#include "../buffer/BufferManager.h"
#include "../buffer/Block.h"
#include "../index/IndexManager.h"
//#include "../catalog/catalog.h"
class Cursor;

/* recordBlock: a 4 KB page of fixed-size record slots.  Each slot begins
 * with a 4-byte header that is either the offset of the next free slot
 * (free-list link, 0 = end of list) or 0xffffffff when the slot holds a
 * record.  The word at offset 0 doubles as the free-list head; in the
 * table's header block the same word is reused as the block count. */
class recordBlock : public Block {
public:
    //const static int headWidth = 4;
    recordBlock() { }
    recordBlock(const Block& b) : Block(b) { };
    /* Thread every slot of an empty page into the free list. */
    void Init(int size) {
        char * headOfRecord;
        size += 4; // first int used to save pointer or flag
        uint32_t j = size;
        for (auto i = 0; i < BLOCKSIZE / size - 1; i++) {
            headOfRecord = this->dataPointer() + i * size;
            *reinterpret_cast<std::uint32_t *>(headOfRecord) = j;
            j += size;
        }
        headOfRecord = this->dataPointer() + (BLOCKSIZE / size - 1) * size;
        *reinterpret_cast<std::uint32_t *>(headOfRecord) = 0;
    }
    //inline std::uint32_t getRecordSize() {
    //    return *reinterpret_cast<std::uint32_t *>(this->dataPointer());
    //}
    //inline std::uint32_t getRecordCount() {
    //    return *reinterpret_cast<std::uint32_t *>(this->dataPointer() + 4);
    //}
    /* Pop a slot off the free list and copy r's bytes into it.  Returns
     * false when the page is full; otherwise stores the slot offset in
     * `offset` and marks the slot occupied (header = 0xffffffff). */
    bool insertRecord(Record r, int &offset) {
        char * headOfRecord = nullptr;
        int size = r.size() + 4; // first int used to save pointer or flag
        uint32_t j = size;
        bool ret = false;
        int i = *reinterpret_cast<std::uint32_t *>(this->dataPointer());
        if (i == 0) {
            // empty free list, full block
            ret = false;
        } else {
            headOfRecord = this->dataPointer() + i;
            uint32_t next = *reinterpret_cast<std::uint32_t *>(headOfRecord);
            *reinterpret_cast<std::uint32_t *>(this->dataPointer()) = next;
            *reinterpret_cast<std::uint32_t *>(headOfRecord) = 0xffffffff;
            offset = i;
            headOfRecord += 4;
            for (i = 0; i < r.size(); i++) {
                *reinterpret_cast<unsigned char *>(headOfRecord + i) = r.buf[i];
            }
            ret = true;
        }
        return ret;
    }
    /* Push the slot at `offset` back on the front of the free list. */
    void deleteRecord(int size, int offset) {
        char * headOfRecord = nullptr;
        std::uint32_t size_r = size + 1; // NOTE(review): unused
        uint32_t next = *reinterpret_cast<std::uint32_t *>(this->dataPointer());
        *reinterpret_cast<std::uint32_t *>(this->dataPointer()) = offset;
        headOfRecord = this->dataPointer() + offset;
        *reinterpret_cast<std::uint32_t *>(headOfRecord) = next;
    }
    /* True iff the slot at `offset` currently holds a record. */
    bool isValid(int offset) {
        char * headOfRecord = this->dataPointer() + offset;
        return (*reinterpret_cast<std::uint32_t *>(headOfRecord) == 0xffffffff);
    }
    /* Copy the record payload (size bytes, 4-byte header excluded) out of
     * the slot. */
    std::vector<unsigned char> getRecord(int size, int offset) {
        char * headOfRecord = this->dataPointer() + offset;
#ifdef CAN_THROW
        if (*reinterpret_cast<std::uint32_t *>(headOfRecord) < 0xffffffff) {
            throw std::exception("Record Manager: the record has been deleted");
        }
#endif
        std::vector<unsigned char> x;
        headOfRecord += 4;
        for (auto i = 0; i < size; i++) {
            x.push_back(*reinterpret_cast<unsigned char *>(headOfRecord + i));
        }
        return x;
    }
    /* Header-block accessors: the first word is interpreted either as the
     * free-list head or as the allocated-block count. */
    std::uint32_t getFreelist() { return getBlockCount(); }
    void setFreelist(std::uint32_t c) { setBlockCount(c); }
    void setBlockCount(std::uint32_t c) { *reinterpret_cast<std::uint32_t *>(this->dataPointer()) = c; }
    std::uint32_t getBlockCount() { return *reinterpret_cast<std::uint32_t *>(this->dataPointer()); }
private:
};
//class headerBlock : public Block {
//public:
//    headerBlock () {
//    }
//
//    headerBlock(Block &block):Block(block) {
//
//    }
//};

/* RecordManager: table-level record CRUD on top of the buffer manager;
 * also hands out the Cursor used for full table scans. */
class RecordManager {
public:
    const static std::string master;
    const static std::string trash;
    RecordManager();
    void Init(BufferManager* BM, catalog_manager* CM, IndexManager * IM);
    ~RecordManager();
    void createMaster(std::string tableName);
    void insertRecord(std::string tableName, Record newRecord, int&, int&);
    void deleteRecord(std::string tableName, int blocknum, int offset, int size);
    Record getRecord(std::string tableName, int blocknum, int offset, int size);
    Cursor* getCursor(std::string tableName, int size);
private:
    BufferManager* bm;
    catalog_manager* cm;
    IndexManager* im;
    std::fstream *trashFile;
    Cursor *cursor;
};

/* Cursor: sequential scan over the occupied record slots of one table
 * file ("<table>/<master>"). */
class Cursor {
public:
    Cursor() {
        filename = "";
        blockNum = 0;
        offset = 0;
        size = 0;
        maxBlockCount = 0;
        bm = nullptr;
        cm = nullptr;
    };
    Cursor(BufferManager *bm, catalog_manager *cm, std::string tableName, int blockNum, int offset, int size, int maxBlockCount){
        this->tableName = tableName;
        this->filename = tableName + "/" + RecordManager::master;
        this->blockNum = blockNum;
        this->offset = offset;
        this->size = size;
        this->maxBlockCount = maxBlockCount;
        this->bm = bm;
        this->cm = cm;
        this->endFlag = false;
        block = bm->readBlock(filename, blockNum);
    };
    ~Cursor() { };
    /* Advance to the next occupied slot (offset 0 of each page is never
     * visited: j is incremented before the validity test, and word 0 is
     * the free-list head).  Returns false when the file is exhausted. */
    bool next() {
        int i = blockNum;
        int j = offset;
        int maxRecordCount = BLOCKSIZE / (size + 4)- 1;
        int maxOffset = maxRecordCount * (size + 4);
        // bool finish = false;
        while(i <= maxBlockCount && j <= maxOffset) {
            j += (size + 4);
            if (block.isValid(j)) {
                // finish = true;
                break;
            }
            if (j > maxOffset) {
                j = 0;
                i = i + 1;
                if (i <= maxBlockCount) {
                    block = bm->readBlock(filename, i);
                }
            }
        }
        if (i > maxBlockCount) {
            return false;
        }
        blockNum = i;
        offset = j;
        return true;
    }
    /* Materialize the record under the cursor using the table schema
     * (local `catm` shadows the global catalog object of the same name). */
    Record getRecord() {
        auto catm = cm->exist_relation(tableName);
        return Record(block.getRecord(size, offset), catm->cols);
    }
private:
    BufferManager* bm;
    catalog_manager* cm;
    std::string tableName;
    std::string filename;
    int blockNum;
    int offset;
    int size;
    int maxBlockCount;
    recordBlock block;
    bool endFlag;
};
#endif
<file_sep>/*
 * IndexManager.cpp
 *
 *  Created on: Nov 7, 2014
 *      Author: Admin
 */
#include "IndexManager.h"
#include <string>
#include <sstream>
#include <stdint.h>
#include <cmath>
using namespace std;

/* Deserialize a treeNode from its on-disk Block layout:
 * [0..3] blockNumber, [4..7] maxNumber, [8..11] keyNumber,
 * [12] type tag ('i'/'f'/'c'), [13] Root, [14] Leaf, [16..19] charLen,
 * then the key array, then children, then (leaves only) record offsets.
 * All multi-byte fields are big-endian. */
treeNode::treeNode(BufferManager* bfmgr,Block b) {
    myBufferManager=bfmgr;
    nodeSize=BLOCKSIZE;
    blockNumber=(b.getByte(0)<<24)+(b.getByte(1)<<16)+(b.getByte(2)<<8)+b.getByte(3);
    maxNumber=(b.getByte(4)<<24)+(b.getByte(5)<<16)+(b.getByte(6)<<8)+b.getByte(7);
    keyNumber=(b.getByte(8)<<24)+(b.getByte(9)<<16)+(b.getByte(10)<<8)+b.getByte(11);
    unsigned char c=b.getByte(12);
    if(c=='f') columnType="float";
    else if(c=='i') columnType="int";
    else if(c=='c') columnType="char";
    Root=(bool)b.getByte(13);
    Leaf=(bool)b.getByte(14);
    charLen=(b.getByte(16)<<24)+(b.getByte(17)<<16)+(b.getByte(18)<<8)+b.getByte(19);
    if(columnType=="char") {
        keyStr=new string[maxNumber];
        keyInt=NULL;
        keyFloat=NULL;
    }
    else
if(columnType=="int") {
    keyStr=NULL;
    keyInt=new int[maxNumber];
    keyFloat=NULL;
}
else if(columnType=="float") {
    keyStr=NULL;
    keyInt=NULL;
    keyFloat=new float[maxNumber];
}
if(Leaf) blockOffset=new int32_t[maxNumber+1];
else blockOffset=NULL;
children=new int32_t[maxNumber+1];
int32_t j=20; // first byte after the fixed header
for(int32_t i=0;i<keyNumber;i++) {
    if(columnType=="char") {
        // NUL-terminated string in a (charLen+1)-byte cell
        for(int32_t k=0;;k++) {
            unsigned char c=b.getByte(j+i*(charLen+1)+k);
            if(c==0) break;
            else keyStr[i].append(1,c);
        }
    }
    if(columnType=="int") keyInt[i]=(b.getByte(j+i*4)<<24)+(b.getByte(j+4*i+1)<<16)+(b.getByte(j+4*i+2)<<8)+b.getByte(j+4*i+3);
    if(columnType=="float") {
        int a=(b.getByte(j+4*i)<<24)+(b.getByte(j+4*i+1)<<16)+(b.getByte(j+4*i+2)<<8)+b.getByte(j+4*i+3);
        keyFloat[i]=*(float *)&a;
    }
}
if(columnType=="char") j=j+maxNumber*(charLen+1);
else j=j+maxNumber*4;
for(int32_t i=0;i<keyNumber+1;i++) children[i]=(b.getByte(j+4*i)<<24)+(b.getByte(j+4*i+1)<<16)+(b.getByte(j+4*i+2)<<8)+b.getByte(j+4*i+3);
// leaves also restore the linked-leaf pointer stored in slot maxNumber
if(Leaf) children[maxNumber]=(b.getByte(j+4*maxNumber)<<24)+(b.getByte(j+4*maxNumber+1)<<16)+(b.getByte(j+4*maxNumber+2)<<8)+b.getByte(j+4*maxNumber+3);
j=j+(maxNumber+1)*4;
if(Leaf) for(int32_t i=0;i<keyNumber+1;i++) blockOffset[i]=(b.getByte(j+4*i)<<24)+(b.getByte(j+4*i+1)<<16)+(b.getByte(j+4*i+2)<<8)+b.getByte(j+4*i+3);
}

//return true, when condition>=key[i]
// NOTE(review): falls off the end (no return) for an unrecognized columnType.
bool treeNode::compare(string condition,int32_t i) {
    stringstream ss;
    int n;
    float f;
    if(columnType=="char") return condition>=keyStr[i];
    if(columnType=="int") {
        ss<<condition;
        ss>>n;
        return n>=keyInt[i];
    }
    if(columnType=="float") {
        ss<<condition;
        ss>>f;
        return f>=keyFloat[i];
    }
}

//find the proper position in treeNode structure to insert condition
int treeNode::findPos(string condition) {
    int i;
    for(i=0;i<keyNumber;i++) {
        if(compare(condition,i)) continue;
        break;
    }
    return i;
}

//if the value exist in the array of keys in treeNode, return true
bool treeNode::exist(string value) {
    stringstream ss;
    ss<<value;
    int n;
    float f;
    ss>>n;
    ss.clear();
    ss<<value;
    ss>>f;
    for(int32_t i=0;i<keyNumber;i++) {
        if(columnType=="char"&&keyStr[i]==value){
            //cout<<value<<endl;
            return true;
        }
        if(columnType=="int"&&keyInt[i]==n){ return true;}
        if(columnType=="float"&&keyFloat[i]==f){ return true;}
    }
    return false;
}

// if the key is enough, return true
// (minimum-occupancy test used by the delete path)
bool treeNode::enough() {
    if(Root&&Leaf) return true;
    if(Root) return keyNumber>=1;
    if(Leaf) {
        if(keyNumber>=ceil((double)maxNumber/2)) return true;
    }
    else if(keyNumber>=ceil((double)(maxNumber+1)/2)-1) return true;
    return false;
}

//if the node is enough to give a value or child
// (can lend one entry to a sibling and still satisfy minimum occupancy)
bool treeNode::enoughGive() {
    if(Root&&Leaf) return true;
    if(Root) return keyNumber>=2;
    if(Leaf) {
        if(keyNumber>=1+ceil((double)maxNumber/2)) return true;
    }
    else if(keyNumber>=ceil((double)(maxNumber+1)/2)) return true;
    return false;
}

//move i'th key back 0->1
void treeNode::moveKeyBackward(int32_t i) {
    if(columnType=="char") keyStr[i+1]=keyStr[i];
    if(columnType=="int") keyInt[i+1]=keyInt[i];
    if(columnType=="float") keyFloat[i+1]=keyFloat[i];
}

//move i'th key forward 0<-1
void treeNode::moveKeyForward(int32_t i) {
    if(columnType=="char") keyStr[i-1]=keyStr[i];
    if(columnType=="int") keyInt[i-1]=keyInt[i];
    if(columnType=="float") keyFloat[i-1]=keyFloat[i];
}

//get i'th key, output as string
// NOTE(review): falls off the end (no return) for an unrecognized columnType.
string treeNode::getKey(int32_t i) {
    stringstream ss;
    string result;
    if(columnType=="char") return keyStr[i];
    if(columnType=="int") {
        ss<<keyInt[i];
        ss>>result;
        return result;
    }
    if(columnType=="float") {
        ss<<keyFloat[i];
        ss>>result;
        return result;
    }
}

//set the value to i'th key position
void treeNode::setKey(int32_t i,string value) {
    stringstream ss;
    int n;
    float f;
    if(columnType=="char") {
        keyStr[i]= value;
    }
    if(columnType=="int") {
        ss<<value;
        ss>>n;
        keyInt[i]=n;
    }
    if(columnType=="float") {
        ss<<value;
        ss>>f;
        keyFloat[i]=f;
    }
}

//set the value to i'th child position
void treeNode::setChild(int32_t i,int32_t blockNum) {
    children[i]=blockNum;
}

//delete key[i], children[i], blockOffset[i] from leaf node
void treeNode::deleteFromLeaf(int32_t i) {
    for(int32_t tmp=i;tmp<keyNumber-1;tmp++) {
        moveKeyForward(tmp+1);
        children[tmp]=children[tmp+1];
        blockOffset[tmp]=blockOffset[tmp+1];
    }
    keyNumber--;
}

//delete key[i],children[i+1] from nonleaf node
void treeNode::deleteFromNonLeaf(int32_t i) {
    for(int32_t tmp=i+1; tmp<=keyNumber-1;tmp++) {
        moveKeyForward(tmp);
        children[tmp]=children[tmp+1];
    }
    keyNumber--;
}

//move everything forward 0<-1
// (drops the first entry of the node)
void treeNode::moveForward() {
    if(Leaf)
        for(int32_t tmp=0;tmp<keyNumber-1;tmp++) {
            moveKeyForward(tmp+1);
            children[tmp]=children[tmp+1];
            blockOffset[tmp]=blockOffset[tmp+1];
        }
    else {
        for(int32_t tmp=0;tmp<keyNumber-1;tmp++) {
            moveKeyForward(tmp+1);
            children[tmp]=children[tmp+1];
        }
        children[keyNumber-1]=children[keyNumber];
    }
    keyNumber--;
}

//move everything backward 1<-0
// (opens a hole at index 0 for an incoming entry)
void treeNode::moveBackward() {
    if(Leaf) {
        for(int32_t i=keyNumber-1;i>=0;i--) {
            moveKeyBackward(i);
            children[i+1]=children[i];
            blockOffset[i+1]=blockOffset[i];
        }
    }
    else {
        children[keyNumber+1]=children[keyNumber];
        for(int32_t i=keyNumber-1;i>=0;i--) {
            moveKeyBackward(i);
            children[i+1]=children[i];
        }
    }
    keyNumber++;
}

//set the value to i'th blockOffset position
void treeNode::setBlockOffset(int32_t i,int32_t offset) {
    blockOffset[i]=offset;
}

//get linked leafNode
int32_t treeNode::getLinkedLeafNode() {
    return children[maxNumber];
}

//set linked leafNode
// (parameter shadows the blockNumber member)
void treeNode::setLinkedLeafNode(int32_t blockNumber) {
    children[maxNumber]=blockNumber;
}

//in index file, get the block number of the leaf node which may contain an element whose value is equal to condition
int treeNode::getLeaf(indexIterator &it,string fileName,string condition,int condType) {
    // find the leaf-node block corresponding to `condition`
    int32_t i=findPos(condition);
    //if(Leaf&&!exist(condition))
    //return -1;
    if(Leaf) {
        if(exist(condition)) {
            if(condType==1||condType==4) it.set(fileName,myBufferManager,this,i-1);
            if(condType==2) it.set(fileName,myBufferManager,this,i);
            return 0;
        } else {
            it.set(fileName,myBufferManager,this,i);
        }
        return -1;
    }
    Block tmpBlock=myBufferManager->readBlock(fileName, children[i]);
    treeNode* tmpNode=new treeNode(myBufferManager,tmpBlock);
    int result=tmpNode->getLeaf(it,fileName,condition,condType);
    // the leaf itself is handed to the iterator (it.set) and must survive
    if(!tmpNode->Leaf) delete tmpNode;
    return result;
}

//in index file, get the leftest leaf node's block number
int treeNode::getLeftestLeaf(indexIterator &it,string fileName) {
    if(Leaf) {
        it.set(fileName,myBufferManager,this,0);
        return 0;
    }
    Block tmpBlock=myBufferManager->readBlock(fileName, children[0]);
    treeNode* tmpNode=new treeNode(myBufferManager,tmpBlock);
    int result=tmpNode->getLeftestLeaf(it,fileName);
    if(!tmpNode->Leaf) delete tmpNode;
    return result;
}

//merge
// Append t2's entries onto t1; for internal nodes the separator keyValue
// from the parent is pulled down between them.
void treeNode::merge(treeNode* t1, treeNode*t2,string keyValue) {
    int32_t i,j;
    if(t1->Leaf) {
        t1->setLinkedLeafNode(t2->getLinkedLeafNode());
        for(i=t1->keyNumber,j=0;;i++,j++) {
            if(j<=t2->keyNumber-1) {
                t1->setKey(i,t2->getKey(j));
                t1->setChild(i,t2->getChild(j));
                t1->setBlockOffset(i,t2->getBlockOffset(j));
                t1->keyNumber++;
            }
            else break;
        }
    }
    else {
        t1->setKey(t1->keyNumber,keyValue);
        t1->keyNumber++;
        for(i=t1->keyNumber,j=0;;j++,i++) {
            if(j<=t2->keyNumber-1) {
                t1->setKey(i,t2->getKey(j));
                t1->setChild(i,t2->getChild(j));
                t1->keyNumber++;
            }
            else break;
        }
        t1->setChild(i,t2->getChild(t2->keyNumber));
    }
}

//transform to block
// Serialize this node into a Block using the layout documented at the
// deserializing constructor (big-endian multi-byte fields).
Block treeNode::transform() {
    Block b;
    b.fillZero();
    b.setByte(blockNumber>>24,0);
    b.setByte(blockNumber>>16,1);
    b.setByte(blockNumber>>8,2);
    b.setByte(blockNumber,3);
    b.setByte(maxNumber>>24,4);
    b.setByte(maxNumber>>16,5);
    b.setByte(maxNumber>>8,6);
    b.setByte(maxNumber,7);
    b.setByte(keyNumber>>24,8);
    b.setByte(keyNumber>>16,9);
    b.setByte(keyNumber>>8,10);
    b.setByte(keyNumber,11);
    b.setByte(columnType[0],12); // 'c','i','f'
    b.setByte(Root,13);
    b.setByte(Leaf,14);
    b.setByte(charLen>>24,16);
    b.setByte(charLen>>16,17);
    b.setByte(charLen>>8,18);
    b.setByte(charLen,19);
    int32_t i=0;
    int32_t j=20;
    //cout<<"in transform1"<<endl;
    for(i=0;i<keyNumber;i++) {
        if(columnType=="char") {
            /* NOTE(review): `length()>=k` lets k reach length(), reading
             * keyStr[i][size()] (the '\0' slot) before terminating. */
            for(int32_t k=0;;k++) {
                if(keyStr[i].length()>=k) b.setByte((char)(keyStr[i][k]),j+i*(charLen+1)+k);
                else {
                    b.setByte(0,j+i*(charLen+1)+k);
                    break;
                }
            }
        }
        if(columnType=="int") {
            stringstream ss;
            ss<<keyInt[i];
            int32_t a;
            ss>>a;
            b.setByte(a>>24,j+4*i);
            b.setByte(a>>16,j+4*i+1);
            b.setByte(a>>8,j+4*i+2);
            b.setByte(a,j+4*i+3);
        }
        if(columnType=="float") {
            stringstream ss;
            float ff;
            ss<<keyFloat[i];
            ss>>ff;
            b.setByte(((*(int *)&ff)>>24)&0xFF,j+4*i);
            b.setByte(((*(int *)&ff)>>16)&0xFF,j+4*i+1);
            b.setByte(((*(int *)&ff)>>8)&0xFF,j+4*i+2);
            b.setByte((*(int *)&ff)&0xFF,j+4*i+3);
        }
    }
    //cout<<"in transform2"<<endl;
    if(columnType=="char") j=j+maxNumber*(charLen+1);
    else j=j+maxNumber*4;
    for(i=0;i<keyNumber+1;i++) {
        b.setByte((children[i]>>24)&0xFF,j+i*4);
        b.setByte((children[i]>>16)&0xFF,j+i*4+1);
        b.setByte((children[i]>>8)&0xFF,j+i*4+2);
        b.setByte(children[i]&0xFF,j+i*4+3);
    }
    if(Leaf) {
        // persist the linked-leaf pointer kept in slot maxNumber
        i=maxNumber;
        b.setByte((children[i]>>24)&0xFF,j+i*4);
        b.setByte((children[i]>>16)&0xFF,j+i*4+1);
        b.setByte((children[i]>>8)&0xFF,j+i*4+2);
        b.setByte(children[i]&0xFF,j+i*4+3);
        //cout<<children[i]<<endl;////////////////////////////
    }
    j=j+(maxNumber+1)*4;
    //cout<<"in transform3"<<endl;
    if(Leaf) {
        int32_t i=0;
        for(i=0;i<keyNumber+1;i++) {
            b.setByte((blockOffset[i]>>24)&0xFF,j+4*i);
            b.setByte((blockOffset[i]>>16)&0xFF,j+4*i+1);
            b.setByte((blockOffset[i]>>8)&0xFF,j+4*i+2);
            b.setByte(blockOffset[i]&0xFF,j+4*i+3);
        }
    }
    //cout<<"in transform"<<endl;
    return b;
}

//write the treeNode back to file
void treeNode::writeBack(string fileName) {
    Block t=transform();
    //cout<<"in writeBack"<<endl;
    //t.print();
    myBufferManager->writeBlock(fileName,blockNumber,t);
}

/* Recursive B+-tree insert.  Returns -1 on duplicate key, 0 when the
 * insert finished without splitting, 1 when this node split -- in that
 * case newBlockNumber/newValue carry the new sibling and the key to push
 * up to the caller. */
int treeNode::insert(blockAnalyzer* analyzer ,string fileName,string value, int32_t recordBlockNumber,int32_t recordBlockOffset,int32_t& newBlockNumber,string& newValue) {
    Block tmpBlock;
    treeNode* tmpNode;
    int p=findPos(value);
    int i,j;
    if(Leaf) // this node is leaf node
    {
        //cout<<"in node::insert1"<<endl;/////////////////
        //cout<<"p"<<" "<<p<<endl;///////////////
        //cout<<"keyNumber"<<keyNumber<<endl;/////////////////
        if(exist(value)) {
            cerr<<"duplicate"<<endl;
            return -1;
        }
        else if(keyNumber<maxNumber) //do not need to split
        {
            //cout<<"in node::insert2"<<endl;/////////////////////////
            if(keyNumber!=0)
                for(i=keyNumber-1;i>=0;i--) {
                    if(i>=p) {
                        children[i+1]=children[i];
                        blockOffset[i+1]=blockOffset[i];
                        moveKeyBackward(i);
                    }
                    else break;
                }
            keyNumber++;
            //cout<<"in node::insert3"<<endl;/////////////////
            setKey(p,value);
            children[p]=recordBlockNumber;
            blockOffset[p]=recordBlockOffset;
            writeBack(fileName);
            return 0;
        }
        else // need to split
        {
            newBlockNumber=analyzer->getNextEmptyBlock();
            tmpNode=new treeNode(myBufferManager,columnType,charLen,newBlockNumber,false,true);
            bool tmpState=false; // true once the new entry has been placed
            // if(it==122)
            // {
            //     cout<<"leaf"<<blockNumber<<endl;
            //     cout<<"leaf"<<newBlockNumber<<endl;
            //     getchar();
            // }
            //cout<<"in node::insert4"<<endl;
            /* Move the upper half of the entries (including the new one if
             * it belongs there) into the new right sibling. */
            for(i=maxNumber-1,j=floor((double)(maxNumber+1)/2)-1; j>=0 ; j--) {
                if(!tmpState&&p>i) {
                    tmpNode->setKey(j,value);
                    tmpNode->setChild(j,recordBlockNumber);
                    tmpNode->setBlockOffset(j,recordBlockOffset);
                    tmpState=true;
                    continue;
                }
                tmpNode->setKey(j,getKey(keyNumber-1));
                tmpNode->setChild(j,getChild(keyNumber-1));
                tmpNode->setBlockOffset(j,getBlockOffset(keyNumber-1));
                keyNumber--;
                i--;
            }
            // if(it==122)
            // {
            //
            //     getchar();
            // }
            //cout<<"in node::insert5"<<endl;
            tmpNode->keyNumber=floor((double)(maxNumber+1)/2);
            //cout<<"tmpNode->keyNumber"<<tmpNode->keyNumber<<endl;///////////////////////////
            if(!tmpState) {
                /* New entry lands in the (left) original node. */
                for(i=keyNumber-1;;i--) {
                    if(p>i) {
                        setKey(i+1,value);
                        setChild(i+1,recordBlockNumber);
                        setBlockOffset(i+1,recordBlockOffset);
                        keyNumber++;
                        break;
                    }
                    moveKeyBackward(i);
                    children[i+1]=children[i];
                    blockOffset[i+1]=blockOffset[i];
                }
            }
            tmpNode->setLinkedLeafNode(getLinkedLeafNode());
            setLinkedLeafNode(newBlockNumber);
            newValue=tmpNode->getKey(0); // separator key pushed to the parent
            //cout<<"newValue"<<newValue<<endl;////////////////////////////
            // if(it==122)
            // {
            //     int i;
            //     for(i=0;i<tmpNode->keyNumber;i++)
            //     {
            //         cout<<tmpNode->children[i]<<" "<<tmpNode->keyStr[i]<<" ";
            //     }
            //     cout<<endl;
            //     for(i=0;i<keyNumber;i++)
            //         cout<<children[i]<<" "<<keyStr[i]<<" ";
            //     cout<<endl;
            //     getchar();
            // }
            if(Root) setRoot(false);
            writeBack(fileName);
            tmpNode->writeBack(fileName);
            delete tmpNode;
            return 1;
        }
    }
    else//this node is not leaf node
    {
        int32_t blockPos;
        string newKey;
        // if(it==122)
        // {cout<<p<<endl;
        //     cout<<children[p]<<endl;
        //     getchar();}
        tmpBlock=myBufferManager->readBlock(fileName,children[p]);
        tmpNode=new treeNode(myBufferManager,tmpBlock);
        //cout<<"in node::insert nonleaf node1"<<" "<<tmpNode->isLeaf()<<" "<<Root<<" "<<p<<" "<<children[p]<<endl;
        int result = tmpNode->insert(analyzer,fileName,value,recordBlockNumber,recordBlockOffset,blockPos,newKey);
        delete tmpNode;
        if(result == -1)//duplicate
            return -1;
        if(result == 0) //insert complete
            return 0;
        if(result == 1)// sub tree is split
        {
            // if(it==122)
            // {
            //     cout<<"root split1"<<endl;
            //     cout<<blockPos<<endl;
            //     cout<<result<<endl;
            //     getchar();
            // }
            if(keyNumber<maxNumber)// the current Node do not need to split
            {
                // if(it==122)
                // {
                //     cout<<"root split2"<<endl;
                //     cout<<blockPos<<endl;
                //     cout<<result<<endl;
                //     getchar();
                // }
                //cout<<"I am right"<<endl;////////////////////////////
                for(int32_t k=keyNumber-1;k>=p;k--) {
                    moveKeyBackward(k);
                    children[k+2]=children[k+1];
                }
                keyNumber++;
                //cout<<"p"<<p<<endl;/////////////////////////
                //cout<<"newKey"<<newKey<<endl;/////////////////
                //cout<<"blockPos"<<blockPos<<endl;//////////////
                setKey(p,newKey);
                setChild(p+1,blockPos);
                // if(it==122)
                // {
                //     cout<<"root split2"<<endl;
                //     cout<<blockPos<<endl;
                //     cout<<result<<endl;
                //     getchar();
                // }
                writeBack(fileName);
                return 0;
            }
            else //the current Node need to split too
            {
                // if(it==122)
                // {
                //     cout<<"root split"<<endl;
                //     cout<<blockPos<<endl;
                //     getchar();
                // }
                if(Root) setRoot(false);
                newBlockNumber = analyzer->getNextEmptyBlock();
                tmpNode=new treeNode(myBufferManager,columnType,charLen,newBlockNumber,false,false);
                bool
tmpState=false; //cout<<"in nonleaf node split ok0"<<endl; for(j=maxNumber-1,i=floor((double)maxNumber/2)-1;i>=0;i--) { if(!tmpState&&p>j) { tmpNode->setChild(i+1,blockPos); tmpNode->setKey(i,newKey); tmpState=true; continue; } tmpNode->setChild(i+1,getChild(keyNumber)); tmpNode->setKey(i,getKey(keyNumber-1)); keyNumber--; j--; } tmpNode->keyNumber=floor((double)maxNumber/2); //cout<<"in nonleaf node split ok1"<<endl; if(!tmpState&&p>j) { newValue=newKey; tmpNode->setChild(0,blockPos); } else { newValue=getKey(keyNumber-1); tmpNode->setChild(0,getChild(keyNumber)); keyNumber--; if(!tmpState) { for(i=keyNumber-1;;i--) { if(p>i) break; moveKeyBackward(i); children[i+2]=children[i+1]; } setKey(p,newKey); setChild(p+1,blockPos); keyNumber++; } } //cout<<"in nonleaf node split ok2"<<endl; //cout<<"tmpNode"<<tmpNode->keyNumber<<endl; //cout<<keyNumber<<endl; writeBack(fileName); tmpNode->writeBack(fileName); delete tmpNode; return 1; } } } } int treeNode::deleteElement(blockAnalyzer* analyzer,string fileName,string value,string &newKey) { int32_t p=findPos(value); if(Leaf)//this node is leaf { if(!exist(value))//value not exist { cerr<<"value not exist"<<endl; return -1; } else // value is exist in the leaf node { if(enoughGive()) // enough element to delete { deleteFromLeaf(p-1); if(p==1) { newKey=getKey(0); writeBack(fileName); return 1; } writeBack(fileName); return 0; } else//not enough after delete, has to merge or borrow from sibling { deleteFromLeaf(p-1); if(p==1) { newKey=getKey(0); return 3; } return 2; } } } else// this node is nonleaf { Block tmpBlock=myBufferManager->readBlock(fileName,children[p]); treeNode *tmpNode=new treeNode(myBufferManager,tmpBlock); string newValue; int result = tmpNode->deleteElement(analyzer,fileName,value,newValue); if(result==0||result==-1) { delete tmpNode; writeBack(fileName); return result; } if(result==1) { delete tmpNode; if(p!=0) { setKey(p-1,newValue); writeBack(fileName); return 0; } else { if(Root) return 0; 
newKey=newValue; writeBack(fileName); return 1; } } if(result==2||result==3) { //cout<<"ok??"<<endl; Block tmpBlock1; treeNode* tmpNode1; if(p!=keyNumber) //merge or borrow from back { tmpBlock1=myBufferManager->readBlock(fileName,children[p+1]); tmpNode1=new treeNode(myBufferManager,tmpBlock1); if(!tmpNode1->enoughGive())//merge { merge(tmpNode,tmpNode1,getKey(p)); //cout<<"p"<<p<<endl; deleteFromNonLeaf(p); analyzer->deleteBlock(tmpNode1->getBlockNumber()); delete tmpNode1; if(Root&&!enough()) { tmpNode->setRoot(true); tmpNode->writeBack(fileName); analyzer->changeRootPos(tmpNode->getBlockNumber()); analyzer->deleteBlock(getBlockNumber()); delete tmpNode; return 4; } tmpNode->writeBack(fileName); delete tmpNode; } else //borrow { //cout<<"ok???"<<endl; if(tmpNode->Leaf) // subNode is leaf { tmpNode->setKey(tmpNode->keyNumber,tmpNode1->getKey(0)); tmpNode->setChild(tmpNode->keyNumber,tmpNode1->getChild(0)); tmpNode->setBlockOffset(tmpNode->keyNumber,tmpNode1->getBlockOffset(0)); tmpNode->keyNumber++; tmpNode1->moveForward(); setKey(p,tmpNode1->getKey(0)); } else //subNode is not leaf { tmpNode->setKey(tmpNode->keyNumber,getKey(p)); tmpNode->setChild(tmpNode->keyNumber+1,tmpNode1->getChild(0)); tmpNode->keyNumber++; setKey(p,tmpNode1->getKey(0)); tmpNode1->moveForward(); } tmpNode->writeBack(fileName); tmpNode1->writeBack(fileName); delete tmpNode; delete tmpNode1; } if(enough()) { if(result==3) { if(p!=0) setKey(p-1,newValue); else { newKey=newValue; writeBack(fileName); return 1; } } writeBack(fileName); return 0; } else { if(result==3) { if(p!=0) setKey(p-1,newValue); else { newKey=newValue; return 3; } } return 2; } } else //merge or borrow from forward { tmpBlock1=myBufferManager->readBlock(fileName,children[p-1]); tmpNode1=new treeNode(myBufferManager,tmpBlock1); if(!tmpNode1->enoughGive())//merge { //cout<<"ok?????"<<endl; merge(tmpNode1,tmpNode,getKey(p-1)); deleteFromNonLeaf(p-1); analyzer->deleteBlock(tmpNode->getBlockNumber()); delete tmpNode; 
if(Root&&!enough()) { tmpNode1->setRoot(true); analyzer->changeRootPos(tmpNode1->getBlockNumber()); // cout<<"rootPos"<<tmpNode1->getBlockNumber()<<endl; // // cout<<"currootPos"<<analyzer->getRootPosition()<<endl; // getchar(); analyzer->deleteBlock(getBlockNumber()); tmpNode1->writeBack(fileName); delete tmpNode1; return 4; } tmpNode1->writeBack(fileName); delete tmpNode1; } else //borrow { if(tmpNode->Leaf) { tmpNode->moveBackward(); tmpNode->setKey(0,tmpNode1->getKey(tmpNode1->keyNumber-1)); tmpNode->setChild(0,tmpNode1->getChild(tmpNode1->keyNumber-1)); tmpNode->setBlockOffset(0,tmpNode1->getBlockOffset(tmpNode1->keyNumber-1)); tmpNode1->keyNumber--; setKey(p-1,tmpNode->getKey(0)); } else { tmpNode->moveBackward(); if(result==2) tmpNode->setKey(0,getKey(p-1)); else tmpNode->setKey(0,newValue); tmpNode->setChild(0,tmpNode1->getChild(tmpNode1->keyNumber)); setKey(p-1,tmpNode1->getKey(tmpNode1->keyNumber-1)); tmpNode1->keyNumber--; } tmpNode1->writeBack(fileName); tmpNode->writeBack(fileName); delete tmpNode1; delete tmpNode; } if(enough()) { writeBack(fileName); return 0; } else { return 2; } } } } } int IndexManager::selectNode(indexIterator &iterator,string fileName, int condType ,string condition) { int result; Block tmpBlock; Block tmpBlock2; treeNode *currentNode; int32_t blockPos1; int32_t blockOffset1; if(lastFile!=fileName) { if(myAnalyzer!=NULL){ myAnalyzer->writeBack(lastFile); delete myAnalyzer; } tmpBlock=myBufferManager->readBlock(fileName,0); tmpBlock2=myBufferManager->readBlock(fileName,1); myAnalyzer=new blockAnalyzer(tmpBlock,tmpBlock2,myBufferManager); lastFile = fileName; } blockPos1=myAnalyzer->getRootPosition(); tmpBlock=myBufferManager->readBlock(fileName,blockPos1);//得到root所在的Block currentNode=new treeNode(myBufferManager,tmpBlock);//用Block还原出root //"=" 1 ,">" 2, "<" 3, ">=" 4, "<=" 5 if(condType==3||condType==5)//where的条件为小于或小于等于 result=currentNode->getLeftestLeaf(iterator,fileName); else 
result=currentNode->getLeaf(iterator,fileName,condition,condType); if(!currentNode->isLeaf()) delete currentNode; if(condType==1&&result==-1) { // delete currentNode; return -1; } return 0; } int IndexManager::insertNode(string fileName, string value, int32_t recordBlockNumber, int32_t recordBlockOffset) { Block tmpBlock; Block tmpBlock2; treeNode* tmpNode; int32_t blockPos; string newKey; int32_t newBlock; if(lastFile!=fileName) { if(myAnalyzer!=NULL){ myAnalyzer->writeBack(lastFile); delete myAnalyzer; } tmpBlock=myBufferManager->readBlock(fileName,0); tmpBlock2=myBufferManager->readBlock(fileName,1); myAnalyzer=new blockAnalyzer(tmpBlock,tmpBlock2,myBufferManager); lastFile = fileName; } //cout<<"in manager::insertNode1"<<endl;////////////////////////////// blockPos=myAnalyzer->getRootPosition(); //analyzeIndex(tmpBlock,colType,charLen,blockPos); tmpBlock=myBufferManager->readBlock(fileName,blockPos); tmpNode=new treeNode(myBufferManager,tmpBlock);//得到根节点 //cout<<"in manager::insertNode2"<<endl;//////////////////////////////////////////// int result = tmpNode->insert(myAnalyzer,fileName,value,recordBlockNumber,recordBlockOffset,newBlock,newKey); //cout<<"in manager::insertNode3"<<endl;////////////////////////////////// if(result==-1)//insert complete { delete tmpNode; return -1; } else if(result==0) { //do nothing } else if(result==1)//create a new Root { int32_t newRoot; newRoot=myAnalyzer->getNextEmptyBlock(); treeNode *newNode=new treeNode(myBufferManager,tmpNode->getColumnType(),tmpNode->getCharLen(),newRoot,true,false); newNode->setChild(0,blockPos); newNode->setKey(0,newKey); newNode->setChild(1,newBlock); newNode->setKeyNumber(1); newNode->writeBack(fileName); delete newNode; myAnalyzer->changeRootPos(newRoot); } //cout<<"in manager::insertNode4"<<endl;/////////////////////////////////// delete tmpNode; return 0; } int IndexManager::deleteNode(string fileName, string value) { Block tmpBlock1,tmpBlock2; int32_t blockPos; treeNode *tmpNode; string newKey; 
//cout<<"ok1"<<endl; if(lastFile!=fileName) { if(myAnalyzer!=NULL) { myAnalyzer->writeBack(lastFile); delete myAnalyzer; } tmpBlock1=myBufferManager->readBlock(fileName,0); tmpBlock2=myBufferManager->readBlock(fileName,1); myAnalyzer=new blockAnalyzer(tmpBlock1,tmpBlock2,myBufferManager); lastFile = fileName; } blockPos=myAnalyzer->getRootPosition(); tmpBlock1=myBufferManager->readBlock(fileName,blockPos); tmpNode=new treeNode(myBufferManager,tmpBlock1); //cout<<"ok2"<<endl; int result=tmpNode->deleteElement(myAnalyzer,fileName,value,newKey); //cout<<"ok?"<<endl; if(result == -1) { delete tmpNode; return -1; } if(result == 0 || result == 1) tmpNode->writeBack(fileName); delete tmpNode; return 0; } void IndexManager::createIndex(string fileName,string colType,int32_t charLen,int32_t number,string value[],int32_t blockNumber[],int32_t blockOffset[]) { Block block1=newIndexHead(); Block block2=newIndexHead2(); treeNode* tmpNode=new treeNode(myBufferManager,colType,charLen,2,true,true); //tmpNode->print(); myBufferManager->createFile(fileName); myBufferManager->writeBlock(fileName,0,block1); myBufferManager->writeBlock(fileName,1,block2); //myBufferManager->printQ(); myAnalyzer=new blockAnalyzer(block1,block2,myBufferManager); // cout<<myAnalyzer->getNextEmptyBlock()<<endl; // cout<<myAnalyzer->getRootPosition()<<endl; tmpNode->writeBack(fileName); //cout<<"ok1"<<endl; delete tmpNode; lastFile=fileName; //cout<<"ok3"<<endl; for(int32_t i=0;i<number;i++) insertNode(fileName,value[i],blockNumber[i],blockOffset[i]); //print(fileName); // Block block3=myBufferManager->readBlock(fileName,2); // block3.print(); } void IndexManager::dropIndex(string fileName) { //myBufferManager->deleteFile(fileName); if(lastFile==fileName && myAnalyzer!=NULL) { delete myAnalyzer; myAnalyzer=NULL; } } <file_sep>#include "BufferManager.h" #include <fstream> #include <iostream> #include <algorithm> const std::string BufferManager::dataFileDir = ""; const std::string BufferManager::trash = 
"trash.tmp"; // completes `const std::string BufferManager::trash =` from the previous chunk

// Construct an empty manager; the commented-out load() suggests eager file
// loading was considered and dropped.
BufferManager::BufferManager() {
	//load();
}

// No-op placeholder (see constructor).
void BufferManager::load() {
}

// Register a brand-new on-disk file under `filename`.
void BufferManager::createFile(std::string filename) {
	//TODO not check exists file
	files.insert(std::pair<std::string, File*>(filename, new File(dataFileDir + filename)));
}

// NOTE(review): neither the File* map entries nor the cached Blocks are freed
// here -- they live for the process lifetime.
BufferManager::~BufferManager() {
}

// Return the File handle for `filename`, opening and caching it on first use.
// Lookup is by the bare filename; the dataFileDir prefix is only used for the
// on-disk path.
File* BufferManager::findFile(std::string filename) {
	std::string path = dataFileDir + filename;
	auto f = files.find(filename);
	File *file = nullptr;
	if (f != files.end()) {
		file = f->second;
	} else {
		File *n = new File();
		n->load(path);
		files.insert(std::pair<std::string, File*>(filename, n));
		file = n;
	}
	return file;
}

// Linear scan of the buffer queue for a cached (filename, block) entry.
// Bumps the global logical clock `time` on every call and re-normalizes all
// timestamps via retimeQ() when it overflows MAXTIME; a hit is stamped with
// the current time (LRU bookkeeping). Returns nullptr on miss.
// NOTE(review): the returned pointer aims into the bufQueue vector -- a later
// push_back (e.g. in insertQ) can invalidate it; safe only because callers
// use it before the queue grows. Verify if bufQueue ever changes container type.
BufferRecord *BufferManager::findQ(std::string filename, int index) {
	time++;
	if (time > MAXTIME) retimeQ();
	BufferRecord *br = nullptr;
	for (auto i = bufQueue.begin(); i != bufQueue.end(); i++) {
		if (i->equal(filename, index)) {
			br = &(*i);
			//i->accessTime++; wrong
			i->accessTime = time;
			// std::cout << filename << " " << index << " " << "hit" << std::endl;
			break;
		}
	}
	return br;
}

// Insert block `b` for (filename, num) into the buffer queue, taking
// ownership of `b`. If the queue is below BUFSIZE the record is appended;
// otherwise the least-recently-used victim is evicted (flushed to disk first
// if dirty) and its slot reused. Returns a pointer to the resident record.
BufferRecord *BufferManager::insertQ(std::string filename, int num, Block* b) {
	unsigned int minTime = MAXTIME;
	auto old = bufQueue.begin();
	BufferRecord *ret = nullptr;
	if (bufQueue.size() < BUFSIZE) {
		// not full
		BufferRecord newOne;
		newOne.filename = filename;
		newOne.blocknum = num;
		newOne.accessTime = time;
		newOne.block = b;
		bufQueue.push_back(newOne);
		ret = &bufQueue[bufQueue.size() - 1];
	} else {
		// full
		// Pick the entry with the smallest accessTime (least recently used).
		for (auto i = bufQueue.begin(); i != bufQueue.end(); i++) {
			if (i->accessTime < minTime) {
				minTime = i->accessTime;
				old = i;
			}
		}
		File *f = nullptr;
		if (old->dirty) {
			// Write-back policy: flush the victim before reuse.
			f = findFile(old->filename);
			f->write(old->blocknum, old->block);
		}
		//f = findFile(filename);
		delete old->block;
		old->filename = filename;
		old->blocknum = num;
		//old->block = f->read(num); got memory leak here
		old->block = b;
		old->dirty = false;
		old->accessTime = time;
		ret = &(*old);
	}
	return ret;
}

// Return a copy of block `index` of `filename`, going to disk only on a cache
// miss. (Body continues in the next chunk.)
Block BufferManager::readBlock(std::string filename, int index) {
	std::string path = dataFileDir + filename;
BufferRecord *record = findQ(filename, index); if (!record) { File* f = findFile(filename); record = insertQ(filename, index, f->read(index)); } return *(record->block); // end //auto f = files.find(filename); //if (f != files.end()) { // // find in files // return f->second->read(index); //} else { // // check disk // File *n = new File(); // n->load(filename); // if (n->isAlive()) { // files.insert(std::pair<std::string, File*>(filename, n)); // return n->read(index); // } //} } void BufferManager::writeBlock(std::string filename, int index, Block& block) { std::string path = dataFileDir + filename; BufferRecord *record = findQ(filename, index); if (!record) { File* f = findFile(filename); //TODO no need to read f, can use a blank block Block *blank = new Block; record = insertQ(filename, index, blank); } *(record->block) = block; record->dirty = true; //std::string fname = dataFileDir + filename; //auto f = files.find(fname); //if (f != files.end()) { // // find in files // f->second->write(index, block); // return; //} else { // // check disk // File *n = new File(); // n->load(fname); // if (n->isAlive()) { // files.insert(std::pair<std::string, File*>(filename, n)); // n->write(index, block); // return; // } //} //std::cout << "Not write anything" << std::endl; ///*files[filename]->write(index, block);*/ } void BufferManager::flushQ() { File *f = nullptr; for (auto i = bufQueue.begin(); i != bufQueue.end(); i++) { if (i->dirty) { f = findFile(i->filename); f->write(i->blocknum, i->block); i->dirty = false; } } } bool bufCompare(const BufferRecord& a, const BufferRecord& b) { return a.accessTime < b.accessTime; } void BufferManager::retimeQ() { time = 0; std::sort(bufQueue.begin(), bufQueue.end(), bufCompare); for (auto i = bufQueue.begin(); i != bufQueue.end(); i++) { i->accessTime = time++; } } void BufferManager::newTrashCan() { trashFile = new std::fstream(trash, std::ios::out | std::ios::trunc); } void BufferManager::appendTrashCan(int blocknum, int 
offset) { (*trashFile) << blocknum << " " << offset << "\n"; } void BufferManager::beginFetchTrash() { trashFile->close(); trashFile->open(trash, std::ios::in); } bool BufferManager::fetchTrash(int &blocknum, int& offset) { bool flag; flag = ((*trashFile) >> blocknum >> offset); if (!flag) trashFile->close(); return flag; } void BufferManager::emptyTrashCan() { std::streamoff end = trashFile->tellp(); trashFile->seekp(0); int b, c; while (trashFile->tellp() < end) { // TODO } } <file_sep>#include <readline/readline.h> #include <readline/history.h> #include <iostream> #include <cctype> #include "../parser/stmt.h" #include "../parser.tab.h" #include "repl.h" typedef struct yy_buffer_state *YY_BUFFER_STATE; extern YY_BUFFER_STATE yy_scan_string (const char *yy_str ); extern void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ); extern void yy_delete_buffer (YY_BUFFER_STATE b ); extern int yyparse (); using namespace std; void system_init() { cout << "System Initialized!" << endl; } int main() { system_init(); while(true) { char * line = readline(">>> "); add_history(line); int len = strlen(line); // for (int i = 0; i < len; i++) line[i] = toupper(line[i]); cout << "What you typed: " << line << endl; char *tmp = new char[len + 2]; strcpy(tmp, line); tmp[len + 1] = 0; YY_BUFFER_STATE my_string_buffer = yy_scan_string(tmp); yy_switch_to_buffer( my_string_buffer ); yyparse(); yy_delete_buffer( my_string_buffer ); while( !stmt_queue.empty() ) { switch( stmt_queue.front().first ) { case stmt_type::_create_table_stmt: xyzsql_process_create_table(); break; case stmt_type::_create_index_stmt: xyzsql_process_create_index(); break; case stmt_type::_select_stmt: xyzsql_process_select(); break; case stmt_type::_insert_stmt: xyzsql_process_insert(); break; case stmt_type::_delete_stmt: xyzsql_process_delete(); break; case stmt_type::_drop_table_stmt: xyzsql_process_drop_table(); break; case stmt_type::_drop_index_stmt: xyzsql_process_drop_index(); break; case 
stmt_type::_transaction_stmt: xyzsql_process_transaction(); break; case stmt_type::_commit_stmt: xyzsql_process_commit(); break; case stmt_type::_rollback_stmt: xyzsql_process_rollback(); break; case stmt_type::_quit_stmt: xyzsql_exit(); break; case stmt_type::_exefile_stmt: xyzsql_batch(); break; default: xyzsql_unknown_stmt(); } stmt_queue.pop(); } } return 0; } <file_sep>#include <iostream> #include <string> #include "BufferManager.h" #include <cstdio> #include <cstdlib> int main() { BufferManager buf; //buf.createFile("new"); //std::ofstream *y = new std::ofstream("testfstream"); //y->seekp(100); //(*y) << "ff"; //y->close(); //return 0; ////x.seekp(100); ////x << "123"; ////x.close(); //return 0; Block test; test.fillOne(); //buf.createFile("db"); //buf.createFile("index"); //buf.createFile("catalog"); buf.writeBlock("index", 0, test); buf.writeBlock("index", 0, test); buf.writeBlock("index", 2, test); buf.printQ(); buf.writeBlock("db", 2, test); buf.printQ(); for (auto i = 0; i < 32; i++) { buf.writeBlock("db", i, test); buf.printQ(); } buf.writeBlock("db", 31, test); Block x = buf.readBlock("db", 31); std::cout << x.equal(test) << std::endl; buf.printQ(); x = buf.readBlock("index", 0); std::cout << x.equal(test) << std::endl; buf.printQ(); for (auto i = 0; i < 32; i++) { x = buf.readBlock("db", i); buf.printQ(); std::cout << x.equal(test) << std::endl; } //buf.createFile("go"); //buf.writeBlock("go", 2, test); buf.flushQ(); system("pause"); return 0; } <file_sep>/* * mainTest.cpp * * Created on: 2014Äê11ÔÂ7ÈÕ * Author: Admin */ #include <iostream> #include <ctime> #include <cstdlib> #include "IndexManager.h" #define MAX 80 using namespace std; void f(string a[], int num) { int n=num; for(int j=0;j<n-1;j++){ int i=rand()%num; string tmp; tmp=a[num-1]; a[num-1]=a[i]; a[i]=tmp; num--; } } int main() { srand(time(NULL)); int a,b,c; int32_t block[MAX]={0}; int32_t offset[MAX]={0}; string value[MAX]; string dvalue[190]; BufferManager myBufferManager; IndexManager 
i(&myBufferManager); // completes `IndexManager` declared at the end of the previous chunk
	//myBufferManager.createFile("abc.db");
	//cout<<"ok"<<endl;
	// Fill value[] / dvalue[] with the decimal string forms of 0..MAX-1 and 0..189.
	for(int i=0;i<MAX;i++)
	{
		stringstream ss;
		ss.clear();
		ss<<i;
		ss>>value[i];
	}
	for(int i=0;i<190;i++)
	{
		stringstream ss;
		ss.clear();
		ss<<i;
		ss>>dvalue[i];
	}
	// Shuffle both arrays so index insertion order is randomized.
	f(value,MAX);
	f(dvalue,190);
	//	for(int i=0;i<MAX;i++)
	//		cout<<value[i]<<endl;
	//	cout<<endl;
	//	for(int i=0;i<190;i++)
	//		cout<<dvalue[i]<<endl;
	// Build a char(200) index over the shuffled values; all record block/offset
	// pointers are zero in this test.
	i.createIndex("abc.db","char",200,MAX,value,block,offset);
	//	int j;
	//	for(j=0;j<190;j++){
	//		//cout<<"delete:"<<dvalue[j]<<endl;
	//		i.deleteNode("abc.db",dvalue[j]);
	//	}
	//	i.print("abc.db");
	//	while(true)
	//	{
	//		//getchar();
	//		cout<<"delete:"<<dvalue[j]<<endl;
	//		//getchar();
	//		i.deleteNode("abc.db",dvalue[j]);
	//		cout<<"delete finish"<<endl;
	//		//		getchar();
	//		//		i.print("abc.db");
	//		j++;
	//	}
	//	while(true){
	//		string str;
	//		cout<<"please input:";
	//		cin>>str;
	//
	//		if(str!="delete"&&str!="insert")
	//			continue;
	//		cin>>c;
	//		string tmp11;
	//		stringstream sss;
	//		sss.clear();
	//		sss<<c;
	//		sss>>tmp11;
	//		if(str=="insert")
	//			i.insertNode("a",tmp11,123,456);
	//		else
	//			i.deleteNode("a",tmp11);
	//		i.print("aa");
	//	}afasd
	return 0;
}
<file_sep>#include "Block.h"
#include <cstring>

// Default-constructed blocks are left uninitialized; callers use fillZero()
// or fillOne() when deterministic contents are needed.
Block::Block() {
}

// Set every byte of the block to 0xff.
void Block::fillOne() {
	for (auto i = 0; i < BLOCKSIZE; i++) {
		data[i] = 0xff;
	}
}

// Zero the whole block.
void Block::fillZero() {
	std::memset(data, 0, BLOCKSIZE);
}

// Raw access to the backing buffer for file I/O.
char * Block::dataPointer() {
	return reinterpret_cast<char *>(&data);
}

Block::~Block() {
}

// Byte-wise equality against another block.
bool Block::equal(Block &b) {
	bool f = true;
	for (auto i = 0; i < BLOCKSIZE; i++) {
		if (this->data[i] != b.data[i]) {
			f = false;
			break;
		}
	}
	return f;
}
<file_sep>#include "evaluator.h"

// Queue of parsed statements awaiting execution, filled by the parser via
// xyzsql_emit_stmt.
queue<pair<stmt_type, statement *> > stmt_queue;

extern catalog_manager catm;
extern string base_addr;
extern RecordManager RecordManager;
extern BufferManager BufferManager;
extern IndexManager IndexManager;
extern ifstream bat;
extern int yyparse();

// Flex scanner plumbing for feeding buffers to the parser.
typedef struct yy_buffer_state *YY_BUFFER_STATE;
extern YY_BUFFER_STATE yy_create_buffer ( FILE *file, int size );
extern
void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer );
extern void yy_delete_buffer (YY_BUFFER_STATE b );

// Check that the record values in *r match the table schema *t:
// same arity, matching data types, and char values short enough for their
// declared column width. Returns true when the record is insertable.
bool verify_validation(vector<record_value> *r, vector<table_column *> *t) {
	auto i = r->begin();
	auto j = t->begin();
	if (r->size() != t->size())
		return false;
	for( ; i != r->end(); i++, j++) {
		if ((*i).data_type != (*j)->data_type)
			return false;
		// >= because the stored char field needs room for its terminator.
		if ((*i).data_type == table_column::CHARTYPE && strlen((*i).as_str()) >= (*j)->str_len)
			return false;
	}
	return true;
}

// Create a scratch table (used for intermediate relational-algebra results)
// named with a fresh UUID, holding the columns in *t. Returns the table name.
// NOTE(review): `cs` (and the column list it references) is handed to
// xyzsql_process_create_table and never freed here -- verify ownership.
string create_temp_table(vector<table_column *> *t) {
	uuid_t out;
	char *uuid_str = new char[40];
	uuid_generate(out);
	uuid_unparse(out, uuid_str);
	create_table_stmt *cs = new create_table_stmt(uuid_str, t);
	xyzsql_process_create_table(cs);
	string res(uuid_str);
	delete[] uuid_str;
	return res;
}

// Evaluate every join condition in *conditions against the record pair (c, d);
// true only if all conditions hold.
// NOTE(review): the left operand is built as {col2, c.get_value(col1)} --
// col2 where col1 looks intended. Confirm against condition::calc before
// changing; it may only use the value slot of the left operand.
bool calc_conditions(vector<condition *> *conditions, Record &c, Record &d) {
	for(auto x : *conditions) {
		auto col1 = catm.get_column(x->left_attr);
		auto col2 = catm.get_column(x->right_attr);
		if (x->calc( {col2, c.get_value(col1)}, {col2, d.get_value(col2)} ) == false)
			return false;
	}
	return true;
}

// Single-record overload: evaluate selection conditions (attribute vs.
// constant) against record c; true only if all hold.
bool calc_conditions(vector<condition *> *conditions, Record &c) {
	for(auto x : *conditions) {
		auto col = catm.get_column(x->left_attr);
		if (x->calc( {col, c.get_value(col)} ) == false)
			return false;
	}
	return true;
}

// Recursively materialize the relational-algebra tree rooted at `root`:
// each operator node (PROJECTION / SELECTION / JOIN) evaluates its children,
// writes its result into a fresh temp table, stores that table's name in
// root->table and marks root->flag. DIRECT nodes refer to base tables as-is.
// (Body continues in the next chunk.)
void calc_algric_tree(algbric_node *root) {
	if (root->flag == true) return;
	string table_name;
	int blockNum, offset;
	// All three list pointers start aliased to one fresh vector; the old_*
	// aliases are re-pointed at real schemas per operator below.
	auto new_col_list = new vector<table_column *>, old_col_list = new_col_list, old_col_list2 = new_col_list;
	// auto old_col_list = catm.exist_relation((root->left->table))->cols;
	switch ( root->op ) {
		case algbric_node::DIRECT :
			// Base table: nothing to compute.
			root->flag = true;
			return;
		case algbric_node::PROJECTION : {
			if (!root->left->flag)
				calc_algric_tree(root->left);
			old_col_list = catm.exist_relation((root->left->table))->cols;
			// Build the projected schema; qualified names are kept only when
			// projecting a derived (non-DIRECT) input.
			for( auto x : *(root->projection_list) ) {
				auto att = catm.exist_relation(x->relation_name)->get_column(x->attribute_name);
				new_col_list->push_back(new table_column((root->left->op ==
algbric_node::DIRECT ? x->attribute_name.c_str() : x->full_name.c_str()), att->data_type, att->str_len, 0 )); } table_name = create_temp_table(new_col_list); root->table = table_name; auto cursor = RecordManager.getCursor(root->left->table, catm.calc_record_size(root->left->table)); while (cursor->next()) { Record r = cursor->getRecord(); vector<record_value> result; for(auto i = new_col_list->begin(); i != new_col_list->end(); i++) { for(auto j = old_col_list->begin(); j != old_col_list->end(); j++ ) { if ( (*i)->name == (*j)->name ) { result.push_back(r.values[j-old_col_list->begin()]); } } } RecordManager.insertRecord(table_name, Record(result, new_col_list), blockNum, offset); } if (root->left->op != algbric_node::DIRECT) catm.drop_table(root->left->table); root->flag = true; return; } case algbric_node::SELECTION : { string left_name = root->left->table; old_col_list = catm.exist_relation(left_name)->cols; for( auto x : *(old_col_list) ) { new_col_list->push_back( new table_column(x->name.c_str(), x->data_type, x->str_len, x->flag )); } table_name = create_temp_table(new_col_list); root->table = table_name; condition *p = NULL, *eq = NULL; for(auto x : (root->conditions)) { if ( catm.is_indexed(x->left_attr) ) { p = x; if (x->op == condition::EQUALTO) eq = x; } } if ( eq != NULL ) p = eq; if ( p != NULL ) { cout << "Index used: " << p->left_attr->full_name << endl; int record_size = catm.calc_record_size(root->left->table); auto t = p->left_attr; indexIterator cursor; int asdf = IndexManager.selectNode(cursor, t->relation_name + "/index_" + t->attribute_name + ".db", p->op, (p->v).to_str(catm.get_data_type(t))); if ( asdf == 0 ) { int b = 0, c = 0; while (cursor.next(b, c) == 0) { Record a = RecordManager.getRecord(t->relation_name, b, c, record_size); if (calc_conditions(&(root->conditions), a)) RecordManager.insertRecord(table_name, a, blockNum, offset); } } } else { string t = root->left->table; int record_size = catm.calc_record_size(t); indexIterator 
cursor; int asdf = IndexManager.getStarter(cursor, t + "/index_" + catm.get_primary(t) + ".db"); if ( asdf == 0 ) { int b = 0, c = 0; while (cursor.next(b, c) == 0) { Record a = RecordManager.getRecord(t, b, c, record_size); if (calc_conditions(&(root->conditions), a)) RecordManager.insertRecord(table_name, a, blockNum, offset); } } } root->flag = true; return; } case algbric_node::JOIN : { if (!root->left->flag) calc_algric_tree(root->left); if (!root->right->flag) calc_algric_tree(root->right); if ( catm.get_size(root->right->table) < catm.get_size(root->left->table) ) { auto tmp = root->left; root->left = root->right; root->right = tmp; } old_col_list = catm.exist_relation((root->left->table))->cols; old_col_list2= catm.exist_relation((root->right->table))->cols; for( auto x : *old_col_list ) { new_col_list->push_back(new table_column(x->name.c_str(), x->data_type, x->str_len, 0 )); } for( auto x : *old_col_list2 ) { new_col_list->push_back(new table_column(x->name.c_str(), x->data_type, x->str_len, 0 )); } table_name = create_temp_table(new_col_list); root->table = table_name; auto outter_table = catm.exist_relation(root->left->table), inner_table = catm.exist_relation(root->right->table); int outter_size = catm.calc_record_size(root->left->table), inner_size = catm.calc_record_size(root->right->table); outter_table->get_size(); condition * p = NULL; for ( auto x : root->conditions ) { if ( outter_table->get_column(x->left_attr->full_name) != NULL && inner_table->get_column(x->right_attr->full_name) != NULL) { } else if ( inner_table->get_column(x->left_attr->full_name) != NULL && outter_table->get_column(x->right_attr->full_name) != NULL) { auto tmp = x->left_attr; x->left_attr = x->right_attr; x->right_attr = tmp; } assert( outter_table->get_column(x->left_attr->full_name) != NULL && inner_table->get_column(x->right_attr->full_name) != NULL); if ( inner_table->is_indexed(x->right_attr->full_name) ) { p = x; } } auto cursor1 = 
RecordManager.getCursor(root->left->table, outter_size); cout << "Index used: " << p->right_attr->full_name << endl; while (cursor1->next()) { Record r1 = cursor1->getRecord(); if ( p ) { // nested-index join indexIterator a; int asdf = IndexManager.getStarter(a, root->right->table + "/index_" + p->right_attr->full_name + ".db"); if (asdf == 0) { int b = 0, c = 0; while (a.next(b, c) == 0) { Record r2 = RecordManager.getRecord(root->right->table, b, c, inner_size); if ( calc_conditions(&(root->conditions), r1, r2) ) { vector<record_value> result(r1.values); result.insert(result.end(), r2.values.begin(), r2.values.end()); RecordManager.insertRecord(table_name, Record(result, new_col_list), blockNum, offset); } } } } else { // nested-loop join auto cursor2 = RecordManager.getCursor(root->right->table, inner_size); while (cursor2->next()) { Record r2 = cursor2->getRecord(); if ( calc_conditions(&(root->conditions), r1, r2) ) { vector<record_value> result(r1.values); result.insert(result.end(), r2.values.begin(), r2.values.end()); RecordManager.insertRecord(table_name, Record(result, new_col_list), blockNum, offset); } } delete cursor2; } } delete cursor1; if (root->right->op != algbric_node::DIRECT) catm.drop_table(root->right->table); if (root->left->op != algbric_node::DIRECT) catm.drop_table(root->left->table); root->flag = true; return; } } } void xyzsql_emit_stmt(stmt_type t, statement *stmt) { stmt_queue.push({t, stmt}); } void xyzsql_batch() { auto s = dynamic_cast<exefile_stmt *>(stmt_queue.front().second); // FILE *tmp = fopen(s->file_name.c_str(), "r"); // fseek(tmp, 0, SEEK_END); // int buf_length = ftell(tmp); // fclose(tmp); bat.open(s->file_name); // yyin = bat; // YY_BUFFER_STATE new_buffer = yy_create_buffer( yyin, buf_length ); // yy_switch_to_buffer(new_buffer); // yyparse(); // yy_delete_buffer(new_buffer); // fclose(bat); } void xyzsql_finalize() { catm.write_back(); BufferManager.flushQ(); } void xyzsql_exit() { cout << "now exit!" 
<< endl;
    xyzsql_finalize();
    exit(0);
}

// Execute CREATE TABLE: register the relation in the catalog, create its
// master record file, and build an index file for every UNIQUE / PRIMARY
// KEY column.
// s may be passed directly; when NULL the statement is taken from the
// front of the statement queue and every column name is qualified with
// the table name ("table.column").
void xyzsql_process_create_table(create_table_stmt *s ) {
    cout << "table created." << endl;
    if ( s == NULL ) {
        s = dynamic_cast<create_table_stmt *>(stmt_queue.front().second);
        for ( auto x : *(s->cols) ) {
            x->name = s->name + "." + x->name;
        }
    }
    catm.add_relation(s);
    catm.write_back();
    RecordManager.createMaster(s->name);
    for(auto x : *(s->cols)) {
        if(x->flag & (table_column::unique_attr | table_column::primary_attr)) {
            IndexManager.createIndex(s->name + "/index_" + x->name + ".db", data_type_to_str(x->data_type), x->str_len, 0, {}, {}, {});
        }
    }
}

// Execute CREATE INDEX: only allowed on UNIQUE attributes; it just sets
// the index flag in the catalog (the physical index file already exists
// for unique columns — see xyzsql_process_create_table).
void xyzsql_process_create_index() {
    auto s = dynamic_cast<create_index_stmt *>(stmt_queue.front().second);
    int &flag = catm.get_column(s->attr)->flag;
    if ( flag & table_column::unique_attr )
        flag |= table_column::index_attr;
    else
        throw invalid_argument("Index must be created in unique attribute!");
    cout << "index created." << endl;
}

// Execute SELECT: build a left-deep algebraic operator tree — selections
// pushed down onto each base table, joins ordered greedily by ascending
// table size, an optional projection on top — evaluate it with
// calc_algric_tree(), then print every tuple of the result relation.
void xyzsql_process_select() {
    cout << "select parsed." << endl;
    auto s = dynamic_cast<select_stmt *>(stmt_queue.front().second);
    // checker
    // Push selection: wrap each base table in a SELECTION node holding all
    // of its single-table conditions.
    vector< algbric_node * > leaf_nodes;
    for(auto x : *(s->table_list)) {
        // add to select queue
        algbric_node *direct = new algbric_node(algbric_node::DIRECT);
        direct->table = *x;
        algbric_node *select = new algbric_node(algbric_node::SELECTION);
        for ( auto y : *(s->condition_list) ) {
            // flag == false marks an attribute-vs-constant condition.
            if ( y->flag == false && y->left_attr->relation_name == *(x) ) {
                select->conditions.push_back(y);
            }
        }
        if (select->conditions.empty()) {
            // No filter for this table: use it directly.
            direct->flag = true;
            leaf_nodes.push_back(direct);
            delete select;
        } else {
            // Materialise the filtered table right away.
            select->left = direct;
            calc_algric_tree(select);
            leaf_nodes.push_back(select);
        }
    }
    // Greedy join ordering: repeatedly pick the smallest remaining
    // relation, join it in, and attach every join condition that now
    // connects the two sides.
    algbric_node *root = NULL;
    set<string> rel_set;  // base-relation names already joined into root
    while (!leaf_nodes.empty()) {
        int tmp = 0xFFFFFF;  // sentinel "infinite" size
        auto label = leaf_nodes.begin();
        for(auto i = leaf_nodes.begin(); i != leaf_nodes.end(); i++) {
            if ( catm.get_size(((*i)->table)) < tmp ) {
                tmp = catm.get_size(((*i)->table));
                label = i;
            }
        }
        if (root == NULL) {
            root = *label;
            rel_set.insert(root->op == algbric_node::DIRECT ? (root->table) : (root->left->table));
        } else {
            auto tmp = new algbric_node(algbric_node::JOIN);
            tmp->left = root;
            tmp->right = *label;
            root = tmp;
            // Base-relation name feeding the right child (a materialised
            // selection keeps its source under left->table).
            string right_name = root->right->op == algbric_node::DIRECT ? (root->right->table) : (root->right->left->table);
            for(auto x : *(s->condition_list)) {
                // flag == true marks an attribute-vs-attribute (join) condition.
                if (x->flag == true) {
                    if ( (rel_set.count(x->left_attr->relation_name) && right_name == x->right_attr->relation_name) ||
                         (rel_set.count(x->right_attr->relation_name) && right_name == x->left_attr->relation_name) ) {
                        root->conditions.push_back(x);
                    }
                }
            }
            rel_set.insert(right_name);
        }
        leaf_nodes.erase(label);
    }
    // set root: add a projection node when an explicit column list was given.
    if (!s->projection_list->empty()) {
        auto tmp = new algbric_node(algbric_node::PROJECTION);
        tmp->projection_list = s->projection_list;
        tmp->left = root;
        root = tmp;
    }
    calc_algric_tree(root);
    // Print the result relation tuple by tuple, formatting each value by
    // its column's declared type.
    // NOTE(review): this cursor is never deleted (other call sites delete
    // theirs) — looks like a leak; confirm.
    auto cursor = RecordManager.getCursor(root->table, catm.calc_record_size(root->table));
    while (cursor->next()) {
        Record t = cursor->getRecord();
        auto j = t.table_info->begin();
        for (record_value x : t.values) {
            cout << x.to_str((*j)->data_type) << " ";
            j++;
        }
        cout << endl;
    }
    // The result is a temporary table unless the plan degenerated to a
    // single base table.
    if (root->op != algbric_node::DIRECT) catm.drop_table(root->table);
    delete root;
}

// Execute DROP TABLE: remove the relation (catalog entry and files).
void xyzsql_process_drop_table() {
    auto s = dynamic_cast<drop_table_stmt *>(stmt_queue.front().second);
    catm.drop_table(s->table_name);
    cout << "table dropped." << endl;
}

// Execute DROP INDEX: clear the index flag on the attribute.
void xyzsql_process_drop_index() {
    auto s = dynamic_cast<drop_index_stmt *>(stmt_queue.front().second);
    int &flag = catm.get_column(s->attr)->flag;
    // Set-then-subtract clears the bit regardless of its previous state
    // (equivalent to flag &= ~index_attr).
    flag |= table_column::index_attr;
    flag -= table_column::index_attr;
    cout << "index dropped."
<< endl;
}

// Execute DELETE.
// Without a WHERE clause the table's files are wiped and recreated empty.
// With conditions, candidate (block, offset) pairs are first collected
// into the buffer manager's "trash can" — via an index scan when some
// condition's left attribute is indexed (an equality condition is
// preferred over a range one), otherwise via a full scan driven by the
// primary-key index — and then each trashed record is removed from the
// data file and from every unique/primary index.
void xyzsql_process_delete() {
    auto s = dynamic_cast<delete_stmt *>(stmt_queue.front().second);
    if (s->condition_list->empty()) {
        // delete all: drop the physical files and recreate empty
        // master/index files; reset the cached record count.
        // NOTE(review): shell command built from the user-supplied table
        // name — injection-prone; flagging, not changing.
        system(("rm " + s->table_name + "/*.db").c_str());
        RecordManager.createMaster(s->table_name);
        auto table_info = catm.exist_relation(s->table_name);
        auto cols = table_info->cols;
        table_info->set_size(0);
        for(auto x : *cols)
            if(x->flag & (table_column::unique_attr | table_column::primary_attr))
                IndexManager.createIndex(s->table_name + "/index_" + x->name + ".db", data_type_to_str(x->data_type), x->str_len, 0, {}, {}, {});
    } else {
        auto table_info = catm.exist_relation(s->table_name);
        int record_size = catm.calc_record_size(s->table_name);
        BufferManager.newTrashCan();
        // unique: pick an indexed condition to drive the scan; equality
        // (eq) is preferred over any other indexed condition (p).
        condition *p = NULL, *eq = NULL;
        for(auto x : *(s->condition_list)) {
            if ( catm.is_indexed(x->left_attr) ) {
                p = x;
                if (x->op == condition::EQUALTO) eq = x;
            }
        }
        if ( eq != NULL || p != NULL ) {  // eq != NULL implies p != NULL
            cout << "Index used: " << p->left_attr->full_name << endl;
            auto t = eq == NULL ?
                     p->left_attr : eq->left_attr;
            if (eq != NULL) p = eq;
            indexIterator a;
            int asdf = IndexManager.selectNode(a, base_addr + "/" + t->relation_name + "/index_" + t->attribute_name + ".db", p->op, (p->v).to_str(catm.get_data_type(t)));
            if ( asdf == 0 ) {
                int b = 0, c = 0;
                // (b, c) = (block number, offset) of each candidate record.
                while (a.next(b, c) == 0) {
                    // NOTE(review): this Record shadows the iterator 'a';
                    // it works because only the loop condition (evaluated
                    // in the outer scope) uses the iterator, but renaming
                    // one of them would be clearer.
                    Record a = RecordManager.getRecord(t->relation_name, b, c, record_size);
                    if (calc_conditions(s->condition_list, a)) BufferManager.appendTrashCan(b, c);
                }
            }
        } else {
            // No indexed condition: full scan through the primary-key index.
            indexIterator a;
            int asdf = IndexManager.getStarter(a, s->table_name + "/index_" + catm.get_primary(s->table_name) + ".db");
            if (asdf == 0) {
                int b = 0, c = 0;
                while (a.next(b, c) == 0) {
                    Record a = RecordManager.getRecord(s->table_name, b, c, record_size);
                    if (calc_conditions(s->condition_list, a)) BufferManager.appendTrashCan(b, c);
                }
            }
        }
        // Second pass: physically delete every collected record and its
        // index entries, keeping the cached record count in step.
        BufferManager.beginFetchTrash();
        int blockNum, offset;
        while(BufferManager.fetchTrash(blockNum, offset)) {
            auto r = RecordManager.getRecord(s->table_name, blockNum, offset, record_size);
            for(auto x : *(r.table_info)) {
                if(x->flag & (table_column::unique_attr | table_column::primary_attr)) {
                    indexIterator cursor;
                    IndexManager.deleteNode(s->table_name + "/index_" + x->name + ".db", r.get_value(x).to_str(x->data_type));
                }
            }
            RecordManager.deleteRecord(s->table_name, blockNum, offset, record_size);
            table_info->dec_size();
        }
    }
    cout << "records deleted."
<< endl;
}

// Execute INSERT: validate the value list against the schema, reject
// duplicate keys on every unique/primary column, append the record to the
// data file, then register its location in each unique/primary index.
// s may be passed directly; when NULL it is taken from the front of the
// statement queue.
void xyzsql_process_insert(insert_stmt *s ) {
    if ( s == NULL ) s = dynamic_cast<insert_stmt *>(stmt_queue.front().second);
    auto t = catm.exist_relation(s->table_name)->cols;
    if (verify_validation(s->values, t) == false)
        throw invalid_argument("Uncapatable values");
    auto table_info = catm.exist_relation(s->table_name);
    Record r(*(s->values), table_info->cols);
    // Uniqueness check: an index hit (selectNode returning 0) on any
    // unique/primary column means the key already exists.
    for(auto x : *(r.table_info)) {
        if(x->flag & (table_column::unique_attr | table_column::primary_attr)) {
            string filename;
            indexIterator cursor;
            int asdf = IndexManager.selectNode(cursor, s->table_name + "/index_" + x->name + ".db", condition::EQUALTO, r.get_value(x).to_str(x->data_type));
            if (asdf == 0) throw invalid_argument("Unique Key already exists.");
        }
    }
    int blockNum, offset;
    RecordManager.insertRecord(s->table_name, r, blockNum, offset);
    table_info->inc_size();
    // Add the new record's (block, offset) to every unique/primary index.
    for(auto x : *(r.table_info)) {
        if(x->flag & (table_column::unique_attr | table_column::primary_attr)) {
            // NOTE(review): uses get_value(x->name) here but get_value(x)
            // above — presumably overloads resolving to the same column;
            // confirm.
            IndexManager.insertNode(s->table_name + "/index_" + x->name + ".db", r.get_value(x->name).to_str(x->data_type) , blockNum, offset);
        }
    }
}

// Fallback for statements the parser did not recognise.
void xyzsql_unknown_stmt() {
    cout << "unknown statement, check syntax again." << endl;
}

// Transaction statements are accepted but are no-ops in this engine.
void xyzsql_process_commit() {
    cout << "Transaction committed." << endl;
}

void xyzsql_process_rollback() {
    cout << "Transaction rollbacked." << endl;
}

void xyzsql_process_transaction() {
    cout << "Transaction on."
<< endl;
}
<file_sep>#include "stmt.h"
#include <sstream>

// Map a table_column type tag to its SQL keyword ("" for unknown tags).
string data_type_to_str(int data_type) {
    switch (data_type) {
        case table_column::INTTYPE : return "int";
        case table_column::FLOATTYPE: return "float";
        case table_column::CHARTYPE : return "char";
        default : return "";
    }
}

// Reinterpret the stored 4-byte payload as a float.
float record_value::as_float() const {
    float tmp = 0;
    memcpy(&tmp, &value, 4);
    return tmp;
}

// Reinterpret the stored payload as a C-string pointer.
char *record_value::as_str() const {
    // char *tmp;
    // memcpy(&tmp, &value, 4);
    return (char *)value;
}

int record_value::as_int() const { return value; }

// Render the value as text according to the column's declared type.
string record_value::to_str(int data_type) {
    stringstream x;
    switch (data_type) {
        case table_column::INTTYPE : x << as_int(); return x.str();
        case table_column::FLOATTYPE : x << as_float(); return x.str();
        case table_column::CHARTYPE : return string(as_str());
        default : return "";
    }
}

// Three-way comparators (-1 / 0 / 1), one per physical type.
int record_value::compare_as_int(const record_value &a, const record_value &b) {
    if (a.as_int() < b.as_int()) return -1;
    else if (a.as_int() > b.as_int()) return 1;
    else return 0;
}

int record_value::compare_as_float(const record_value &a, const record_value &b) {
    if (a.as_float() < b.as_float()) return -1;
    else if (a.as_float() > b.as_float()) return 1;
    else return 0;
}

int record_value::compare_as_str(const record_value &a, const record_value &b) {
    return strcmp(a.as_str(), b.as_str());
}

// Dispatch to the comparator matching data_type; unknown tags yield 0.
int record_value::compare(int data_type, const record_value &a, const record_value &b) {
    switch (data_type) {
        case table_column::INTTYPE : return compare_as_int(a, b); break;
        case table_column::FLOATTYPE : return compare_as_float(a, b); break;
        case table_column::CHARTYPE : return compare_as_str(a, b); break;
        default : return false;
    }
}

// Evaluate an attribute-vs-attribute condition (flag == true) on two
// (column, value) operands; the left column's type drives the comparison.
bool condition::calc(pair<table_column *, record_value> p1, pair<table_column *, record_value> p2) {
    assert(flag == true);
    switch(op) {
        case EQUALTO : return record_value::compare(p1.first->data_type, p1.second, p2.second) == 0;
        case GREATERTHAN : return record_value::compare(p1.first->data_type, p1.second, p2.second) > 0;
        case LESSTHAN : return record_value::compare(p1.first->data_type, p1.second, p2.second) < 0;
        case GREATER_EQUAL : return record_value::compare(p1.first->data_type, p1.second, p2.second) >= 0;
        case LESS_EQUAL : return record_value::compare(p1.first->data_type, p1.second, p2.second) <= 0;
        case NOT_EQUAL : return record_value::compare(p1.first->data_type, p1.second, p2.second) != 0;
        default : return false;
    }
}

// Evaluate an attribute-vs-constant condition (flag == false) against the
// stored literal v.
bool condition::calc(pair<table_column *, record_value> p) {
    assert(flag == false);
    switch(op) {
        case EQUALTO : return record_value::compare(p.first->data_type, p.second, v) == 0;
        case GREATERTHAN : return record_value::compare(p.first->data_type, p.second, v) > 0;
        case LESSTHAN : return record_value::compare(p.first->data_type, p.second, v) < 0;
        case GREATER_EQUAL : return record_value::compare(p.first->data_type, p.second, v) >= 0;
        case LESS_EQUAL : return record_value::compare(p.first->data_type, p.second, v) <= 0;
        case NOT_EQUAL : return record_value::compare(p.first->data_type, p.second, v) != 0;
        default : return false;
    }
}

// Destructors: each statement owns its heap-allocated member lists.
select_stmt::~select_stmt() {
    for (auto x : *projection_list) delete x;
    delete projection_list;
    for (auto x : *table_list) delete x;
    delete table_list;
    for (auto x : *condition_list) delete x;
    delete condition_list;
}

create_table_stmt::~create_table_stmt() { }

create_index_stmt::~create_index_stmt() { delete attr; }

insert_stmt::~insert_stmt() {
    // CHAR values own heap-allocated strings; numeric values do not.
    for ( auto x : *values )
        if (x.data_type == table_column::CHARTYPE) delete[] x.as_str();
    delete values;
}

drop_index_stmt::~drop_index_stmt() { delete attr; }

delete_stmt::~delete_stmt() {
    for( auto x : *condition_list ) delete x;
    delete condition_list;
}

condition::~condition() {
    if (left_attr != nullptr) delete left_attr;
    if (right_attr != nullptr) delete right_attr;
}

// Recursively frees the whole operator subtree.
algbric_node::~algbric_node() {
    if (left != nullptr) delete left;
    if (right != nullptr) delete right;
}
<file_sep>#include "RecordManager.h"

// Fixed file names used inside every table's directory.
const std::string RecordManager::master = "master.db";
const std::string RecordManager::trash = "trash.tmp";
RecordManager::RecordManager(){
    bm = nullptr;
    cm = nullptr;
    im = nullptr;
    trashFile = nullptr;
    cursor = nullptr;
}

// Wire up the collaborating managers (called once at startup).
void RecordManager::Init(BufferManager* BM, catalog_manager* CM, IndexManager * IM) {
    bm = BM;
    cm = CM;
    im = IM;
}

// Create an empty master data file for a table; block 0 is a header
// holding only the count of data blocks.
void RecordManager::createMaster(std::string tableName) {
    bm->createFile(tableName + "/" + master);
    recordBlock b;
    b.fillZero();
    b.setBlockCount(0);
    bm->writeBlock(tableName + "/" + master, 0, b);
    // bm->printQ();
}

// Remove the record at (blocknum, offset) from the table's master file.
void RecordManager::deleteRecord(std::string tableName, int blocknum, int offset, int size) {
    std::string filename = tableName + "/" + master;
    recordBlock r( bm->readBlock(filename, blocknum) );
    r.deleteRecord(size, offset);
    bm->writeBlock(filename, blocknum, r);
}

// Append newRecord to the first data block with free space (first-fit);
// a new block is allocated when none has room. The record's final
// location is returned through blockNum/offset.
void RecordManager::insertRecord(std::string tableName, Record newRecord, int& blockNum, int& offset) {
    std::string filename = tableName + "/" + master;
    // insert into file - block using free list
    //
    bool finish = false;
    recordBlock b = bm->readBlock(filename, 0);  // header block (block count)
    // bm->printQ();
    recordBlock d;
    int i;
    for (i = 0; i < b.getBlockCount(); i++) {
        d = bm->readBlock(filename, i + 1);
        finish = d.insertRecord(newRecord, offset);
        if (finish) {
            bm->writeBlock(filename, i + 1, d);
            blockNum = i+1;
            break;
        }
    }
    if (!finish) {
        // Every existing block is full: start a new one and bump the
        // block count in the header.
        recordBlock newBlock;
        newBlock.Init(newRecord.size());
        newBlock.insertRecord(newRecord, offset);
        bm->writeBlock(filename, b.getBlockCount() + 1, newBlock);
        blockNum = b.getBlockCount() + 1;
        b.setBlockCount(b.getBlockCount() + 1);
        bm->writeBlock(filename, 0, b);
    }
    // update index
}

// Materialise the record stored at (blocknum, offset) together with its
// table schema from the catalog.
Record RecordManager::getRecord(std::string tableName, int blocknum, int offset, int size) {
    std::string filename = tableName + "/" + master;
    auto cat = cm->exist_relation(tableName);
    recordBlock r = bm->readBlock(filename, blocknum);
    return Record(r.getRecord(size, offset), cat->cols);
}

// Return a freshly allocated full-table cursor; the caller owns (and must
// delete) it. The commented-out lines are an older design that reused a
// single member cursor.
Cursor* RecordManager::getCursor(std::string tableName, int size) {
    std::string filename = tableName + "/" + master;
    recordBlock start = bm->readBlock(filename, 0);
    // if (cursor != nullptr) {
    //     delete cursor;
    //     cursor = nullptr;
    // }
    // cursor = new Cursor(bm, cm, tableName, 1, 0, size, start.getBlockCount());
    // return cursor;
    return new Cursor(bm, cm, tableName, 1, 0, size, start.getBlockCount());
}

RecordManager::~RecordManager() {}
<file_sep>#include "catalog.h"
#include <sys/stat.h>
#include <cassert>

// Load a catalog from its on-disk text file: the table name followed by
// one (name, data_type, str_len, flag) tuple per column.
catalog::catalog(const string &_name) {
    ifstream in(_name);
    cols = new vector<table_column *>;
    in >> name;
    string col_name;
    while(in >> col_name) {
        int data_type, str_len, flag;
        in >> data_type >> str_len >> flag;
        cols->push_back(new table_column(col_name.c_str(), data_type, str_len, flag));
    }
    in.close();
}

// Serialise this catalog to <addr>/catalog in the same text format the
// constructor above reads.
void catalog::write_back(const string &addr) {
    ofstream out(addr + "/catalog");
    cout << addr + "/catalog" << endl;
    out << name << endl;
    for(auto x : *cols) {
        out << x->name << " " << x->data_type << " " << x->str_len << " " << x->flag << endl;
    }
    out.close();
}

// Linear lookup of a column by (qualified) name; NULL when absent.
table_column *catalog::get_column(const string &attr_name) {
    for(auto x : *cols) {
        if (x->name == attr_name) return x;
    }
    return NULL;
}

// True when the named column carries the UNIQUE or PRIMARY flag.
bool catalog::is_unique(const string &attr_name) {
    for(auto x : *cols) {
        if (x->name == attr_name)
            return x->flag & (table_column::unique_attr | table_column::primary_attr);
    }
    return false;
}

// True when the named column carries the INDEX or PRIMARY flag.
bool catalog::is_indexed(const string &attr_name) {
    for(auto x : *cols) {
        if (x->name == attr_name)
            return x->flag & (table_column::index_attr | table_column::primary_attr);
    }
    return false;
}

// Cached record-count accessors (persisted by catalog_manager::write_back).
void catalog::set_size(int _count) { count = _count; }
int catalog::get_size() { return count; }
void catalog::inc_size() { count++; }
void catalog::dec_size() { count--; }

// Name of the primary-key column ("" when none is declared).
string catalog::get_primary() {
    for(auto x : *cols) {
        if (x->flag & table_column::primary_attr) return x->name;
    }
    return "";
}

const string &catalog::get_name() { return name; }

// Ordinal position of a column within the schema; -1 when absent.
int catalog::get_pos(const string &attr_name) {
    for(auto i = cols->begin(); i != cols->end(); i++)
        if ((*i)->name == attr_name) return (i-cols->begin());
    return -1;
}

// Load every table listed in <base_addr>/catalog (table name + record
// count per entry), normalising base_addr to end with '/'.
catalog_manager::catalog_manager(const string &_base_addr) :
base_addr(_base_addr.back() == '/' ? _base_addr : _base_addr + "/") {
    assert( base_addr.back() == '/' );
    ifstream in(base_addr + "catalog");
    string tmp;
    if (in.is_open()) {
        while (in >> tmp) {
            relations[tmp] = new catalog(base_addr + tmp + "/catalog");
            int count;
            in >> count;
            relations[tmp]->set_size(count);
        }
    }
    in.close();
}

// Look up a relation by name; throws invalid_argument when unknown.
catalog * catalog_manager::exist_relation(const string &rel_name) {
    auto tmp = relations.find(rel_name);
    if ( tmp == relations.end() ) throw invalid_argument("Table does not exist.");
    else return tmp->second;
}

// Register a new relation from a CREATE TABLE statement: build its
// catalog, create the table's directory and persist the catalog file.
catalog * catalog_manager::add_relation(create_table_stmt *tmp) {
    if (relations.count(tmp->name) == 1) throw invalid_argument("Table already exsists.");
    auto new_catalog = new catalog(tmp);
    relations[tmp->name] = new_catalog;
    // mkdir and write back
    mkdir((new_catalog->get_name()).c_str(), 0777);
    // TODO exception handler
    new_catalog->write_back((new_catalog->get_name()));
    return new_catalog;
}

// Persist the master catalog (table name + record count per line) and
// every per-table catalog file.
void catalog_manager::write_back() {
    ofstream out("catalog");
    for ( auto x : relations ) {
        out << x.first << " " << x.second->get_size() << endl;
        x.second->write_back(x.second->get_name());
    }
    out.close();
}

// Convenience forwarders that resolve an attribute's relation first.
table_column *catalog_manager::get_column(attribute *t) {
    return exist_relation(t->relation_name)->get_column(t->attribute_name);
}

bool catalog_manager::is_unique(attribute *t) {
    return exist_relation(t->relation_name)->is_unique(t->attribute_name);
}

bool catalog_manager::is_indexed(attribute *t) {
    return exist_relation(t->relation_name)->is_indexed(t->attribute_name);
}

string catalog_manager::get_primary(const string &rel_name) {
    return exist_relation(rel_name)->get_primary();
}

int catalog_manager::get_size(const string &rel_name) {
    return exist_relation(rel_name)->get_size();
}

int catalog_manager::get_data_type(const attribute *t) {
    return exist_relation(t->relation_name)->get_column(t->attribute_name)->data_type;
}

// Fixed record size of a relation: sum of the columns' on-disk widths.
int catalog_manager::calc_record_size(const string &rel_name ) {
    auto t = exist_relation(rel_name);
    int c = 0;
    for( auto x : *(t->cols) ) {
        c += x->str_len;
    }
    return c;
}

// Drop a relation: delete its directory, free the catalog object and
// forget the entry.
// NOTE(review): shell command built from the user-supplied table name —
// injection-prone; flagging, not changing.
void catalog_manager::drop_table(const string &rel_name) {
    if (relations.count(rel_name) == 0) throw invalid_argument("Table does not exist.");
    else {
        system(("rm -rf " + rel_name).c_str());
        delete relations[rel_name];
        relations.erase(rel_name);
    }
}
<file_sep>#ifndef __CATALOG_H__
#define __CATALOG_H__
#include "../parser/stmt.h"
#include <iostream>
#include <fstream>
#include <map>
#include <cstring>

// In-memory schema of one relation: column list plus a cached record count.
class catalog {
private:
    string name;    // relation name (also its directory on disk)
    int count = 0;  // cached number of records
public:
    vector<table_column *> *cols;  // owned column descriptors
    catalog(const char *_name, vector<table_column *> *_cols) : name(_name), cols(_cols) {}
    catalog(create_table_stmt* _table) : name(_table->name), cols(_table->cols) {}
    catalog(const string &_name);        // load from an on-disk catalog file
    const string &get_name();
    void write_back(const string &);     // persist to <addr>/catalog
    table_column *get_column(const string &attr_name);
    bool is_unique(const string &);
    bool is_indexed(const string &);
    string get_primary();                // "" when no primary key declared
    int get_pos(const string &);         // column ordinal, -1 when absent
    int get_size();
    void set_size(int);
    void inc_size();
    void dec_size();
};

// Registry of all relations keyed by name; owns the catalog objects.
class catalog_manager {
private:
    string base_addr;                  // database root directory (ends with '/')
    map<string, catalog *> relations;
public:
    catalog_manager(const string &base_addr);
    catalog *exist_relation(const string &rel_name);  // throws when unknown
    catalog *add_relation(create_table_stmt *);
    void write_back();
    table_column *get_column(attribute *t);
    bool is_unique(attribute *t);
    bool is_indexed(attribute *t);
    string get_primary(const string &);
    int get_size( const string & );
    int get_data_type( const attribute * );
    int calc_record_size( const string & );
    void drop_table( const string & );
};
#endif
<file_sep>insert into qq values (0);
insert into qq values (1);
insert into qq values (2);
insert into qq values (3);
insert into qq values (4);
insert into qq values (5);
insert into qq values (6);
insert into qq values (7);
insert into qq values (8);
insert into qq values (9);
insert into qq values (10);
insert into qq values (11);
insert into qq values (12);
insert into qq values (13);
insert into qq values
(14); insert into qq values (15); insert into qq values (16); insert into qq values (17); insert into qq values (18); insert into qq values (19); insert into qq values (20); insert into qq values (21); insert into qq values (22); insert into qq values (23); insert into qq values (24); insert into qq values (25); insert into qq values (26); insert into qq values (27); insert into qq values (28); insert into qq values (29); insert into qq values (30); insert into qq values (31); insert into qq values (32); insert into qq values (33); insert into qq values (34); insert into qq values (35); insert into qq values (36); insert into qq values (37); insert into qq values (38); insert into qq values (39); insert into qq values (40); insert into qq values (41); insert into qq values (42); insert into qq values (43); insert into qq values (44); insert into qq values (45); insert into qq values (46); insert into qq values (47); insert into qq values (48); insert into qq values (49); insert into qq values (50); insert into qq values (51); insert into qq values (52); insert into qq values (53); insert into qq values (54); insert into qq values (55); insert into qq values (56); insert into qq values (57); insert into qq values (58); insert into qq values (59); insert into qq values (60); insert into qq values (61); insert into qq values (62); insert into qq values (63); insert into qq values (64); insert into qq values (65); insert into qq values (66); insert into qq values (67); insert into qq values (68); insert into qq values (69); insert into qq values (70); insert into qq values (71); insert into qq values (72); insert into qq values (73); insert into qq values (74); insert into qq values (75); insert into qq values (76); insert into qq values (77); insert into qq values (78); insert into qq values (79); insert into qq values (80); insert into qq values (81); insert into qq values (82); insert into qq values (83); insert into qq values (84); insert into qq values (85); 
insert into qq values (86); insert into qq values (87); insert into qq values (88); insert into qq values (89); insert into qq values (90); insert into qq values (91); insert into qq values (92); insert into qq values (93); insert into qq values (94); insert into qq values (95); insert into qq values (96); insert into qq values (97); insert into qq values (98); insert into qq values (99); insert into qq values (100); insert into qq values (101); insert into qq values (102); insert into qq values (103); insert into qq values (104); insert into qq values (105); insert into qq values (106); insert into qq values (107); insert into qq values (108); insert into qq values (109); insert into qq values (110); insert into qq values (111); insert into qq values (112); insert into qq values (113); insert into qq values (114); insert into qq values (115); insert into qq values (116); insert into qq values (117); insert into qq values (118); insert into qq values (119); insert into qq values (120); insert into qq values (121); insert into qq values (122); insert into qq values (123); insert into qq values (124); insert into qq values (125); insert into qq values (126); insert into qq values (127); insert into qq values (128); insert into qq values (129); insert into qq values (130); insert into qq values (131); insert into qq values (132); insert into qq values (133); insert into qq values (134); insert into qq values (135); insert into qq values (136); insert into qq values (137); insert into qq values (138); insert into qq values (139); insert into qq values (140); insert into qq values (141); insert into qq values (142); insert into qq values (143); insert into qq values (144); insert into qq values (145); insert into qq values (146); insert into qq values (147); insert into qq values (148); insert into qq values (149); insert into qq values (150); insert into qq values (151); insert into qq values (152); insert into qq values (153); insert into qq values (154); insert into 
qq values (155); insert into qq values (156); insert into qq values (157); insert into qq values (158); insert into qq values (159); insert into qq values (160); insert into qq values (161); insert into qq values (162); insert into qq values (163); insert into qq values (164); insert into qq values (165); insert into qq values (166); insert into qq values (167); insert into qq values (168); insert into qq values (169); insert into qq values (170); insert into qq values (171); insert into qq values (172); insert into qq values (173); insert into qq values (174); insert into qq values (175); insert into qq values (176); insert into qq values (177); insert into qq values (178); insert into qq values (179); insert into qq values (180); insert into qq values (181); insert into qq values (182); insert into qq values (183); insert into qq values (184); insert into qq values (185); insert into qq values (186); insert into qq values (187); insert into qq values (188); insert into qq values (189); insert into qq values (190); insert into qq values (191); insert into qq values (192); insert into qq values (193); insert into qq values (194); insert into qq values (195); insert into qq values (196); insert into qq values (197); insert into qq values (198); insert into qq values (199); insert into qq values (200); insert into qq values (201); insert into qq values (202); insert into qq values (203); insert into qq values (204); insert into qq values (205); insert into qq values (206); insert into qq values (207); insert into qq values (208); insert into qq values (209); insert into qq values (210); insert into qq values (211); insert into qq values (212); insert into qq values (213); insert into qq values (214); insert into qq values (215); insert into qq values (216); insert into qq values (217); insert into qq values (218); insert into qq values (219); insert into qq values (220); insert into qq values (221); insert into qq values (222); insert into qq values (223); insert 
into qq values (224); insert into qq values (225); insert into qq values (226); insert into qq values (227); insert into qq values (228); insert into qq values (229); insert into qq values (230); insert into qq values (231); insert into qq values (232); insert into qq values (233); insert into qq values (234); insert into qq values (235); insert into qq values (236); insert into qq values (237); insert into qq values (238); insert into qq values (239); insert into qq values (240); insert into qq values (241); insert into qq values (242); insert into qq values (243); insert into qq values (244); insert into qq values (245); insert into qq values (246); insert into qq values (247); insert into qq values (248); insert into qq values (249); insert into qq values (250); insert into qq values (251); insert into qq values (252); insert into qq values (253); insert into qq values (254); insert into qq values (255); insert into qq values (256); insert into qq values (257); insert into qq values (258); insert into qq values (259); insert into qq values (260); insert into qq values (261); insert into qq values (262); insert into qq values (263); insert into qq values (264); insert into qq values (265); insert into qq values (266); insert into qq values (267); insert into qq values (268); insert into qq values (269); insert into qq values (270); insert into qq values (271); insert into qq values (272); insert into qq values (273); insert into qq values (274); insert into qq values (275); insert into qq values (276); insert into qq values (277); insert into qq values (278); insert into qq values (279); insert into qq values (280); insert into qq values (281); insert into qq values (282); insert into qq values (283); insert into qq values (284); insert into qq values (285); insert into qq values (286); insert into qq values (287); insert into qq values (288); insert into qq values (289); insert into qq values (290); insert into qq values (291); insert into qq values (292); 
insert into qq values (293); insert into qq values (294); insert into qq values (295); insert into qq values (296); insert into qq values (297); insert into qq values (298); insert into qq values (299); insert into qq values (300); insert into qq values (301); insert into qq values (302); insert into qq values (303); insert into qq values (304); insert into qq values (305); insert into qq values (306); insert into qq values (307); insert into qq values (308); insert into qq values (309); insert into qq values (310); insert into qq values (311); insert into qq values (312); insert into qq values (313); insert into qq values (314); insert into qq values (315); insert into qq values (316); insert into qq values (317); insert into qq values (318); insert into qq values (319); insert into qq values (320); insert into qq values (321); insert into qq values (322); insert into qq values (323); insert into qq values (324); insert into qq values (325); insert into qq values (326); insert into qq values (327); insert into qq values (328); insert into qq values (329); insert into qq values (330); insert into qq values (331); insert into qq values (332); insert into qq values (333); insert into qq values (334); insert into qq values (335); insert into qq values (336); insert into qq values (337); insert into qq values (338); insert into qq values (339); insert into qq values (340); insert into qq values (341); insert into qq values (342); insert into qq values (343); insert into qq values (344); insert into qq values (345); insert into qq values (346); insert into qq values (347); insert into qq values (348); insert into qq values (349); insert into qq values (350); insert into qq values (351); insert into qq values (352); insert into qq values (353); insert into qq values (354); insert into qq values (355); insert into qq values (356); insert into qq values (357); insert into qq values (358); insert into qq values (359); insert into qq values (360); insert into qq values 
(361); insert into qq values (362); insert into qq values (363); insert into qq values (364); insert into qq values (365); insert into qq values (366); insert into qq values (367); insert into qq values (368); insert into qq values (369); insert into qq values (370); insert into qq values (371); insert into qq values (372); insert into qq values (373); insert into qq values (374); insert into qq values (375); insert into qq values (376); insert into qq values (377); insert into qq values (378); insert into qq values (379); insert into qq values (380); insert into qq values (381); insert into qq values (382); insert into qq values (383); insert into qq values (384); insert into qq values (385); insert into qq values (386); insert into qq values (387); insert into qq values (388); insert into qq values (389); insert into qq values (390); insert into qq values (391); insert into qq values (392); insert into qq values (393); insert into qq values (394); insert into qq values (395); insert into qq values (396); insert into qq values (397); insert into qq values (398); insert into qq values (399); insert into qq values (400); insert into qq values (401); insert into qq values (402); insert into qq values (403); insert into qq values (404); insert into qq values (405); insert into qq values (406); insert into qq values (407); insert into qq values (408); insert into qq values (409); insert into qq values (410); insert into qq values (411); insert into qq values (412); insert into qq values (413); insert into qq values (414); insert into qq values (415); insert into qq values (416); insert into qq values (417); insert into qq values (418); insert into qq values (419); insert into qq values (420); insert into qq values (421); insert into qq values (422); insert into qq values (423); insert into qq values (424); insert into qq values (425); insert into qq values (426); insert into qq values (427); insert into qq values (428); insert into qq values (429); insert into qq 
values (430); insert into qq values (431); insert into qq values (432); insert into qq values (433); insert into qq values (434); insert into qq values (435); insert into qq values (436); insert into qq values (437); insert into qq values (438); insert into qq values (439); insert into qq values (440); insert into qq values (441); insert into qq values (442); insert into qq values (443); insert into qq values (444); insert into qq values (445); insert into qq values (446); insert into qq values (447); insert into qq values (448); insert into qq values (449); insert into qq values (450); insert into qq values (451); insert into qq values (452); insert into qq values (453); insert into qq values (454); insert into qq values (455); insert into qq values (456); insert into qq values (457); insert into qq values (458); insert into qq values (459); insert into qq values (460); insert into qq values (461); insert into qq values (462); insert into qq values (463); insert into qq values (464); insert into qq values (465); insert into qq values (466); insert into qq values (467); insert into qq values (468); insert into qq values (469); insert into qq values (470); insert into qq values (471); insert into qq values (472); insert into qq values (473); insert into qq values (474); insert into qq values (475); insert into qq values (476); insert into qq values (477); insert into qq values (478); insert into qq values (479); insert into qq values (480); insert into qq values (481); insert into qq values (482); insert into qq values (483); insert into qq values (484); insert into qq values (485); insert into qq values (486); insert into qq values (487); insert into qq values (488); insert into qq values (489); insert into qq values (490); insert into qq values (491); insert into qq values (492); insert into qq values (493); insert into qq values (494); insert into qq values (495); insert into qq values (496); insert into qq values (497); insert into qq values (498); insert into 
qq values (499); insert into qq values (500); insert into qq values (501); insert into qq values (502); insert into qq values (503); insert into qq values (504); insert into qq values (505); insert into qq values (506); insert into qq values (507); insert into qq values (508); insert into qq values (509); insert into qq values (510); insert into qq values (511); insert into qq values (512); insert into qq values (513); insert into qq values (514); insert into qq values (515); insert into qq values (516); insert into qq values (517); insert into qq values (518); insert into qq values (519); insert into qq values (520); insert into qq values (521); insert into qq values (522); insert into qq values (523); insert into qq values (524); insert into qq values (525); insert into qq values (526); insert into qq values (527); insert into qq values (528); insert into qq values (529); insert into qq values (530); insert into qq values (531); insert into qq values (532); insert into qq values (533); insert into qq values (534); insert into qq values (535); insert into qq values (536); insert into qq values (537); insert into qq values (538); insert into qq values (539); insert into qq values (540); insert into qq values (541); insert into qq values (542); insert into qq values (543); insert into qq values (544); insert into qq values (545); insert into qq values (546); insert into qq values (547); insert into qq values (548); insert into qq values (549); insert into qq values (550); insert into qq values (551); insert into qq values (552); insert into qq values (553); insert into qq values (554); insert into qq values (555); insert into qq values (556); insert into qq values (557); insert into qq values (558); insert into qq values (559); insert into qq values (560); insert into qq values (561); insert into qq values (562); insert into qq values (563); insert into qq values (564); insert into qq values (565); insert into qq values (566); insert into qq values (567); insert 
into qq values (568); insert into qq values (569); insert into qq values (570); insert into qq values (571); insert into qq values (572); insert into qq values (573); insert into qq values (574); insert into qq values (575); insert into qq values (576); insert into qq values (577); insert into qq values (578); insert into qq values (579); insert into qq values (580); insert into qq values (581); insert into qq values (582); insert into qq values (583); insert into qq values (584); insert into qq values (585); insert into qq values (586); insert into qq values (587); insert into qq values (588); insert into qq values (589); insert into qq values (590); insert into qq values (591); insert into qq values (592); insert into qq values (593); insert into qq values (594); insert into qq values (595); insert into qq values (596); insert into qq values (597); insert into qq values (598); insert into qq values (599); insert into qq values (600); insert into qq values (601); insert into qq values (602); insert into qq values (603); insert into qq values (604); insert into qq values (605); insert into qq values (606); insert into qq values (607); insert into qq values (608); insert into qq values (609); insert into qq values (610); insert into qq values (611); insert into qq values (612); insert into qq values (613); insert into qq values (614); insert into qq values (615); insert into qq values (616); insert into qq values (617); insert into qq values (618); insert into qq values (619); insert into qq values (620); insert into qq values (621); insert into qq values (622); insert into qq values (623); insert into qq values (624); insert into qq values (625); insert into qq values (626); insert into qq values (627); insert into qq values (628); insert into qq values (629); insert into qq values (630); insert into qq values (631); insert into qq values (632); insert into qq values (633); insert into qq values (634); insert into qq values (635); insert into qq values (636); 
insert into qq values (637); insert into qq values (638); insert into qq values (639); insert into qq values (640); insert into qq values (641); insert into qq values (642); insert into qq values (643); insert into qq values (644); insert into qq values (645); insert into qq values (646); insert into qq values (647); insert into qq values (648); insert into qq values (649); insert into qq values (650); insert into qq values (651); insert into qq values (652); insert into qq values (653); insert into qq values (654); insert into qq values (655); insert into qq values (656); insert into qq values (657); insert into qq values (658); insert into qq values (659); insert into qq values (660); insert into qq values (661); insert into qq values (662); insert into qq values (663); insert into qq values (664); insert into qq values (665); insert into qq values (666); insert into qq values (667); insert into qq values (668); insert into qq values (669); insert into qq values (670); insert into qq values (671); insert into qq values (672); insert into qq values (673); insert into qq values (674); insert into qq values (675); insert into qq values (676); insert into qq values (677); insert into qq values (678); insert into qq values (679); insert into qq values (680); insert into qq values (681); insert into qq values (682); insert into qq values (683); insert into qq values (684); insert into qq values (685); insert into qq values (686); insert into qq values (687); insert into qq values (688); insert into qq values (689); insert into qq values (690); insert into qq values (691); insert into qq values (692); insert into qq values (693); insert into qq values (694); insert into qq values (695); insert into qq values (696); insert into qq values (697); insert into qq values (698); insert into qq values (699); insert into qq values (700); insert into qq values (701); insert into qq values (702); insert into qq values (703); insert into qq values (704); insert into qq values 
(705); insert into qq values (706); insert into qq values (707); insert into qq values (708); insert into qq values (709); insert into qq values (710); insert into qq values (711); insert into qq values (712); insert into qq values (713); insert into qq values (714); insert into qq values (715); insert into qq values (716); insert into qq values (717); insert into qq values (718); insert into qq values (719); insert into qq values (720); insert into qq values (721); insert into qq values (722); insert into qq values (723); insert into qq values (724); insert into qq values (725); insert into qq values (726); insert into qq values (727); insert into qq values (728); insert into qq values (729); insert into qq values (730); insert into qq values (731); insert into qq values (732); insert into qq values (733); insert into qq values (734); insert into qq values (735); insert into qq values (736); insert into qq values (737); insert into qq values (738); insert into qq values (739); insert into qq values (740); insert into qq values (741); insert into qq values (742); insert into qq values (743); insert into qq values (744); insert into qq values (745); insert into qq values (746); insert into qq values (747); insert into qq values (748); insert into qq values (749); insert into qq values (750); insert into qq values (751); insert into qq values (752); insert into qq values (753); insert into qq values (754); insert into qq values (755); insert into qq values (756); insert into qq values (757); insert into qq values (758); insert into qq values (759); insert into qq values (760); insert into qq values (761); insert into qq values (762); insert into qq values (763); insert into qq values (764); insert into qq values (765); insert into qq values (766); insert into qq values (767); insert into qq values (768); insert into qq values (769); insert into qq values (770); insert into qq values (771); insert into qq values (772); insert into qq values (773); insert into qq 
values (774); insert into qq values (775); insert into qq values (776); insert into qq values (777); insert into qq values (778); insert into qq values (779); insert into qq values (780); insert into qq values (781); insert into qq values (782); insert into qq values (783); insert into qq values (784); insert into qq values (785); insert into qq values (786); insert into qq values (787); insert into qq values (788); insert into qq values (789); insert into qq values (790); insert into qq values (791); insert into qq values (792); insert into qq values (793); insert into qq values (794); insert into qq values (795); insert into qq values (796); insert into qq values (797); insert into qq values (798); insert into qq values (799); insert into qq values (800); insert into qq values (801); insert into qq values (802); insert into qq values (803); insert into qq values (804); insert into qq values (805); insert into qq values (806); insert into qq values (807); insert into qq values (808); insert into qq values (809); insert into qq values (810); insert into qq values (811); insert into qq values (812); insert into qq values (813); insert into qq values (814); insert into qq values (815); insert into qq values (816); insert into qq values (817); insert into qq values (818); insert into qq values (819); insert into qq values (820); insert into qq values (821); insert into qq values (822); insert into qq values (823); insert into qq values (824); insert into qq values (825); insert into qq values (826); insert into qq values (827); insert into qq values (828); insert into qq values (829); insert into qq values (830); insert into qq values (831); insert into qq values (832); insert into qq values (833); insert into qq values (834); insert into qq values (835); insert into qq values (836); insert into qq values (837); insert into qq values (838); insert into qq values (839); insert into qq values (840); insert into qq values (841); insert into qq values (842); insert into 
qq values (843); insert into qq values (844); insert into qq values (845); insert into qq values (846); insert into qq values (847); insert into qq values (848); insert into qq values (849); insert into qq values (850); insert into qq values (851); insert into qq values (852); insert into qq values (853); insert into qq values (854); insert into qq values (855); insert into qq values (856); insert into qq values (857); insert into qq values (858); insert into qq values (859); insert into qq values (860); insert into qq values (861); insert into qq values (862); insert into qq values (863); insert into qq values (864); insert into qq values (865); insert into qq values (866); insert into qq values (867); insert into qq values (868); insert into qq values (869); insert into qq values (870); insert into qq values (871); insert into qq values (872); insert into qq values (873); insert into qq values (874); insert into qq values (875); insert into qq values (876); insert into qq values (877); insert into qq values (878); insert into qq values (879); insert into qq values (880); insert into qq values (881); insert into qq values (882); insert into qq values (883); insert into qq values (884); insert into qq values (885); insert into qq values (886); insert into qq values (887); insert into qq values (888); insert into qq values (889); insert into qq values (890); insert into qq values (891); insert into qq values (892); insert into qq values (893); insert into qq values (894); insert into qq values (895); insert into qq values (896); insert into qq values (897); insert into qq values (898); insert into qq values (899); insert into qq values (900); insert into qq values (901); insert into qq values (902); insert into qq values (903); insert into qq values (904); insert into qq values (905); insert into qq values (906); insert into qq values (907); insert into qq values (908); insert into qq values (909); insert into qq values (910); insert into qq values (911); insert 
into qq values (912); insert into qq values (913); insert into qq values (914); insert into qq values (915); insert into qq values (916); insert into qq values (917); insert into qq values (918); insert into qq values (919); insert into qq values (920); insert into qq values (921); insert into qq values (922); insert into qq values (923); insert into qq values (924); insert into qq values (925); insert into qq values (926); insert into qq values (927); insert into qq values (928); insert into qq values (929); insert into qq values (930); insert into qq values (931); insert into qq values (932); insert into qq values (933); insert into qq values (934); insert into qq values (935); insert into qq values (936); insert into qq values (937); insert into qq values (938); insert into qq values (939); insert into qq values (940); insert into qq values (941); insert into qq values (942); insert into qq values (943); insert into qq values (944); insert into qq values (945); insert into qq values (946); insert into qq values (947); insert into qq values (948); insert into qq values (949); insert into qq values (950); insert into qq values (951); insert into qq values (952); insert into qq values (953); insert into qq values (954); insert into qq values (955); insert into qq values (956); insert into qq values (957); insert into qq values (958); insert into qq values (959); insert into qq values (960); insert into qq values (961); insert into qq values (962); insert into qq values (963); insert into qq values (964); insert into qq values (965); insert into qq values (966); insert into qq values (967); insert into qq values (968); insert into qq values (969); insert into qq values (970); insert into qq values (971); insert into qq values (972); insert into qq values (973); insert into qq values (974); insert into qq values (975); insert into qq values (976); insert into qq values (977); insert into qq values (978); insert into qq values (979); insert into qq values (980); 
insert into qq values (981); insert into qq values (982); insert into qq values (983); insert into qq values (984); insert into qq values (985); insert into qq values (986); insert into qq values (987); insert into qq values (988); insert into qq values (989); insert into qq values (990); insert into qq values (991); insert into qq values (992); insert into qq values (993); insert into qq values (994); insert into qq values (995); insert into qq values (996); insert into qq values (997); insert into qq values (998); insert into qq values (999);
498f31a1752fb651b8e45f4703a3d2543da000fe
[ "SQL", "Makefile", "C++" ]
25
C++
pollow/xyzSQL
5aec676bf59e45f6bea1608e7d2b31830b1f6d8e
65e1a0cde8004376319e43f8bf12f264ea19e8a8
refs/heads/master
<repo_name>hzshang/minijava<file_sep>/scaner.c #include "stand.h" #include "util.h" #include "error.h" #include "absyn.h" #include "minijava.tab.h" #include "state.h" YYSTYPE yylval; extern int line_num; int yylex(); string names[]={"ID","STRING","INT","CLASS","PUBLIC","STATIC","VOID","LPAREN","RPAREN","LBRACK","RBRACK","LBRACE","RBRACE","EXTENDS","SEMICOLON","BOOLEAN","IF","WHILE","PRINT","LENGTH","DOT","THIS","NEW","ASSIGN","PLUS","MINUS","TIMES","DIVIDE","EQ","LE","LT","GE","GT","AND","REVERSE","TRUE","FALSE","ELSE","COMMA","MAIN","RETURN","INT_ID","BOOLEAN_ID","STRING_ID","UMINUS","NEWLINE"}; string tokname(int tok) { return tok<258 || tok>303 ? "BAD_TOKEN" : names[tok-258]; } int main(int argc,char* argv[]){ if(argc == 1){ printf("%s filename\n",argv[0]); exit(0); } string fname = argv[1]; state_reset(fname); while(1){ enum yytokentype tok = yylex(); if(!tok) break; switch (tok){ case ID: case STRING: printf("%2d:%10s %4d %s\n",line_num,tokname(tok),token_pos,yylval.sval); break; case INT: printf("%2d:%10s %4d %d\n",line_num,tokname(tok),token_pos,yylval.ival); break; default: printf("%2d:%10s %4d\n",line_num,tokname(tok),token_pos); break; } } return 0; } <file_sep>/absyn.h /* * absyn.h * Copyright (C) 2018 eric <<EMAIL>> * * Distributed under terms of the MIT license. 
*/ #ifndef ABSYN_H #define ABSYN_H #include "sym.h" typedef struct A_stm_* A_stm; typedef struct A_stm_list_* A_stm_list; typedef struct A_exp_* A_exp; typedef struct A_exp_list_* A_exp_list; typedef struct A_goal_* A_goal; typedef struct A_main_* A_main; typedef struct A_class_* A_class; typedef struct A_class_list_* A_class_list; typedef struct A_var_dec_* A_var_dec; typedef struct A_var_dec_list_* A_var_dec_list; typedef struct A_method_* A_method; typedef struct A_method_list_* A_method_list; typedef struct A_arg_dec_* A_arg_dec; typedef struct A_arg_dec_list_* A_arg_dec_list; typedef struct A_type_* A_type; typedef enum{A_and,A_plus,A_minus,A_times,A_lt} A_op; struct A_goal_ { A_main main; A_class_list classes; S_table tab; }; struct A_main_ { S_sym id; S_sym arg_id; A_stm stm; S_table tab; }; struct A_class_ { S_sym id; S_sym extend; // if no extend, it will be null A_var_dec_list vars; A_method_list methods; S_table tab; }; struct A_class_list_ { A_class val; A_class_list next; }; struct A_var_dec_ { A_type type; S_sym name; }; struct A_var_dec_list_{ A_var_dec val; A_var_dec_list next; }; struct A_method_ { A_type type; S_sym name; A_arg_dec_list args; A_stm_list stms; A_exp ret; S_table tab; }; struct A_method_list_{ A_method val; A_method_list next; }; struct A_arg_dec_ { A_type type; S_sym name; }; struct A_arg_dec_list_ { A_arg_dec val; A_arg_dec_list next; }; struct A_type_{ enum{A_type_int,A_type_array,A_type_boolean,A_type_sym,A_type_string} kind; union{ struct{S_sym name;} id; }u; }; struct A_stm_ { enum {A_stm_stms,A_stm_if_else,A_stm_loop,A_stm_print,A_stm_assign,A_stm_sub,A_stm_var_dec} kind; union{ struct{A_stm_list stms;} stms; struct{A_exp cond; A_stm yes; A_stm no;} cond; struct{A_exp cond; A_stm stm;} loop; struct{A_exp out;} print; struct{S_sym name; A_exp val;} assign; struct{S_sym name; A_exp sub;A_exp val;} sub; struct{A_var_dec var_dec;} var_dec; } u; S_table tab; }; struct A_stm_list_{ A_stm val; A_stm_list next; }; struct A_exp_ { 
enum{A_exp_ops,A_exp_sub,A_exp_length,A_exp_method,A_exp_int,A_exp_bool,A_exp_id,A_exp_this,A_exp_array,A_exp_new_id,A_exp_reverse,A_exp_exp,A_exp_uminus} kind; union{ struct{A_exp a;A_op op; A_exp b;} op; struct{A_exp exp;A_exp sub;} sub; struct{A_exp exp;} length; struct{A_exp exp;S_sym method;A_exp_list args;} method; struct{int val;} intval; struct{bool val;} boolval; struct{S_sym name;} id; //struct{} pointer; this struct{A_exp size;} array; struct{S_sym name;} new_id; struct{A_exp exp;} reverse; struct{A_exp exp;} exp; struct{A_exp exp;} uminus; } u; }; struct A_exp_list_ { A_exp val; A_exp_list next; }; A_goal A_goal_init(A_main,A_class_list); A_main A_main_init(S_sym,S_sym,A_stm); A_class A_class_init(S_sym,S_sym,A_var_dec_list,A_method_list); A_var_dec A_var_dec_init(A_type,S_sym); A_method A_method_init(A_type,S_sym,A_arg_dec_list,A_stm_list,A_exp); A_arg_dec A_arg_dec_init(A_type,S_sym); A_type A_type_init_int(); A_type A_type_init_array(); A_type A_type_init_boolean(); A_type A_type_init_string(); A_type A_type_init_sym(S_sym); A_stm A_stm_init_stm_list(A_stm_list); A_stm A_stm_init_cond(A_exp,A_stm,A_stm); A_stm A_stm_init_loop(A_exp,A_stm); A_stm A_stm_init_print(A_exp); A_stm A_stm_init_assign(S_sym,A_exp); A_stm A_stm_init_sub(S_sym,A_exp,A_exp); A_stm A_stm_init_var(A_var_dec); A_exp A_exp_init_op(A_exp,A_op,A_exp); A_exp A_exp_init_sub(A_exp,A_exp); A_exp A_exp_init_length(A_exp); A_exp A_exp_init_method(A_exp,S_sym,A_exp_list); A_exp A_exp_init_intval(int); A_exp A_exp_init_boolval(bool); A_exp A_exp_init_id(S_sym); A_exp A_exp_init_this(); A_exp A_exp_init_array(A_exp); A_exp A_exp_init_newid(S_sym); A_exp A_exp_init_reverse(A_exp); A_exp A_exp_init_exp(A_exp); A_exp A_exp_init_uminus(A_exp); A_class_list A_class_list_init_null(); A_class_list A_class_list_init_class(A_class c); A_class_list A_class_list_init_classes(A_class c,A_class_list next); A_stm_list A_stm_list_init_null(); A_stm_list A_stm_list_init_stm(A_stm s); A_stm_list 
A_stm_list_init_stms(A_stm s,A_stm_list next); /*TODO: not support int a,b,c; */ A_var_dec_list A_var_dec_list_init_null(); A_var_dec_list A_var_dec_list_init_var(A_var_dec val); A_var_dec_list A_var_dec_list_init_vars(A_var_dec val,A_var_dec_list next); A_arg_dec_list A_arg_dec_list_init_null(); A_arg_dec_list A_arg_dec_list_init_arg_dec(A_arg_dec arg); A_arg_dec_list A_arg_dec_list_init_arg_decs(A_arg_dec arg,A_arg_dec_list next); A_method_list A_method_list_init_null(); A_method_list A_method_list_init_method(A_method val); A_method_list A_method_list_init_methods(A_method val,A_method_list next); A_exp_list A_exp_list_init_null(); A_exp_list A_exp_list_init_exp(A_exp); A_exp_list A_exp_list_init_exps(A_exp,A_exp_list); /* * arg_dec 参数声明 用在函数eg: begin(int a,int b) arg_dec 表示 int a * arg_dec_list arg_dec_list 代表 int a,int b * * var 变量声明,被vars和stm使用 * vars 目前仅用在类的全局声明 * stm 代码块 * exp 表达式 * * TODO: * new ID() 允许传参 */ #endif /* !ABSYN_H */ <file_sep>/absyn.c #include "stand.h" #include "sym.h" #include "absyn.h" #include "error.h" A_goal A_goal_init(A_main main,A_class_list list){ A_goal g = safe_malloc(sizeof(*g)); g->main = main; g->classes = list; g->tab = S_table_init(S_dom_goal); if(err_count) return g; main->tab->parent = g->tab; S_table_add_dec(g->tab,main->id); A_class_list tmp = list; while(tmp){ tmp->val->tab->parent = g->tab; S_table_add_dec(g->tab,tmp->val->id); tmp = tmp->next; }; return g; } A_main A_main_init(S_sym id,S_sym arg_id,A_stm stm){ A_main mc = safe_malloc(sizeof(*mc)); mc->id = id; mc->arg_id = arg_id; mc->stm = stm; mc->tab = S_table_init(S_dom_main); if(err_count) return mc; S_table_add_dec(mc->tab,arg_id); S_table_add_stm(mc->tab,stm); return mc; } A_class A_class_init(S_sym id ,S_sym extend,A_var_dec_list vars,A_method_list methods){ A_class cls = safe_malloc(sizeof(*cls)); cls->id = id; cls->extend = extend; cls->methods = methods; cls->vars = vars; cls->tab = S_table_init(S_dom_class); if(err_count) return cls; if(extend) 
S_table_add_use(cls->tab,extend); A_var_dec_list t = vars; while(t){ S_table_add_var_dec(cls->tab,t->val); t = t->next; } A_method_list tmp = methods; while(tmp){ tmp->val->tab->parent = cls->tab; S_table_add_dec(cls->tab,tmp->val->name); tmp = tmp->next; } return cls; } A_var_dec A_var_dec_init(A_type type,S_sym name){ A_var_dec v = safe_malloc(sizeof(*v)); v->type = type; v->name = name; return v; } A_method A_method_init(A_type type,S_sym name,A_arg_dec_list args, A_stm_list stms,A_exp ret){ A_method m = safe_malloc(sizeof(*m)); m->type = type; m->name = name; m->args = args; m->stms = stms; m->ret = ret; m->tab = S_table_init(S_dom_method); if(err_count) return m; S_table_add_type(m->tab,type); S_table_add_arg_dec_list(m->tab,args); S_table_add_stm_list(m->tab,stms); S_table_add_exp(m->tab,ret); return m; } A_arg_dec A_arg_dec_init(A_type type,S_sym name){ A_arg_dec a = safe_malloc(sizeof(*a)); a->type = type; a->name = name; return a; } A_type A_type_init_int(){ A_type t = safe_malloc(sizeof(*t)); t->kind = A_type_int; return t; } A_type A_type_init_array(){ A_type t = safe_malloc(sizeof(*t)); t->kind = A_type_array; return t; } A_type A_type_init_boolean(){ A_type t = safe_malloc(sizeof(*t)); t->kind = A_type_boolean; return t; } A_type A_type_init_string(){ A_type t = safe_malloc(sizeof(*t)); t->kind = A_type_string; return t; } A_type A_type_init_sym(S_sym name){ A_type t = safe_malloc(sizeof(*t)); t->kind = A_type_sym; t->u.id.name = name; return t; } A_stm A_stm_init_stm_list(A_stm_list stms){ A_stm s = safe_malloc(sizeof(*s)); s->kind = A_stm_stms; s->u.stms.stms = stms; s->tab = S_table_init(S_dom_stm); if(err_count) return s; S_table_add_stm_list(s->tab,stms); return s; } A_stm A_stm_init_cond(A_exp cond,A_stm yes, A_stm no){ A_stm s = safe_malloc(sizeof(*s)); s->kind = A_stm_if_else; s->u.cond.cond = cond; s->u.cond.yes = yes; s->u.cond.no = no; s->tab = S_table_init(S_dom_stm); //S_table_add_stm(s->tab,yes); //S_table_add_stm(s->tab,no); return s; } 
A_stm A_stm_init_loop(A_exp cond, A_stm stm){ A_stm s = safe_malloc(sizeof(*s)); s->kind = A_stm_loop; s->u.loop.cond = cond; s->u.loop.stm = stm; s->tab = S_table_init(S_dom_stm); // S_table_add_stm(s->tab,stm); return s; } A_stm A_stm_init_print(A_exp out){ A_stm s = safe_malloc(sizeof(*s)); s->kind = A_stm_print; s->u.print.out = out; s->tab = S_table_init(S_dom_stm); return s; } A_stm A_stm_init_assign(S_sym name,A_exp val){ A_stm s = safe_malloc(sizeof(*s)); s->kind = A_stm_assign; s->u.assign.name = name; s->u.assign.val = val; s->tab = S_table_init(S_dom_stm); return s; } A_stm A_stm_init_sub(S_sym name,A_exp sub,A_exp val){ A_stm s = safe_malloc(sizeof(*s)); s->kind = A_stm_sub; s->u.sub.name = name; s->u.sub.sub = sub; s->u.sub.val = val; s->tab = S_table_init(S_dom_stm); return s; } A_stm A_stm_init_var(A_var_dec v){ A_stm s = safe_malloc(sizeof(*s)); s->kind = A_stm_var_dec; s->u.var_dec.var_dec = v; s->tab = S_table_init(S_dom_stm); return s; } A_exp A_exp_init_op(A_exp a,A_op op,A_exp b){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_ops; e->u.op.a = a; e->u.op.op = op; e->u.op.b = b; return e; } A_exp A_exp_init_sub(A_exp exp,A_exp sub){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_sub; e->u.sub.exp = exp; e->u.sub.sub = sub; return e; } A_exp A_exp_init_length(A_exp exp){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_length; e->u.length.exp = exp; return e; } A_exp A_exp_init_method(A_exp exp,S_sym name,A_exp_list args){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_method; e->u.method.exp = exp; e->u.method.method = name; e->u.method.args = args; return e; } A_exp A_exp_init_intval(int val){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_int; e->u.intval.val = val; return e; } A_exp A_exp_init_boolval(bool val){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_bool; e->u.boolval.val = val; return e; } A_exp A_exp_init_id(S_sym name){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_id; e->u.id.name = name; return e; } 
A_exp A_exp_init_this(){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_this; return e; } A_exp A_exp_init_array(A_exp size){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_array; e->u.array.size = size; return e; } A_exp A_exp_init_newid(S_sym name){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_new_id; e->u.new_id.name = name; return e; } A_exp A_exp_init_reverse(A_exp exp){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_reverse; e->u.reverse.exp = exp; return e; } A_exp A_exp_init_exp(A_exp exp){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_exp; e->u.exp.exp = exp; return e; } A_exp A_exp_init_uminus(A_exp exp){ A_exp e = safe_malloc(sizeof(*e)); e->kind = A_exp_uminus; e->u.uminus.exp = exp; return e; } A_class_list A_class_list_init_class(A_class c){ if(!c) return NULL; A_class_list l = safe_malloc(sizeof(*l)); l->val = c; l->next = NULL; return l; } A_class_list A_class_list_init_classes(A_class c,A_class_list next){ if(!c)return NULL; A_class_list l = safe_malloc(sizeof(*l)); l->val = c; l->next = next; return l; } A_stm_list A_stm_list_init_stm(A_stm s){ if(!s)return NULL; A_stm_list l = safe_malloc(sizeof(*l)); l->val = s; l->next = NULL; return l; } A_stm_list A_stm_list_init_stms(A_stm s,A_stm_list next){ if(!s)return NULL; A_stm_list l = safe_malloc(sizeof(*l)); l->val = s; l->next = next; return l; } A_var_dec_list A_var_dec_list_init_var(A_var_dec val){ if(!val)return NULL; A_var_dec_list l = safe_malloc(sizeof(*l)); l->val = val; l->next = NULL; return l; } A_var_dec_list A_var_dec_list_init_vars(A_var_dec val,A_var_dec_list next){ if(!val)return NULL; A_var_dec_list l = safe_malloc(sizeof(*l)); l->val = val; l->next = next; return l; } A_arg_dec_list A_arg_dec_list_init_arg_dec(A_arg_dec arg){ if(!arg) return NULL; A_arg_dec_list l = safe_malloc(sizeof(*l)); l->val = arg; l->next = NULL; return l; } A_arg_dec_list A_arg_dec_list_init_arg_decs(A_arg_dec arg,A_arg_dec_list next){ if(!arg) return NULL; A_arg_dec_list l = 
safe_malloc(sizeof(*l)); l->val = arg; l->next = next; return l; } A_method_list A_method_list_init_method(A_method val){ if(!val) return NULL; A_method_list l = safe_malloc(sizeof(*l)); l->val = val; l->next = NULL; return l; } A_method_list A_method_list_init_methods(A_method val,A_method_list next){ if(!val) return NULL; A_method_list l = safe_malloc(sizeof(*l)); l->val = val; l->next = next; return l; } A_exp_list A_exp_list_init_exp(A_exp val){ if(!val) return NULL; A_exp_list l = safe_malloc(sizeof(*l)); l->val = val; l->next = NULL; return l; } A_exp_list A_exp_list_init_exps(A_exp val,A_exp_list next){ if(!val) return NULL; A_exp_list l = safe_malloc(sizeof(*l)); l->val = val; l->next = next; return l; } A_class_list A_class_list_init_null(){ return NULL; } A_method_list A_method_list_init_null(){ return NULL; } A_var_dec_list A_var_dec_list_init_null(){ return NULL; } A_arg_dec_list A_arg_dec_list_init_null(){ return NULL; } A_stm_list A_stm_list_init_null(){ return NULL; } A_exp_list A_exp_list_init_null(){ return NULL; } <file_sep>/sym.h /* * sym.h * Copyright (C) 2018 eric <<EMAIL>> * * Distributed under terms of the MIT license. 
*/ #ifndef SYM_H #define SYM_H #include "util.h" #include "error.h" typedef struct A_stm_* A_stm; typedef struct A_stm_list_* A_stm_list; typedef struct A_exp_* A_exp; typedef struct A_exp_list_* A_exp_list; typedef struct A_goal_* A_goal; typedef struct A_main_* A_main; typedef struct A_class_* A_class; typedef struct A_class_list_* A_class_list; typedef struct A_var_dec_* A_var_dec; typedef struct A_var_dec_list_* A_var_dec_list; typedef struct A_method_* A_method; typedef struct A_method_list_* A_method_list; typedef struct A_arg_dec_* A_arg_dec; typedef struct A_arg_dec_list_* A_arg_dec_list; typedef struct A_type_* A_type; typedef struct S_sym_* S_sym; typedef struct S_table_* S_table; typedef struct S_chain_* S_chain; typedef enum {S_method,S_class,S_var,S_mainclass,S_unknown} S_type; typedef enum { S_dom_goal,S_dom_main,S_dom_class,S_dom_method,S_dom_stm,S_dom_unknown} S_dom; #define S_SYM_THIS ((S_sym)1) #define S_SYM_BAD ((S_sym)2) struct S_sym_ { string name; int pos; S_type kind; union { struct{ S_sym parent;} method; }u; }; struct S_chain_{ S_sym val; S_chain next; }; struct S_table_ { S_chain dec; S_chain use; S_table parent; union { struct { A_goal goal;} goal; } u; S_dom dom; }; S_sym S_symbol(S_sym s); S_sym S_symbol_init(string name,int); S_sym S_sym_lookup(S_table,S_sym); S_table S_table_init(S_dom); void S_table_add_use(S_table,S_sym); void S_table_add_dec(S_table,S_sym); void S_table_add_exp(); void S_table_add_type(S_table tab,A_type t); void S_table_add_stm_list(S_table tab,A_stm_list list); void S_table_add_arg_dec_list(S_table tab,A_arg_dec_list args); void S_table_add_var_dec(S_table t,A_var_dec v); void S_table_add_type(S_table tab,A_type t); void S_table_add_exp_list(S_table tab,A_exp_list list); void S_table_add_exp(S_table tab,A_exp t); void S_table_add_stm(S_table tab,A_stm stm); void S_check_goal(A_goal g); void S_check_class(A_class c); void S_check_method(A_method m); void S_check_stm(A_stm s); void S_check_main(A_main); #endif /* 
!SYM_H */ <file_sep>/parse.c #include "stand.h" #include "util.h" #include "absyn.h" #include "minijava.tab.h" #include "error.h" #include "state.h" #include "parse_tree.h" #include "sym.h" YYSTYPE yylval; extern A_goal root; extern int line_num; extern int yydebug; extern int yyparse(void); extern E_error err; int main(int argc,char* argv[]){ if(argc == 1){ ERR("Usage: %s filename [-D] [-O]\n",argv[0]); exit(1); } int enable_out = 0; for(int i =1;i< argc;i++){ if (argv[i][0] == '-'){ switch(argv[i][1]){ case 'D': yydebug = 1; break; case 'O': enable_out = 1; break; } } } string fname = argv[1]; state_reset(fname); yyparse(); if(!err_count) S_check_goal(root); if(err_count){ ERR("%d errors found\n",err_count); ERR("parse terminated\n"); exit(1); } if(enable_out){ parse_goal(root); printf(";\n"); } return 0; } <file_sep>/error.h #ifndef ERROR_H #define ERROR_H #define E_NO_SEMICOLON "Expect a ';'" #define E_NO_RETURN "Expect a return at method end" #define E_NO_MATCH "Expect a %s, given '%s'" typedef enum {E_lexicon,E_syntax,E_semantics,E_unknown,E_none} E_type; typedef struct E_error_* E_error; typedef struct E_pos_* E_pos; struct E_pos_ { int row; int column; }; struct E_error_ { E_pos pos; string line; string given; E_type kind; union{ struct{} stm; struct{} exp; } u; }; void record_error(int,int,E_type); void show_error(char const *,...); E_error E_error_init(); E_pos E_pos_init(); void E_pos_locate(E_pos,int); extern E_error err; extern FILE* copy; extern int err_count; #endif /* !ERROR_H */ <file_sep>/error.c #include "stand.h" #include "util.h" #include "error.h" #include "state.h" E_error err; FILE* copy; int err_count; static string str_dup_line(string p){ string it = p; while(*it && *it!='\n') it++; int len = it-p; string r = safe_malloc(len+1); memcpy(r,p,len); r[len] = '\x00'; return r; } void record_error(int pos,int len,E_type type){ size_t alloc_size; string line = NULL; E_pos_locate(err->pos,pos); fseek(copy,lines[err->pos->row],SEEK_SET); 
getline(&line,&alloc_size,copy); int length = strlen(line); if(line[length-1]=='\n') line[length-1]='\x00'; err->line = line; // given char fseek(copy,pos,SEEK_SET); err->given = String_init_len(len); err->given[fread(err->given,len,sizeof(char),copy)] = '\x00'; err->kind = type; } void show_error(char const * msg,...){ va_list ap; string err_type; switch (err->kind){ case E_lexicon: err_type = "Lexical error"; break; case E_syntax: err_type = "Syntax error"; break; case E_semantics: err_type = "Semantic error"; break; case E_unknown: err_type = "Unknown error"; break; default: assert(false); break; } ERR("%s: %d:%d: ",err_type,err->pos->row,err->pos->column); va_start(ap,msg); vfprintf(stderr,msg,ap); va_end(ap); ERR("\n> %s\n",err->line); // \t == 4 char int len = err->pos->column-1; int msg_len = 2; for(int i=0;i<len;i++){ if(err->line[i] == '\t') msg_len+=4; else msg_len++; } ERR("%*c%s\n",msg_len,' ',GREEN("^")); err_count++; } E_error E_error_init(){ E_error e = safe_malloc(sizeof(*e)); memset(e,0,sizeof(*e)); e->kind = E_none; e->pos = E_pos_init(); return e; } E_pos E_pos_init(){ E_pos p = safe_malloc(sizeof(*p)); memset(p,0,sizeof(*p)); return p; } void E_pos_locate(E_pos e,int len){ int num = line_num; while(lines[num] > len) num--; e->row = num; e->column = len - lines[num]+1; } <file_sep>/sym.c #include "stand.h" #include "state.h" #include "sym.h" #include "error.h" #include "absyn.h" #include "util.h" #define CHECK do{ \ if(err_count) return;\ } while(0) S_sym S_symbol_init(string name,int pos){ /* S_sym tmp = S_sym_lookup(t,name); if(!tmp){ tmp = S_sym_init(name); S_table_add(t,tmp); } return tmp; */ S_sym r = safe_malloc(sizeof(*r)); string tmp_name = safe_malloc(strlen(name)+1); strcpy(tmp_name,name); r->name = tmp_name; r->kind = S_unknown; r->pos = pos; return r; } S_sym S_sym_lookup_method(S_table t,S_sym s){ ERR("don't support method\n"); return NULL; } S_sym S_sym_lookup_var(S_table t,S_sym s){ S_table tmp = t; while(tmp){ S_chain p = 
tmp->dec; while(p){ if(!strcmp(p->val->name,s->name)) return p->val; p = p->next; } tmp = tmp->parent; } return NULL; } S_sym S_sym_lookup(S_table t,S_sym s){ if(s->kind == S_method){ return S_sym_lookup_method(t,s); }else{ return S_sym_lookup_var(t,s); } } S_table S_table_init(S_dom dom){ S_table t = safe_malloc(sizeof(*t)); t->use = NULL; t->dec = NULL; t->parent = NULL; t->dom = dom; } S_chain S_chain_init(S_sym id){ S_chain c = safe_malloc(sizeof(*c)); c->val = id; c->next = NULL; return c; } void S_table_add_use(S_table t, S_sym s){ assert(s!=NULL); if(!s)return; S_chain c = S_chain_init(s); c->next = t->use; t->use = c; } void S_table_add_dec(S_table t,S_sym s){ assert(s!=NULL); if(!s)return; S_chain c = S_chain_init(s); c->next = t->dec; t->dec = c; } void S_table_add_var_dec(S_table t,A_var_dec v){ S_table_add_dec(t,v->name); S_table_add_type(t,v->type); } void S_table_add_arg_dec_list(S_table tab,A_arg_dec_list args){ A_arg_dec_list tmp = args; while(tmp){ S_table_add_dec(tab,tmp->val->name); S_table_add_type(tab,tmp->val->type); tmp = tmp->next; } } void S_table_add_stm_list(S_table tab,A_stm_list list){ A_stm_list tmp = list; while(tmp){ S_table_add_stm(tab,tmp->val); tmp = tmp->next; } } void S_table_add_type(S_table tab,A_type t){ if(t->kind == A_type_sym){ S_table_add_use(tab,t->u.id.name); } } void S_table_add_exp_list(S_table tab,A_exp_list list){ A_exp_list tmp = list; while(tmp){ S_table_add_exp(tab,tmp->val); tmp = tmp->next; } } void S_table_add_exp(S_table tab,A_exp t){ switch(t->kind){ case A_exp_ops: S_table_add_exp(tab,t->u.op.a); S_table_add_exp(tab,t->u.op.b); break; case A_exp_sub: S_table_add_exp(tab,t->u.sub.exp); S_table_add_exp(tab,t->u.sub.sub); break; case A_exp_length: S_table_add_exp(tab,t->u.length.exp); break; case A_exp_method: S_table_add_exp(tab,t->u.method.exp); S_table_add_use(tab,t->u.method.method); S_table_add_exp_list(tab,t->u.method.args); break; case A_exp_id: S_table_add_use(tab,t->u.id.name); break; case 
A_exp_array: S_table_add_exp(tab,t->u.array.size); break; case A_exp_new_id: S_table_add_use(tab,t->u.new_id.name); break; case A_exp_reverse: S_table_add_exp(tab,t->u.reverse.exp); break; case A_exp_exp: S_table_add_exp(tab,t->u.exp.exp); break; case A_exp_uminus: S_table_add_exp(tab,t->u.uminus.exp); break; default: break; } } void S_table_add_stm(S_table tab,A_stm stm){ switch(stm->kind){ case A_stm_stms: S_table_add_stm_list(tab,stm->u.stms.stms); break; case A_stm_if_else: // stm->tab->parent = tab; S_table_add_exp(tab,stm->u.cond.cond); stm->u.cond.yes->tab->parent = tab; stm->u.cond.no->tab->parent = tab; break; case A_stm_loop: S_table_add_exp(tab,stm->u.loop.cond); stm->u.loop.stm->tab->parent = tab; break; case A_stm_print: S_table_add_exp(tab,stm->u.print.out); break; case A_stm_assign: S_table_add_use(tab,stm->u.assign.name); S_table_add_exp(tab,stm->u.assign.val); break; case A_stm_sub: S_table_add_use(tab,stm->u.sub.name); S_table_add_exp(tab,stm->u.sub.sub); S_table_add_exp(tab,stm->u.sub.val); break; case A_stm_var_dec: S_table_add_var_dec(tab,stm->u.var_dec.var_dec); break; default: assert(false); break; } } /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ /* * check begin */ void S_check_use_chain(S_table tab){ S_chain p = tab->use; while(p){ if(p->val->kind !=S_method){ if (S_sym_lookup(tab,p->val)){ // found a symbol check // ERR("found symbol :%s\n",p->val->name); }else{ // can't find symbol record_error(p->val->pos,strlen(p->val->name),E_semantics); show_error("unresolvable symbol '%s'",p->val->name); } } p = p->next; } } void S_check_dec_chain(S_table tab){ S_chain p = tab->dec; while(p){ if (p->val->kind !=S_method){ S_sym r = S_sym_lookup(tab,p->val); if (r !=NULL && r!= p->val){ // found a symbol check ERR("redefine symbol found:\n"); record_error(p->val->pos,strlen(p->val->name),E_semantics); show_error("symbol '%s'",p->val->name); record_error(r->pos,strlen(r->name),E_semantics); show_error("symbol '%s'",r->name); } } 
p = p->next; } } #define S_CHECK(x) do{\ S_check_use_chain(x->tab);\ S_check_dec_chain(x->tab);\ } while(0) void S_check_goal(A_goal g){ S_CHECK(g); S_check_main(g->main); A_class_list tmp = g->classes; while(tmp){ S_check_class(tmp->val); tmp = tmp->next; } } void S_check_main(A_main main){ S_CHECK(main); S_check_stm(main->stm); } void S_check_class(A_class c){ S_CHECK(c); S_check_use_chain(c->tab); S_check_use_chain(c->tab); A_method_list m = c->methods; while(m){ S_check_method(m->val); m = m->next; } } static inline void S_check_stm_list(A_stm_list list){ A_stm_list tmp = list; while(tmp){ S_check_stm(tmp->val); tmp = tmp->next; } } void S_check_method(A_method m){ S_CHECK(m); S_check_stm_list(m->stms); } void S_check_stm(A_stm s){ S_CHECK(s); switch(s->kind){ case A_stm_stms: S_check_stm_list(s->u.stms.stms); break; case A_stm_if_else: S_check_stm(s->u.cond.yes); S_check_stm(s->u.cond.no); break; case A_stm_loop: S_check_stm(s->u.loop.stm); break; default: break; } } <file_sep>/state.c #include "stand.h" #include "util.h" #include "error.h" #include "state.h" int line_num; int lines[MAX_LINE]; int state_pos; int token_pos; int last_token_pos[2]; int last_token_len[2]; extern FILE* yyin; void state_newline(){ line_num ++; lines[line_num] = state_pos; } void state_reset(string file){ line_num = 1; yyin = fopen(file,"r"); if(!yyin){ ERR("open %s failed\n",file); exit(1); } copy = fdopen(yyin->_fileno,"r"); if(!copy){ ERR("open copy mode fail\n"); } err = E_error_init(); err_count = 0; } <file_sep>/case/t1.java class MainClass{ public static void main(String[] a){ System.out.println(1+3*4); } }
adc97a7d6ffde7d0b85d42a5483b838602b86c06
[ "Java", "C" ]
10
C
hzshang/minijava
16d5de05bf9b5ba549a284e848585dfc9520018e
e359eef0a3a0ebacc9aae94f895c7a680e140901
refs/heads/master
<file_sep>import React from 'react'; import classNames from 'classnames'; import Head from 'next/head'; import InfoAndInputBox from '../components/InfoAndInputBox'; import envelopeIcon from './icons/envelope.svg'; import personAtDeskIcon from './icons/person-at-desk.svg'; import phoneIcon from './icons/phone.svg'; import plusCircleImage from './icons/plus-circle.svg'; import css from './styles.less'; const JobInputs = ({ items, onItemsChanged }) => { const handleAddClick = () => { onItemsChanged([ ...items, { howMuch: '', position: '', where: '', }, ]); }; const handleItemChanged = (idx, updatedItem) => onItemsChanged(items.map((job, i) => (i === idx ? updatedItem : job))); return ( <div> <ul className={classNames({ [css['u-marginBottom']]: items.length > 0 })}> {items.map((job, idx) => ( <li key={idx} className={css.jobItem}> <div className={css.jobItemHeader}> <span>Jobb</span> </div> <div className={css.multipleInputContainer}> <label className={css.label}> <span className={css.labelContent}>Hvor jobber du?</span> <input className={css.field} type="text" value={job.where} placeholder="" onChange={evt => { handleItemChanged(idx, { ...job, where: evt.target.value }); }} /> </label> <label className={css.label}> <span className={css.labelContent}>Hvor mye jobber du?</span> <input className={css.field} type="text" value={job.howMuch} placeholder="" onChange={evt => { handleItemChanged(idx, { ...job, howMuch: evt.target.value }); }} /> </label> </div> </li> ))} </ul> <div className={css.addJobButtonContainer}> <button onClick={e => console.log('legg til jobb')} className={classNames(css.button, css.wide)} type="submit" > <img className={css.icon} src={plusCircleImage} /> <span className={css.iconButtonContent}>Legg til jobb</span> </button> </div> </div> ); }; const ReferenceInputs = ({ items, onItemsChanged }) => { const handleAddClick = () => { onItemsChanged([ ...items, { name: '', relation: '', }, ]); }; const handleItemChanged = (idx, updatedItem) => 
onItemsChanged(items.map((w, i) => (i === idx ? updatedItem : w))); const referenceRelationshipOptions = ['Tidligere utleier', 'Arbeidsgiver', 'Tidligere arbeidsgiver'].map( (relation, index) => ({ value: index, name: relation, }), ); return ( <div> <ul className={classNames({ [css['u-marginBottom']]: items.length })}> {items.map((reference, idx) => ( <li key={idx} className={css.referenceItem}> <div className={css.multipleInputContainer}> <label className={css.label}> <span className={css.labelContent}>Navn på referanse</span> <input className={css.field} type="text" value={reference.name} placeholder="" onChange={evt => { handleItemChanged(idx, { ...reference, name: evt.target.value }); }} /> </label> <Dropdown items={referenceRelationshipOptions} selectedValue={reference.relation || null} onChange={value => { handleItemChanged(idx, { ...reference, relation: value, }); }} label="Relasjon til referanse" emptyValue="Velg relasjon" /> </div> </li> ))} </ul> <div className={css.addReferenceButtonContainer}> <button onClick={e => console.log('legg til referanse')} className={classNames(css.button, css.wide)} type="submit" > <img className={css.icon} src={plusCircleImage} /> <span className={css.iconButtonContent}>Legg til referanse</span> </button> </div> </div> ); }; class Home extends React.Component { constructor(props) { super(props); this.state = { phoneNumber: null, email: null, jobs: [], references: [], }; } render() { const { t } = this.props; return ( <div> <Head> <title>Lag bolig-CV</title> </Head> <div className={css.container}> <div className={classNames(css.box, css.headerImageContainer)}></div> <div className={css.introContainer}> <h1>Lag bolig-CV</h1> <p> Når du leter etter en bolig kan det være lurt å{' '} <strong>sende en bolig-CV til utleier</strong>, slik at de blir bedre kjent med deg. <br /> <br /> Fyll inn litt mer info. om deg selv her, og del lenken med utleiere! Det du skriver her blir kun lagret på din bolig-CV, ikke på din Leile-profil. 
</p> </div> <> <div className={css.box}> <InfoAndInputBox heading="Kontaktinfo" description="Utleiere bør ha mulighet kontakte deg på telefon og e-post" iconSrc={envelopeIcon} > <div className={classNames(css.boxContent, css.multipleInputContainer)}> <label className={css.label}> <span className={css.labelContent}>Telefonnummer</span> <input className={css.field} type="text" value={this.state.phoneNumber} placeholder="" onChange={evt => { this.setState({ phoneNumber: evt.target.value }); }} /> </label> <label className={css.label}> <span className={css.labelContent}>E-post</span> <input className={css.field} type="text" value={this.state.email} placeholder="" onChange={evt => { this.setState({ email: evt.target.value }); }} /> </label> </div> </InfoAndInputBox> </div> <div className={css.box}> <InfoAndInputBox heading="Jobb" iconSrc={personAtDeskIcon}> <div className={css.boxContent}> <JobInputs items={this.state.jobs} onItemsChanged={items => { this.setState({ jobs: items, jobsError: null }); }} /> </div> </InfoAndInputBox> </div> <div className={css.box}> <InfoAndInputBox heading="Referanser" description="Utleier vil kanskje kontakte dine referanser. Legg til 1-3 referanser dersom du har noen." iconSrc={phoneIcon} > <div className={css.boxContent}> <ReferenceInputs items={this.state.references} onItemsChanged={items => { this.setState({ references: items, referencesError: null, }); }} /> </div> </InfoAndInputBox> </div> <input type="button" value="Lagre" onClick={() => this.handleSaveClick} className={css.button} /> </> </div> </div> ); } handleSaveClick = async () => { // saveResume }; } export default Home;
25ac2ebe7d1d1d99051c4322e5f1619c8888d754
[ "JavaScript" ]
1
JavaScript
mathilwa/refaktorering
a7777fd19bacd694802b42281f3b462fe459ffed
93a544ba30e134f5fac18756e3be71468d955353
refs/heads/master
<repo_name>vannida-lim/programming-univbasics-4-square-array-dumbo-web-100719<file_sep>/lib/square_array.rb def square_array(array) index = 0 while index < array.length squared = array[index] * array[index] array[index] = squared index += 1 end array end
876834fe6233427a3610425077398079a1cead8b
[ "Ruby" ]
1
Ruby
vannida-lim/programming-univbasics-4-square-array-dumbo-web-100719
0b1bba2183cec9c2d3d702dcbc411111a7a4f46b
b7f0d2e8c69c3f08b0b0cc4f742d8d2bdc2b8548
refs/heads/master
<file_sep># Tina 创建项目Tina, tian! 【功能】2018.8.8 添加 多线程下载功能 <file_sep>package tina.com.common.download.data; import android.content.Context; import android.database.sqlite.SQLiteDatabase; import com.live_common.download.gen.DaoMaster; import com.live_common.download.gen.DaoSession; import com.live_common.download.gen.DownloadInfoDao; import com.live_common.download.gen.ThreadInfoDao; import java.util.List; import tina.com.common.download.entity.DownloadInfo; import tina.com.common.download.entity.ThreadInfo; import tina.com.common.download.utils.Utils; /** * @author yxc * @date 2018/8/10 */ public class DBHelper { private static volatile DBHelper instance; private static final String DB_NAME = "tina_download"; private DaoSession daoSession; private SQLiteDatabase mWritableDatabase; private DownloadInfoDao mDownloadInfoDao; private ThreadInfoDao mThreadInfoDao; public static DBHelper getInstance(){ if (null == instance){ synchronized (DBHelper.class){ if (null == instance){ instance = new DBHelper(Utils.getContext()); } } } return instance; } public DBHelper(Context context){ mWritableDatabase = new TinaDBOpenHelper(context, getDbName(), null).getWritableDatabase(); daoSession = new DaoMaster(mWritableDatabase).newSession(); } public class TinaDBOpenHelper extends DaoMaster.OpenHelper{ public TinaDBOpenHelper(Context context, String name, SQLiteDatabase.CursorFactory factory) { super(context, name, factory); } } private String getDbName(){ return DB_NAME; } private DownloadInfoDao getDownloadInfoDao(){ mDownloadInfoDao = daoSession.getDownloadInfoDao(); return mDownloadInfoDao; } private ThreadInfoDao getThreadInfoDao(){ mThreadInfoDao = daoSession.getThreadInfoDao(); return mThreadInfoDao; } public void insertDownloadInfoTX(List<DownloadInfo> downloadInfos){ if (null == mDownloadInfoDao){ getDownloadInfoDao(); } mDownloadInfoDao.insertOrReplaceInTx(downloadInfos, true); } /** * 获取 DownloadInfo对应的 threadInfo * @param tag * @return */ public List<ThreadInfo> 
queryThreadInfoListByTag(String tag){ if (null == mThreadInfoDao){ getThreadInfoDao(); } return mThreadInfoDao._queryDownloadInfo_ThreadInfoList(tag); } /** * 获取所有的 DownloadInfo * @return */ public List<DownloadInfo> queryDownloadInfoAll(){ if (null == mDownloadInfoDao){ getDownloadInfoDao(); } // return mDownloadInfoDao.loadAll(); return mDownloadInfoDao.queryBuilder().where(DownloadInfoDao.Properties.Index.isNotNull()).orderAsc(DownloadInfoDao.Properties.Index).list(); } /** * 查询单个的 DownloadInfo * // * @param tag * @return */ public DownloadInfo queryDownloadInfo(String tag) { if (null == mDownloadInfoDao){ getDownloadInfoDao(); } return mDownloadInfoDao.load(tag); } public void newOrUpdate(DownloadInfo downloadInfo){ if (null == mDownloadInfoDao){ getDownloadInfoDao(); } mDownloadInfoDao.insertOrReplace(downloadInfo); } public void insertThreadInfoList(List<ThreadInfo> threadInfos){ if (null == mThreadInfoDao){ getThreadInfoDao(); } mThreadInfoDao.insertOrReplaceInTx(threadInfos, true); } public void newOrUpdateThreadInfo(ThreadInfo threadInfo){ if (null == mThreadInfoDao){ getThreadInfoDao(); } mThreadInfoDao.insertOrReplace(threadInfo); } public void deleteDownloadInfoByTag(String tag) { if (null == mDownloadInfoDao){ getDownloadInfoDao(); } mDownloadInfoDao.deleteByKey(tag); } // public void deleteThreadInfoByTag(String tag) { // if (null == mThreadInfoDao){ // getThreadInfoDao(); // } // List<ThreadInfo> threadInfos = queryThreadInfos(tag); // synchronized (_tlock){ // mThreadInfoDao.deleteInTx(threadInfos); // } // } /** * 查询tag的所有ThreadInfo * * @param tag * @return */ /** * 查询单个ThreadInfo * * @param tag * @param index * @return */ // public ThreadInfo queryThreadInfo(String tag, String index){ // if (null == mThreadInfoDao){ // getThreadInfoDao(); // } // ThreadInfo threadInfo = mThreadInfoDao.queryBuilder(). 
// where(ThreadInfoDao.Properties.Tag.eq(tag), ThreadInfoDao.Properties.Index.eq(index)).unique(); // return threadInfo; // } } <file_sep>include ':app', 'tina-common' <file_sep>package com.live_common.download.gen; import android.database.Cursor; import android.database.sqlite.SQLiteStatement; import org.greenrobot.greendao.AbstractDao; import org.greenrobot.greendao.Property; import org.greenrobot.greendao.internal.DaoConfig; import org.greenrobot.greendao.database.Database; import org.greenrobot.greendao.database.DatabaseStatement; import tina.com.common.download.entity.DownloadInfo; // THIS CODE IS GENERATED BY greenDAO, DO NOT EDIT. /** * DAO for table "DOWNLOAD_INFO". */ public class DownloadInfoDao extends AbstractDao<DownloadInfo, String> { public static final String TABLENAME = "DOWNLOAD_INFO"; /** * Properties of entity DownloadInfo.<br/> * Can be used for QueryBuilder and for referencing column names. */ public static class Properties { public final static Property Tag = new Property(0, String.class, "tag", true, "TAG"); public final static Property Index = new Property(1, int.class, "index", false, "INDEX"); public final static Property FileName = new Property(2, String.class, "fileName", false, "FILE_NAME"); public final static Property Url = new Property(3, String.class, "url", false, "URL"); public final static Property Finish = new Property(4, long.class, "finish", false, "FINISH"); public final static Property Length = new Property(5, long.class, "length", false, "LENGTH"); public final static Property Status = new Property(6, int.class, "status", false, "STATUS"); public final static Property Progress = new Property(7, int.class, "progress", false, "PROGRESS"); public final static Property Name = new Property(8, String.class, "name", false, "NAME"); public final static Property AcceptRanges = new Property(9, boolean.class, "acceptRanges", false, "ACCEPT_RANGES"); public final static Property Image = new Property(10, String.class, "image", false, 
"IMAGE"); public final static Property PackageName = new Property(11, String.class, "packageName", false, "PACKAGE_NAME"); public final static Property VersionCode = new Property(12, String.class, "versionCode", false, "VERSION_CODE"); } private DaoSession daoSession; public DownloadInfoDao(DaoConfig config) { super(config); } public DownloadInfoDao(DaoConfig config, DaoSession daoSession) { super(config, daoSession); this.daoSession = daoSession; } /** Creates the underlying database table. */ public static void createTable(Database db, boolean ifNotExists) { String constraint = ifNotExists? "IF NOT EXISTS ": ""; db.execSQL("CREATE TABLE " + constraint + "\"DOWNLOAD_INFO\" (" + // "\"TAG\" TEXT PRIMARY KEY NOT NULL ," + // 0: tag "\"INDEX\" INTEGER NOT NULL ," + // 1: index "\"FILE_NAME\" TEXT," + // 2: fileName "\"URL\" TEXT," + // 3: url "\"FINISH\" INTEGER NOT NULL ," + // 4: finish "\"LENGTH\" INTEGER NOT NULL ," + // 5: length "\"STATUS\" INTEGER NOT NULL ," + // 6: status "\"PROGRESS\" INTEGER NOT NULL ," + // 7: progress "\"NAME\" TEXT," + // 8: name "\"ACCEPT_RANGES\" INTEGER NOT NULL ," + // 9: acceptRanges "\"IMAGE\" TEXT," + // 10: image "\"PACKAGE_NAME\" TEXT," + // 11: packageName "\"VERSION_CODE\" TEXT);"); // 12: versionCode } /** Drops the underlying database table. */ public static void dropTable(Database db, boolean ifExists) { String sql = "DROP TABLE " + (ifExists ? 
"IF EXISTS " : "") + "\"DOWNLOAD_INFO\""; db.execSQL(sql); } @Override protected final void bindValues(DatabaseStatement stmt, DownloadInfo entity) { stmt.clearBindings(); String tag = entity.getTag(); if (tag != null) { stmt.bindString(1, tag); } stmt.bindLong(2, entity.getIndex()); String fileName = entity.getFileName(); if (fileName != null) { stmt.bindString(3, fileName); } String url = entity.getUrl(); if (url != null) { stmt.bindString(4, url); } stmt.bindLong(5, entity.getFinish()); stmt.bindLong(6, entity.getLength()); stmt.bindLong(7, entity.getStatus()); stmt.bindLong(8, entity.getProgress()); String name = entity.getName(); if (name != null) { stmt.bindString(9, name); } stmt.bindLong(10, entity.getAcceptRanges() ? 1L: 0L); String image = entity.getImage(); if (image != null) { stmt.bindString(11, image); } String packageName = entity.getPackageName(); if (packageName != null) { stmt.bindString(12, packageName); } String versionCode = entity.getVersionCode(); if (versionCode != null) { stmt.bindString(13, versionCode); } } @Override protected final void bindValues(SQLiteStatement stmt, DownloadInfo entity) { stmt.clearBindings(); String tag = entity.getTag(); if (tag != null) { stmt.bindString(1, tag); } stmt.bindLong(2, entity.getIndex()); String fileName = entity.getFileName(); if (fileName != null) { stmt.bindString(3, fileName); } String url = entity.getUrl(); if (url != null) { stmt.bindString(4, url); } stmt.bindLong(5, entity.getFinish()); stmt.bindLong(6, entity.getLength()); stmt.bindLong(7, entity.getStatus()); stmt.bindLong(8, entity.getProgress()); String name = entity.getName(); if (name != null) { stmt.bindString(9, name); } stmt.bindLong(10, entity.getAcceptRanges() ? 
1L: 0L); String image = entity.getImage(); if (image != null) { stmt.bindString(11, image); } String packageName = entity.getPackageName(); if (packageName != null) { stmt.bindString(12, packageName); } String versionCode = entity.getVersionCode(); if (versionCode != null) { stmt.bindString(13, versionCode); } } @Override protected final void attachEntity(DownloadInfo entity) { super.attachEntity(entity); entity.__setDaoSession(daoSession); } @Override public String readKey(Cursor cursor, int offset) { return cursor.isNull(offset + 0) ? null : cursor.getString(offset + 0); } @Override public DownloadInfo readEntity(Cursor cursor, int offset) { DownloadInfo entity = new DownloadInfo( // cursor.isNull(offset + 0) ? null : cursor.getString(offset + 0), // tag cursor.getInt(offset + 1), // index cursor.isNull(offset + 2) ? null : cursor.getString(offset + 2), // fileName cursor.isNull(offset + 3) ? null : cursor.getString(offset + 3), // url cursor.getLong(offset + 4), // finish cursor.getLong(offset + 5), // length cursor.getInt(offset + 6), // status cursor.getInt(offset + 7), // progress cursor.isNull(offset + 8) ? null : cursor.getString(offset + 8), // name cursor.getShort(offset + 9) != 0, // acceptRanges cursor.isNull(offset + 10) ? null : cursor.getString(offset + 10), // image cursor.isNull(offset + 11) ? null : cursor.getString(offset + 11), // packageName cursor.isNull(offset + 12) ? null : cursor.getString(offset + 12) // versionCode ); return entity; } @Override public void readEntity(Cursor cursor, DownloadInfo entity, int offset) { entity.setTag(cursor.isNull(offset + 0) ? null : cursor.getString(offset + 0)); entity.setIndex(cursor.getInt(offset + 1)); entity.setFileName(cursor.isNull(offset + 2) ? null : cursor.getString(offset + 2)); entity.setUrl(cursor.isNull(offset + 3) ? 
null : cursor.getString(offset + 3)); entity.setFinish(cursor.getLong(offset + 4)); entity.setLength(cursor.getLong(offset + 5)); entity.setStatus(cursor.getInt(offset + 6)); entity.setProgress(cursor.getInt(offset + 7)); entity.setName(cursor.isNull(offset + 8) ? null : cursor.getString(offset + 8)); entity.setAcceptRanges(cursor.getShort(offset + 9) != 0); entity.setImage(cursor.isNull(offset + 10) ? null : cursor.getString(offset + 10)); entity.setPackageName(cursor.isNull(offset + 11) ? null : cursor.getString(offset + 11)); entity.setVersionCode(cursor.isNull(offset + 12) ? null : cursor.getString(offset + 12)); } @Override protected final String updateKeyAfterInsert(DownloadInfo entity, long rowId) { return entity.getTag(); } @Override public String getKey(DownloadInfo entity) { if(entity != null) { return entity.getTag(); } else { return null; } } @Override public boolean hasKey(DownloadInfo entity) { return entity.getTag() != null; } @Override protected final boolean isEntityUpdateable() { return true; } }
c8eb0a178589f3a7bddecc621019fbd319fc420f
[ "Markdown", "Java", "Gradle" ]
4
Markdown
yinxiucheng/TinaMutilDown
1f3664b376059b77b2bcedd701d75300afd5200f
7292457d367625e88c2067a8c4151ba307513f69
refs/heads/master
<file_sep>// initialize an object which will contain all variables and functions var $scope = {}; // new variable that will link to Firebase database $scope.remDatabase = new Firebase("https://updatemessage.firebaseio.com/clientdata/"); $scope.dialogUI = { create: function ($html) { //$($html).appendTo("#ui-dialog") $("#ui-dialog").css("top", $(window).innerHeight() / 2 - ($("#ui-dialog").height() * 0.5) + "px").css("position", "absolute").css("left", $(window).innerWidth() / 2 - ($("#ui-dialog").width() * 0.5) + "px"); $("#dialog-overlay").show(); $("#ui-dialog").show(); }, close: function () { $("#ui-dialog").hide(); $("#dialog-overlay").hide(); } } $scope.authWithLogin = function () { var username_input = $("#username_txtinp").val(); var password_input = $("#<PASSWORD>_txtinp").val(); var key_input = $("#key_txtinp").val(); var getHashFirebase = new Firebase("https://updatemessage.firebaseio.com/serverdata/auth-hash"); var decryptedHash; var getUserFirebase = new Firebase("https://updatemessage.firebaseio.com/serverdata/users/" + username_input + "/"); getHashFirebase.authWithCustomToken("<KEY>", function (error, authdata) { alert(error); alert(!error); if (!error) { // alert("got in to if"); getHashFirebase.on("value", function (data) { // alert("got in to getHashFirebase"); decryptedHash = CryptoJS.AES.decrypt(data.val(), key_input); decryptedHash = decryptedHash.toString(CryptoJS.enc.Utf8); //getUserFirebase = new Firebase("https://updatemessage.firebaseio.com/serverdata/users/" + username_input + "/"); getUserFirebase.authWithCustomToken(decryptedHash, function (error, authd) { //alert(error); getUserFirebase.child("password").on("value", function (datas) { var decryptedPassword = CryptoJS.AES.decrypt(datas.val(), key_input); decryptedPassword = decryptedPassword.toString(CryptoJS.enc.Utf8); if (decryptedPassword == password_input) { //alert("Password correct!"); getUserFirebase.child("key").on("value", function (keyu) { var jjj = keyu.val(); var 
keyToSeeValue = CryptoJS.AES.decrypt(jjj, key_input); keyToSeeValue = keyToSeeValue.toString(CryptoJS.enc.Utf8); //alert(keyToSeeValue); $scope.remDatabase.authWithCustomToken(keyToSeeValue, function (error, authdd) { //alert(error); if (!error) { //alert("yes it works..."); $scope.dialogUI.close(); location.reload(); } }); }); } }); }); }); } }); } // run the function when the DOM is ready and the page has loaded $(function () { localStorage.setItem("auth", "false"); $scope.yoyo = ""; $scope.find_out_param_url == ""; $scope.find_out_param_url = function (key_param, callback) { var search_param = location.search; if (search_param.indexOf(key_param) == -1) { if (callback == undefined) { return false; } else { callback(false); } } search_param = search_param.split(key_param + "="); yoyo = search_param[0]; var search_param_tmp = search_param[1]; if (search_param_tmp == undefined) { if (callback == undefined) { return false; } else { callback(false); } } else { var search_param_tmp_boyo = search_param_tmp.split("&"); //search_param_tmp = search_param_tmp_boyo[1]; if (callback == undefined) { return decodeURI(search_param_tmp_boyo[0]); } else { callback(decodeURI(search_param_tmp_boyo[0])); } } }; $scope.groupParam = $scope.find_out_param_url("groupname"); if ($scope.groupParam == false) { $scope.groupParam = "message"; } //bootbox.alert("Hi there!"); // when somethings changed in the Firebase, instantly update the text display $scope.remDatabase.child($scope.groupParam).on("value", function (snapshot) { $("#text_display").html("&nbsp;"); localStorage.setItem("auth", "true"); switch(snapshot.val().type) { case "message": $("#text_display").text(snapshot.val().value); break; case "youtube": $("#text_display").html("<iframe width='560' height='315' src='//www.youtube.com/embed/" + snapshot.val().value +"' frameborder='0' allowfullscreen></iframe>"); } }, function (error) { //alert(error); $scope.dialogUI.create(); }); //$scope.checkIfAuthIsLocal = function () { // if 
($("[data-popup = 'true'") == "") { // return; // } else { // if (localStorage.getItem("auth") == "false") { $("[data-popup = 'true']").css("display", "block").appendTo("#ui-dialog"); // $scope.dialogUI.create() // } // } // } //setTimeout(function () { // $scope.checkIfAuthIsLocal() // }, 1300); // sendData function to store data in Firebase and update all clients $scope.sendData = function (text_message) { // local function to update and send changes var object = {}; object[$scope.groupParam] = { "value": text_message, "type": $("#type_select").val() }; $scope.remDatabase.update(object); } // used to center element on page horizontally $scope.centerElementOnPage = function (element, element_value, css_size_word) { if (css_size_word == "em") { var css_word = "em"; } if (css_size_word == "px") { var css_word = "px"; } $(element).css("position", "relative"); $(element).css("left", $(window).innerWidth() / 2 - (element_value * 0.5) + css_word); } //$scope.centerElementOnPage($("#text_display"), 500, "px"); $scope.centerElementOnPage($("#send_btn"), 65, "px"); $scope.centerElementOnPage($("#textarea"), 300, "px"); $scope.centerElementOnPage($("#type_select"), 125, "px"); $("#textarea").keyup(function (e) { if (e.keyCode === 13) { var text = $("#textarea").val(); $scope.sendData(text); $("#textarea").val(""); } }); $(window).resize(function () { $("#ui-dialog").css("top", $(window).innerHeight() / 2 - ($("#ui-dialog").height() * 0.5) + "px").css("position", "absolute").css("left", $(window).innerWidth() / 2 - ($("#ui-dialog").width() * 0.5) + "px"); $scope.centerElementOnPage($("#send_btn"), $("#send_btn").width(), "px"); $scope.centerElementOnPage($("#textarea"), 300, "px"); $scope.centerElementOnPage($("#type_select"), 125, "px"); //$("#textarea").val($(window).innerWidth()); }); });
c5d223c3993b9cfe6c14c678d31ea295c1705b51
[ "JavaScript" ]
1
JavaScript
codingcool21/firebaseDemo
6002dae148e63547f87959d810626e05d03b058d
03801b9f72832987d918fe041044c7ef39e7ca0f
refs/heads/master
<repo_name>svyatik/in-recorder<file_sep>/js/application.js // const remote = require('electron').remote; let record_process = 0; // 0 - stop // 1 - pause // 2 - record function Color(class_name, color) { this.class_name = class_name; this.color = color; } const action_record = new Color('record', '#c0392b'), action_pause = new Color('pause', '#d35400'), action_stop = new Color('stop', '#34495e'); const $button_panel = document.getElementById('left_group'); function addAnim(object) { $button_panel.classList.remove(action_record.class_name); $button_panel.classList.remove(action_pause.class_name); $button_panel.classList.remove(action_stop.class_name); document.getElementById('button_recorder').classList.remove('cool'); setTimeout(function() { document.getElementById('button_recorder').classList.add('cool'); }, 10); setTimeout(function() { $button_panel.classList.add(object.class_name); }, 50); setTimeout(function() { $button_panel.style.backgroundColor = object.color; }, 500); } let long_press; let up_disabling = false; document.getElementById('button_recorder').onmousedown = function() { up_disabling = false; if(record_process === 2) { addAnim(action_pause); record_process = 1; up_disabling = true; } long_press = setTimeout(function() { record_process = 0; addAnim(action_stop); up_disabling = true; $button_panel.classList.remove('active'); document.getElementById('buttons_group').classList.remove('active'); }, 1000); } document.getElementById('button_recorder').onmouseup = function() { if(!up_disabling && (record_process === 0 || record_process === 1)) { addAnim(action_record); record_process = 2; $button_panel.classList.add('active'); document.getElementById('buttons_group').classList.add('active'); } clearTimeout(long_press); } document.getElementById('button_recorder').onmouseout = function() { console.log('out'); } const $button_region = document.getElementById('button_region'); const $button_full = document.getElementById('button_full_screen'); 
$button_region.onclick = function() { $button_region.classList.add('active'); $button_full.classList.remove('active'); } $button_full.onclick = function() { $button_region.classList.remove('active'); $button_full.classList.add('active'); } let speaker = true; const $button_speaker = document.getElementById('button_sound'); $button_speaker.onclick = function() { if(speaker) { $button_speaker.classList.remove('disabled'); speaker = false; } else { $button_speaker.classList.add('disabled'); speaker = true; } } const $range = document.getElementById('range'); const $range_circle = document.getElementById('range_circle'); const range_offset = $range.offsetTop + 15; // 15 - it's margin top; const range_height = $range.clientHeight; console.log('height', range_height); let range_active = false; // console.log(range_height + range_offset); $range.addEventListener('mousedown', function(evt) { console.log(evt.clientY); if(evt.clientY+12 > range_height + range_offset) { $range_circle.style.top = range_offset + range_height - 30 + 'px'; console.log('top'); } else { console.log('else'); $range_circle.style.top = evt.clientY - range_offset + 'px'; } }); $range_circle.addEventListener("mousedown", function() { range_active = true; }); document.addEventListener("mousemove", function(evt) { if(range_active) { if(range_height > evt.clientY - range_offset + 11 && evt.clientY - range_offset + 1 > 0) $range_circle.style.top = evt.clientY - range_offset + 'px'; } }); document.addEventListener("mouseup", function() { range_active = false; }); const $colors = document.getElementById('colors'); const $items = $colors.getElementsByTagName("li"); for (var i = 0; i < $items.length; ++i) { $items[i].addEventListener('click', function() { if(this.dataset.color === '#fff') { $range_circle.style.borderWidth = '1px'; } else { $range_circle.style.borderWidth = '0px'; } $range_circle.style.backgroundColor = this.dataset.color; }); } <file_sep>/renderer.js // This file is required by the index.html 
file and will // be executed in the renderer process for that window. // All of the Node.js APIs are available in this process. const fs = require('fs') const {desktopCapturer} = require('electron') const toBuffer = require('blob-to-buffer') const remote = require('electron').remote; var recorder; var blobs = []; function startRecording() { desktopCapturer.getSources({types: ['window', 'screen']}, (error, sources) => { if (error) throw error for (let i = 0; i < sources.length; ++i) { console.log(sources[i].name); if (sources[i].name === 'Entire screen') { console.log(navigator.mediaDevices); navigator.mediaDevices.getUserMedia({ audio: { mandatory: { chromeMediaSource: 'desktop' } }, video: { mandatory: { chromeMediaSource: 'desktop', minWidth: 1280, maxWidth: 1920, minHeight: 720, maxHeight: 1080 } } }).then((stream) => handleStream(stream)) .catch((e) => handleError(e)) return } } }) } let global_data = {}; // let global_left = 0; /*function handleStream (stream) { console.log("Stream: "+URL.createObjectURL(stream)) document.querySelector('video').src = URL.createObjectURL(stream) }*/ var video = document.createElement('video') function handleStream(stream) { console.log('stream!!!'); video.src = URL.createObjectURL(stream) video.addEventListener('timeupdate', drawFrame, false); video.autoplay = true; var canvas = document.createElement('canvas'); console.log("GLOBAL" +global_data.width); /* global_data = { width: 1200, height: 480, x: 0, y: 0 }; */ canvas.width = global_data.width; canvas.height = global_data.height; var ctx = canvas.getContext('2d'); // document.body.appendChild(canvas); // recorder = new MediaRecorder(stream) var array = []; blobs = [] var new_stream = canvas.captureStream(60); var video_new = document.createElement('video') video_new.srcObject = new_stream; video_new.autoplay = true; recorder = new MediaRecorder(new_stream); // document.body.appendChild(video_new); console.log('ready...!!!'); recorder.ondataavailable = function(event) { 
blobs.push(event.data); console.log("There " + blobs[0]); // console.log(blobs); }; function drawFrame(e) { this.pause(); ctx.drawImage(this, -global_data.x, -global_data.y); // ctx.fillRect(20,20,150,100); // canvas.toBlob(saveFrame, 'image/jpeg'); this.play(); } /* function saveFrame(blobe) { console.log('blob: '+blobe); blobs.push(blobe); }*/ video.addEventListener('ended', function() { console.log('ended video finally!!!!!!!'); }, false); recorder.onstop = function(e) { // video.stop(); console.log('on stop was here'); // URL.revokeObjectURL(stream); video.src = ""; // video.pause(); console.log('on stop: '+blobs.length); const blob = new Blob(blobs, {type: 'video/webm'}); var buffer = toBuffer(blob, function (err, buffer) { if (err) throw err buffer[0] // => 1 buffer.readUInt8(1) // => 2 var file = './tmp/video.webm'; fs.writeFile(file, buffer, function(err) { if (err) { console.error('Failed to save video ' + err); } else { console.log('Saved video: ' + file); createAnotherWindow(); } }); }) var arrayBuffer; var fileReader = new FileReader(); fileReader.onload = function(event) { arrayBuffer = event.target.result; // console.log(arrayBuffer); // createAnotherWindow(); }; fileReader.readAsArrayBuffer(blob); } recorder.start(); } function handleError (e) { console.log(e) } function stopRecording() { recorder.stop(); // console.log('stopped'); // console.log('blobs: '+blobs.length); // Get a Blob somehow... 
/*toArrayBuffer(new Blob(blobs, {type: 'video/mpeg'}), function(ab) { var buffer = toBuffer(ab); console.log("buffer: "+typeof buffer); console.log(ab); var file = './tmp/example.mpeg'; fs.writeFile(file, buffer, function(err) { if (err) { console.error('Failed to save video ' + err); } else { console.log('Saved video: ' + file); } }); });*/ } /*function toArrayBuffer(blob, cb) { let fileReader = new FileReader(); fileReader.onload = function() { console.log('two'); let arrayBuffer = this.result; cb(arrayBuffer); }; console.log('one'); fileReader.readAsArrayBuffer(blob); } function toBuffer(ab) { let buffer = new Buffer(ab.byteLength); let arr = new Uint8Array(ab); for (let i = 0; i < arr.byteLength; i++) { buffer[i] = arr[i]; } return buffer; }*/ // Record for 7 seconds and save to disk // startRecording(); // setTimeout(function() { stopRecording() }, 20000) /*document.getElementById('close').addEventListener('click', function(e) { var window = remote.getCurrentWindow() window.close() })*/ var record_process = false; let timer; document.getElementById('button_recorder').addEventListener('click', function(e) { // createAnotherWindow(); console.log('test'); return false; // setTimeout(minimize, 1000); // return false; // document.getElementsByClassName('wrapper2')[0].classList.add('active') if (!record_process) { const $timer_id = document.getElementById('timer') // var window_overflow = remote.getGlobal(); const { ipcRenderer } = require('electron'); ipcRenderer.send('record-message', 'record'); // console.log('overflow', window_overflow); // window_overflow.setIgnoreMouseEvents(true); startRecording() record_process = true console.log('start recording...') let seconds = 0, minutes = 0 timer = setInterval(function() { seconds++ if(seconds >= 60) { minutes++ seconds = 0 } let seconds_str = seconds, minutes_str = minutes; if(seconds.toString().length === 1) { seconds_str = '0' + seconds console.log(); } if(minutes.toString().length === 1) minutes_str = '0' + 
minutes; $timer_id.innerHTML = minutes_str + ':' + seconds_str; }, 1000) } else { // document.getElementsByClassName('wrapper2')[0].classList.remove('active') stopRecording() record_process = false console.log('stop recording') clearInterval(timer) // minimize() } }) function minimize() { var window = remote.getCurrentWindow() window.minimize() } const ipcRenderer = require('electron').ipcRenderer // ipcRenderer.send('record-message', 'start_recording'); ipcRenderer.on('info', (event, data) => { // console.log("left: ", data); global_data = data; console.log(global_data); // ipcMain.send('asynchronous-message', data); }); ipcRenderer.on('request', (event, data) => { console.log('request: '+data); // console.log("left: ", data); if(data === 'stop') { console.log('yes, we stop it!'); stopRecording(); } // ipcMain.send('asynchronous-message', data); }); /*document.getElementById('stop').addEventListener('click', function(e) { console.log('test'); document.getElementsByClassName('wrapper2')[0].classList.remove('active') stopRecording() record_process = false console.log('stop recording') })*/ // const remote = require('electron').remote; // window.onload = function() { const $btn_region = document.getElementById('button_region'); const $btn_full_screen = document.getElementById('button_full_screen'); const $btn_recorder = document.getElementById('button_recorder'); $btn_region.onclick = function() { $btn_region.classList.add('active'); $btn_full_screen.classList.remove('active'); } $btn_full_screen.onclick = function() { $btn_full_screen.classList.add('active'); $btn_region.classList.remove('active'); } let recording = false; $btn_recorder.onclick = function() { if(recording) { $btn_recorder.classList.remove('active'); recording = false; document.getElementById('msg_2').classList.remove('active'); document.getElementById('msg_1').classList.add('active'); } else { $btn_recorder.classList.add('active'); recording = true; 
document.getElementById('msg_1').classList.remove('active'); document.getElementById('msg_2').classList.add('active'); } } document.getElementById("btn_close").onclick = function() { var window = remote.getCurrentWindow(); window.close(); }; // } /*document.getElementById('test_open').addEventListener('click', function(e) { createAnotherWindow() });*/ const electron = require('electron') const BrowserWindow = electron.remote.BrowserWindow const path = require('path') const url = require('url') function createAnotherWindow() { console.log('create another window') // Create the browser window. videoWindow = new BrowserWindow({width: 980, height: 638, frame: true}) videoWindow.setMenu(null) videoWindow.show() // and load the index.html of the app. videoWindow.loadURL(url.format({ pathname: path.join(__dirname, './result.html'), protocol: 'file:', slashes: true })) // Open the DevTools. // videoWindow.webContents.openDevTools() // Emitted when the window is closed. videoWindow.on('closed', function () { // Dereference the window object, usually you would store windows // in an array if your app supports multi windows, this is the time // when you should delete the corresponding element. videoWindow = null }) } // createScreenOverflow(); // var global_ = 5; /*function createScreenOverflow() { console.log('screen overflow') const mainWindow = remote.getCurrentWindow() // Create the browser window. overflowWindow = new BrowserWindow({skipTaskbar: true, frame: true, transparent: true, focusable: false, minimizable: false}) overflowWindow.setMenu(null) overflowWindow.setAlwaysOnTop(true); overflowWindow.setResizable(false); // overflowWindow.setFullScreen(true); // setTimeout(function() { overflowWindow.setIgnoreMouseEvents(true); // }, 5000); overflowWindow.show() // and load the index.html of the app. overflowWindow.loadURL(url.format({ pathname: path.join(__dirname, './screen-overflow.html'), protocol: 'file:', slashes: true })) // Open the DevTools. 
overflowWindow.webContents.openDevTools() // Emitted when the window is closed. overflowWindow.on('closed', function () { // Dereference the window object, usually you would store windows // in an array if your app supports multi windows, this is the time // when you should delete the corresponding element. overflowWindow = null }) }*/ // This method will be called when Electron has finished // initialization and is ready to create browser windows. // Some APIs can only be used after this event occurs. <file_sep>/js/overflow.js const ipcRenderer = require('electron').ipcRenderer const $black_left = document.getElementById('black_left'), $black_right = document.getElementById('black_right'), $black_top = document.getElementById('black_top'), $black_bottom = document.getElementById('black_bottom'); const $wrapper_middle = document.getElementById('wrapper-middle'); ipcRenderer.on('action', (event, data) => { if(data === 'disable') { $wrapper_middle.style.display = 'none'; } }); (function () { 'use strict'; const $wrapper = document.getElementById('wrapper'); const global_width = $wrapper.offsetWidth; const global_height = $wrapper.offsetHeight; let rectangle = { x: 0, y: 0, width: global_width, height: global_height }; let drug_direct = ''; function move(cords) { if (cords.width !== undefined) { rectangle.width = cords.width; } if (cords.height !== undefined) { rectangle.height = cords.height; } if (cords.x !== undefined) { rectangle.x = cords.x; } if (cords.y !== undefined) { rectangle.y = cords.y; } $wrapper.style.left = rectangle.x + 'px'; $wrapper.style.width = rectangle.width + 'px'; $wrapper.style.top = rectangle.y + 'px'; $wrapper.style.height = rectangle.height + 'px'; $black_left.style.width = rectangle.x + 'px'; $black_right.style.width = global_width - (rectangle.x + rectangle.width) + 'px'; $black_right.style.left = rectangle.x + rectangle.width + 'px'; $black_top.style.height = rectangle.y + 'px'; $black_top.style.left = rectangle.x + 'px'; 
$black_top.style.width = rectangle.width + 'px'; $black_bottom.style.height = global_height - (rectangle.y + rectangle.height) + 'px'; $black_bottom.style.top = rectangle.y + rectangle.height + 'px'; $black_bottom.style.left = rectangle.x + 'px'; $black_bottom.style.width = rectangle.width + 'px'; return true; } function checkPosition(event) { let x = event.clientX; let y = event.clientY; if (x > rectangle.x - 12 && x < rectangle.x + 12) { return 'left'; } if (x > (rectangle.width + rectangle.x) - 12 && x < (rectangle.width + rectangle.x) + 12) { return 'right'; } if (y > rectangle.y - 12 && y < rectangle.y + 12) { return 'top'; } if (y > (rectangle.height + rectangle.y) - 12 && y < (rectangle.height + rectangle.y) + 12) { return 'bottom'; } if ((x > (rectangle.x + rectangle.width / 2 - 32) && x < (rectangle.x + rectangle.width / 2 + 32)) && (y > (rectangle.y + rectangle.height / 2 - 32) && y < (rectangle.y + rectangle.height / 2 + 32)) && ((drug_direct === '') || (drug_direct === 'center'))) { return 'center'; } } function calcMovement(x, y) { switch (drug_direct) { case 'left': return { x: x, width: global_width - rectangle.x - x }; case 'right': return { x: global_width - x, width: x - rectangle.x }; case 'top': return { y: y, height: global_height - rectangle.y - y }; case 'bottom': return { y: global_height - y, height: y - rectangle.y }; case 'center': return { x: x - rectangle.width / 2, y: y - rectangle.height / 2 }; default: return false; } } function sendViaIPC() { ipcRenderer.send('asynchronous-message', rectangle); } function init() { document.body.addEventListener('mousedown', function (event) { drug_direct = checkPosition(event); }); document.body.addEventListener('mouseup', function () { drug_direct = ''; sendViaIPC(); }); document.body.addEventListener('mousemove', function (event) { const property = checkPosition(event); switch (property) { case 'left': document.body.style.cursor = 'e-resize'; break; case 'right': document.body.style.cursor = 
'e-resize'; break; case 'top': document.body.style.cursor = 'n-resize'; break; case 'bottom': document.body.style.cursor = 'n-resize'; break; case 'center': document.body.style.cursor = 'move'; break; default: document.body.style.cursor = 'auto'; } if (drug_direct === '') { return false; } let new_cords = calcMovement(event.clientX, event.clientY); if (new_cords && ((new_cords.width >= 128) || (new_cords.height >= 128))) { move(new_cords); } if (drug_direct === 'center') { if (new_cords.x > 0 && rectangle.width + new_cords.x < global_width) { move({x: new_cords.x}); } if (new_cords.y > 0 && rectangle.height + new_cords.y < global_height) { move({y: new_cords.y}); } } }); } init(); }()); <file_sep>/SERVER/index.php <?php define('WEBSITE_URL', 'http://work.local/uploads/'); $fn = (isset($_SERVER['HTTP_X_FILENAME']) ? $_SERVER['HTTP_X_FILENAME'] : false); $name = uniqid() . '.webm'; if($fn) { file_put_contents( 'uploads/' . $name, file_get_contents('php://input') ); echo WEBSITE_URL . $name; exit(); } else { echo 'error'; } ?> <file_sep>/README.md # in-recorder Just small screen recorder that uploads video to server
ec7da3056ecdbd2244dc0036998b5e9a9ccf10e0
[ "JavaScript", "Markdown", "PHP" ]
5
JavaScript
svyatik/in-recorder
b03e7c176a9de165164496344b281d5395f31ebc
297dda513b91d524e35e9312322c83c22b6163b2
refs/heads/master
<file_sep>import sys from PySide import QtCore, QtGui from database import Database class MainWindow(QtGui.QMainWindow): def __init__(self): super(MainWindow, self).__init__() self.initUI() def initUI(self): self.setGeometry(200, 200, 640, 480) self.menu = self.menuBar() self.cw = MainWidget(self) self.setCentralWidget(self.cw) self.status = self.statusBar() self.show() def setDatabase(self, nameFile): self.db = Database(nameFile) self.cw.setDatabase(self.db) class MainWidget(QtGui.QWidget): def __init__(self, parent): super(MainWidget, self).__init__(parent) self.layoutMain = QtGui.QVBoxLayout(self) self.layoutDataBrowser = QtGui.QHBoxLayout() self.comboTable = QtGui.QComboBox() self.comboTable.currentIndexChanged.connect(self.tableChange) # self.comboTable.activated.connect(self.tableReload) self.buttonNewRecord = QtGui.QPushButton('New Record') self.buttonNewRecord.clicked.connect(self.openAddRecordDialog) self.buttonDeleteRecord = QtGui.QPushButton('Delete Record') self.layoutDataBrowser.addWidget(QtGui.QLabel('Table:')) self.layoutDataBrowser.addWidget(self.comboTable) self.layoutDataBrowser.addWidget(self.buttonNewRecord) self.layoutDataBrowser.addWidget(self.buttonDeleteRecord) self.layoutMain.addLayout(self.layoutDataBrowser) self.tableView = QtGui.QTableView() self.layoutMain.addWidget(self.tableView) self.setLayout = self.layoutMain def tableChange(self, index): self.setTableModel(self.db.tables[index]) # def tableReload(self): # self.comboTable.clear() # self.comboTable.addItems(self.db.tables) def setDatabase(self, database): self.db = database self.setTableModel(self.db.tables[0]) self.comboTable.addItems(self.db.tables) self.comboTable.setCurrentIndex(0) def setTableModel(self, nameTable): model = self.db.getTableModel(nameTable) self.tableView.setModel(model) #self.tableView.show() def openAddRecordDialog(self): indexTable = self.comboTable.currentIndex() fields = self.db.getFields(self.db.tables[indexTable]) self.wAddRecord = AddRecord(fields) 
self.wAddRecord.applyClicked.connect(self.addRecord) def addRecord(self, text): print(text) class AddRecord(QtGui.QWidget): applyClicked = QtCore.Signal((tuple,)) def __init__(self, fields): super(AddRecord, self).__init__() self.fields = fields self.initUI() def initUI(self): self.mainLayout = QtGui.QVBoxLayout() self.layoutFields = QtGui.QGridLayout() for i in range(1, self.fields.count()): nameField = self.fields.field(i).name() label = QtGui.QLabel('{0:s}:'.format(nameField)) text = QtGui.QLineEdit() self.layoutFields.addWidget(label, i, 0, \ alignment=QtCore.Qt.AlignRight) self.layoutFields.addWidget(text, i, 1) self.mainLayout.addLayout(self.layoutFields) self.buttonApply = QtGui.QPushButton('Apply') self.buttonApply.clicked.connect(self.addRecord) self.mainLayout.addWidget(self.buttonApply) self.setLayout(self.mainLayout) self.show() def addRecord(self): self.applyClicked.emit(('this is a test', 10, )) def main(nameDatabase): app = QtGui.QApplication(sys.argv) mw = MainWindow() mw.setDatabase(nameDatabase) sys.exit(app.exec_()) if __name__ == '__main__': nameDatabase = '/home/fritzarch/Code/python/DatabaseBrowser/data' main(nameDatabase)<file_sep>DatabaseBrowser =============== SQLite data base browser in PySide <file_sep>from PySide import QtCore, QtSql class Database(object): statusOptions = ['OPEN', 'CLOSED', 'ERROR'] def __init__(self, name): self.connection = QtSql.QSqlDatabase('QSQLITE') self.connection.setDatabaseName(name) if not self.connection.open(): print('connection error') self._status = 2 else: self._status = 0 def getStatus(self): return self.statusOptions[self._status] status = property(getStatus) def getTables(self): return self.connection.tables() tables = property(getTables) def getFields(self, nameTable): return self.connection.record(nameTable) def __del__(self): self.connection.close() def getTableModel(self, nameTable): model = QtSql.QSqlTableModel(db=self.connection) model.setTable(nameTable) 
model.setEditStrategy(QtSql.QSqlTableModel.OnFieldChange) model.select() model.removeColumn(0) return model def main(): nameDatabase = '/home/fritzarch/Code/python/DataBaseBrowser/data' db = Database(nameDatabase) print(db.status) for t in db.getTables(): print(t) if __name__ == '__main__': main()
4688999da9c43d6e1d9e47413320980920de334a
[ "Markdown", "Python" ]
3
Python
fgroes/DatabaseBrowser
a113553040f7832d0be54b41c9d70404053b13bc
03e6dbeb46395b96e07aa7f9caa4d6137ab3b083
refs/heads/master
<file_sep>#!/usr/bin/env bash BAK_DIR='' USE_ZENITY=0 BAK_GPG=0 BAK_GK=0 BAK_CFG=0 LIMIT_SIZE=768 # 768MB SILENT=0 function usage { MSG="usage: $0 options\n \n This script make backup of .config and all exported GPG keys.\n GPG export for private keys and trusts asks your passphrase.\n \n /!\ \tWarning\t /!\ \n Backups have no permissions in order to use FAT32 filesystem.\n Everyone can reads thems, including _private keys_!\n Use external data storage like USB flash drive, and remove it after backup.\n \n OPTIONS:\n -a all\n Active all backup options\n -c config\n Backup ~/.config\n -d directory\n Choose directory where write backup ; required\n -g gpg\n Backup all GPG keys\n -h help\n Show this message\n -k keyring\n Backup gnome keyring\n -m max size\n Ignore directories greater; default 768 (0,75 GB)\n -s silent\n No output except errors\n -z zenity\n Use zenity instead echo on terminal\n" say $MSG exit } function fs_type { echo `df --output=fstype $@ | tail -n1` } function say { if [ ! "$SILENT" == 1 ] ; then if [ $USE_ZENITY == 1 ] ; then zenity --info --text "`echo $@ | sed 's/^\n//'`" else #MSG = "`echo $@ | sed 's/^\n//'`" echo -e $@ # | fold -s fi fi } function bak_gpg { mkdir --parents "$BAK_DIR/GPG.key/" gpg2 --export-ownertrust > "$BAK_DIR/GPG.key/$USER@$HOSTNAME_$NOW.trust" gpg2 --export --armor > "$BAK_DIR/GPG.key/$USER@$HOSTNAME_$NOW.pub" gpg2 --export-secret-keys --armor > "$BAK_DIR/GPG.key/$USER@$HOSTNAME_$NOW.priv" } function bak_gk { $RSYNC_CMD "/home/$USER/.local/share/keyrings/" "$BAK_DIR/keyrings" } function bak_cfg { DIR="/home/$USER/.config" pushd $DIR > /dev/null LIMIT_SIZE=$((1024*1024*$LIMIT_SIZE)) # Get file too big EXCLUDES='--exclude=cache* ' PRINTABLE_EXCLUDES='' for f in `du --threshold $LIMIT_SIZE -s * | cut -f2` ; do EXCLUDES="$EXCLUDES--exclude=$f " PRINTABLE_EXCLUDES="$PRINTABLE_EXCLUDES$f " done $RSYNC_CMD $EXCLUDES "$DIR" "$BAK_DIR" MSG="Backups finished to\n$BAK_DIR." if [ ! 
"$PRINTABLE_EXCLUDES" == '' ] ; then LIMIT_SIZE=$(($LIMIT_SIZE / 1024 / 1024)) MSG="$MSG\n\nDirectories excluded (over $LIMIT_SIZE Mio) :\n$PRINTABLE_EXCLUDES" fi say $MSG popd > /dev/null } while getopts “cad:ghknm:z” OPTION ; do case $OPTION in z) USE_ZENITY=1 ;; a) BAK_CFG=1 BAK_GK=1 BAK_GPG=1 ;; c) BAK_CFG=1 ;; d) BAK_DIR=`dirname $OPTARG/coincoin` ;; g) BAK_GPG=1 ;; h) usage ;; k) BAK_GK=1 ;; m) LIMIT_SIZE=$OPTARG ;; s) SILENT=1 ;; ?) usage ;; esac done if [ -z "$BAK_DIR" ] ; then say "Where have I to write backups?\n" usage fi if [ ! -d "$BAK_DIR" ] ; then say "Unable to read $BAK_DIR\n" exit 1 fi # FAT fs doesn't support links and permissions RSYNC_OPTIONS='-a --safe-links --del --ignore-errors' if [ -n "`fs_type $BAK_DIR | sed -n '/fat/ {p}'`" ] ; then RSYNC_OPTIONS="$RSYNC_OPTIONS --no-o --no-p --no-g --modify-window 1 " fi RSYNC_CMD="rsync $RSYNC_OPTIONS " BAK_DIR="$BAK_DIR/backup" NOW=`date +%F` say "Dont't forget: use external data storage like USB flash drive, and remove it after backup.\n" if [ "$DO_NOTHING" == 1 ] ; then exit ; fi if [ "$BAK_GPG" == 0 ] && [ "$BAK_GK" == 0 ] && [ "$BAK_CFG" == 0 ] ; then say "Nothing to backup.\n" usage fi # Backup gpg if [ "$BAK_GPG" == 1 ] ; then bak_gpg fi # Backup gnome-keyring if [ "$BAK_GK" == 1 ] ; then bak_gk fi # Backup gnome-keyring if [ "$BAK_CFG" == 1 ] ; then bak_cfg fi exit # comment if .gnupg, .mozilla and .icedove are not in your .config/ rsync -a --no-o --no-p --no-g --safe-links --modify-window 1 --del --stats --ignore-errors \ /home/$USER/.gnupg/ \ "$BAK_DIR/.config/gnupg" rsync -a --no-o --no-p --no-g --safe-links --modify-window 1 --del --stats --ignore-errors \ /home/$USER/.mozilla/ \ "$BAK_DIR/.config/mozilla" rsync -a --no-o --no-p --no-g --safe-links --modify-window 1 --del --stats --ignore-errors \ /home/$USER/.icedove/ \ "$BAK_DIR/.config/icedove"
02453adde1664d2345b52a3d38b986cada23c1d1
[ "Shell" ]
1
Shell
Luvwahraan/backup.sh
4f4c5ef56a3a0029ec47f82b92eed9ce2630cec2
bc90d1e0cf09bc642af849c33dadf4aac96f9a0c
refs/heads/master
<repo_name>vatsalsharma376/plaidC<file_sep>/client/src/components/layout/Navbar.js import React, { useState } from "react"; //import react pro sidebar components import { ProSidebar, Menu, MenuItem, SidebarHeader, SidebarContent, } from "react-pro-sidebar"; //import icons from react icons import { IconContext } from "react-icons"; import { AiOutlineDollarCircle } from "react-icons/ai"; import { AiOutlineDashboard } from "react-icons/ai" //import sidebar css from react-pro-sidebar module and our custom css import "react-pro-sidebar/dist/css/styles.css"; import "./Header.css"; import Dash from "../dashboard/Dash"; import Template from "../dashboard/Template"; const Header = (props) => { //create initial menuCollapse state using useState hook const [menuCollapse, setMenuCollapse] = useState(false) //create a custom function that will change menucollapse state from false to true and true to false const menuIconClick = () => { //condition checking to change state from true to false and vice versa menuCollapse ? setMenuCollapse(false) : setMenuCollapse(true); }; const [showDash,setshowDash] = useState(true); return ( <div className="flex flex-row" > <div id="header"> <IconContext.Provider value={{ size:"30", className: "global-class-name" }}> {/* collapsed props to change menu size using menucollapse state */} <ProSidebar collapsed={menuCollapse}> <SidebarHeader> <div className="logotext"> {/* small and big change using menucollapse state */} <p>{menuCollapse ? "Plaid" : "Demo Plaid"}</p> </div> </SidebarHeader> <SidebarContent> <Menu iconShape="square"> {/* <MenuItem active={true} icon={<FiHome />}> Home </MenuItem> */} <MenuItem icon={<AiOutlineDashboard />} onClick={()=>setshowDash(true) }>Dashboard</MenuItem> <MenuItem icon={<AiOutlineDollarCircle />} onClick={()=>setshowDash(false)}>Transactions</MenuItem> </Menu> </SidebarContent> </ProSidebar> </IconContext.Provider> </div> <div className="ri8">{showDash===true ? 
<Dash {...props}/> : <Template {...props} />}</div> </div> ); }; export default Header;<file_sep>/client/src/components/dashboard/Transactions.js import React, { Component } from "react"; import PropTypes from "prop-types"; import PlaidLinkButton from "react-plaid-link-button"; import { connect } from "react-redux"; import { useEffect } from "react"; import { getTransactions, addAccount, deleteAccount } from "../../actions/accountActions"; import { logoutUser } from "../../actions/authActions"; // AiOutlineUser //import Transactions from "./Transactions"; const Transactions = (props) => { useEffect(()=> { const { accounts } = props; props.getTransactions(accounts); },[]) // Add account const handleOnSuccess = (token, metadata) => { const { accounts } = props; const plaidData = { public_token: token, metadata: metadata, accounts: accounts }; props.addAccount(plaidData); }; // Delete account const onDeleteClick = id => { const { accounts } = props; const accountData = { id: id, accounts: accounts }; props.deleteAccount(accountData); }; // Logout const onLogoutClick = e => { e.preventDefault(); props.logoutUser(); }; const { user, accounts } = props; const { transactions, transactionsLoading } = props.plaid; let accountItems = accounts.map(account => ( <li key={account._id} style={{ marginTop: "1rem" }}> <button style={{ marginRight: "1rem" }} onClick={onDeleteClick.bind(this, account._id)} className="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-full" > <i className="material-icons">delete</i> </button> <b>{account.institutionName}</b> </li> )); // Setting up data table const transactionsColumns = [ { title: "Account", field: "account" }, { title: "Date", field: "date", type: "date", defaultSort: "desc" }, { title: "Name", field: "name" }, { title: "Amount", field: "amount", type: "numeric" }, { title: "Category", field: "category" } ]; //console.log(props.plaid); let transactionsData = []; setTimeout(transactions.forEach(function(account) { 
account.transactions.forEach(function(transaction) { transactionsData.push({ account: account.accountName, date: transaction.date, category: transaction.category[0], name: transaction.name, amount: transaction.amount }); }); }),2000); return ( <div className="flex flex-row "> <div className="ml-20 mainc"> <br/><br/><br/><br/> <h2 > <p className="font-bold text-5xl text-indigo-500">Welcome!</p> </h2> <p className="text-2xl smol"> Hey there, {user.name.split(" ")[0]} </p> <h5> <b className="text-3xl text-indigo-800">Linked Accounts</b> </h5> <p className="text-xl smol"> Add or remove your bank accounts below </p> <ul className="text-lg">{accountItems}</ul> <br/> <PlaidLinkButton buttonProps={{ className: "rounded px-4 py-2 text-xs border-2 border-blue-500 text-blue-500 hover:bg-blue-500 hover:text-blue-100 duration-300" }} plaidLinkProps={{ clientName: "YOUR_APP_NAME", key: "4508d464022e7606f19a772439b37c", env: "sandbox", product: ["transactions"], onSuccess: handleOnSuccess }} // onScriptLoad={() => setState({ loaded: true })} > Add Account </PlaidLinkButton> </div> </div> ); } Transactions.propTypes = { logoutUser: PropTypes.func.isRequired, getTransactions: PropTypes.func.isRequired, addAccount: PropTypes.func.isRequired, deleteAccount: PropTypes.func.isRequired, accounts: PropTypes.array.isRequired, plaid: PropTypes.object.isRequired, user: PropTypes.object.isRequired }; const mapStateToProps = state => ({ plaid: state.plaid }); export default connect( mapStateToProps, { logoutUser, getTransactions, addAccount, deleteAccount } )(Transactions); <file_sep>/client/src/components/dashboard/Template.js import React from "react"; import PropTypes from "prop-types"; import { connect } from "react-redux"; import { useEffect } from "react"; import {useState} from "react"; import axios from "axios"; import { getTransactions, addAccount, deleteAccount } from "../../actions/accountActions"; import { logoutUser } from "../../actions/authActions"; import MaterialTable from 
"material-table"; // https://mbrn.github.io/material-table/#/ import cal from "../../img/cal.png"; import Calendar from "react-calendar"; import 'react-calendar/dist/Calendar.css'; const Template = (props) => { let transactionsData = []; const [len,setLen] = useState(0); const [showtxn,setshowtxn] = useState([{}]); const [txnloading,settxnloading] = useState(true); // const populate = () => { const [cal1,setcal1] = useState(false); const [cal2,setcal2] = useState(false); const [date1,setdate1] = useState("Choose a starting date"); const [date2,setdate2] = useState("Choose an ending date"); // } const { accounts } = props.plaid; useEffect( ()=> { ( async () => { axios.post("/api/plaid/accounts/transactions",accounts).then((response) => { const transactions = response.data; // txn = [[]] transactions.forEach(function(account) { account.transactions.forEach(function(transaction) { transactionsData.push({ account: account.accountName, date: transaction.date, category: transaction.category[0], name: transaction.name, amount: transaction.amount }); }); }); //console.log(transactionsData); settxnloading(false); setLen(transactionsData.length); setshowtxn(transactionsData); }) //transactionsData.forEach((ex)=>console.log(ex)); works })(); },[]) // Setting up data table const transactionsColumns = [ { title: "Account", field: "account" }, { title: "Date", field: "date", type: "date", defaultSort: "desc" }, { title: "Name", field: "name" }, { title: "Amount", field: "amount", type: "numeric" }, { title: "Category", field: "category" } ]; //console.log(props.plaid); // transactionsData[0] -> object //console.log(props); return ( <div className="ml-20"> <br/> <br/> <br/> <form style={{width:"300px"}}> <label class="block text-gray-700 text-lg font-bold mb-2" for="from"> Start </label> <div className="flex flex-row"> <input class="shadow appearance-none border rounded w-full py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline" id="start" type="text" 
placeholder={date1} readOnly /> <div> <img src={cal} alt="cal" width="45" height="45" style={{cursor:'pointer'}} onClick={()=>setcal1(!cal1)} /> </div> </div> {cal1 && <Calendar onClickDay={(newD)=>{setcal1(!cal1); setdate1(newD.toString().substring(4,15)) }} />} <label class="block text-gray-700 text-lg font-bold mb-2" for="end"> End </label> <div className="flex flex-row"> <input class="shadow appearance-none border rounded w-full py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline" id="end" type="text" placeholder={date2} readOnly/> <div> <img src={cal} alt="cal" width="45" height="45" style={{cursor:'pointer'}} onClick={()=>setcal2(!cal2)}/> </div> </div> {cal2 && <Calendar onClickDay={(newD)=>{setcal2(!cal2); setdate2(newD.toString().substring(4,15)) }} />} </form> <h5> <b className="text-3xl text-indigo-800">Transactions</b> </h5> {txnloading ? ( <p className="text-xl text-gray-600">Fetching transactions...</p> ) : ( <> <p className="text-xl smol"> You have <b>{len}</b> transactions from your <b> {accounts.length}</b> linked {accounts.length > 1 ? 
( <span> accounts </span> ) : ( <span> account </span> )} from the past 30 days </p> <MaterialTable columns={transactionsColumns} data={showtxn} title="Search Transactions" /> </> )} </div> ); } Template.propTypes = { logoutUser: PropTypes.func.isRequired, getTransactions: PropTypes.func.isRequired, addAccount: PropTypes.func.isRequired, deleteAccount: PropTypes.func.isRequired, accounts: PropTypes.array.isRequired, plaid: PropTypes.object.isRequired, user: PropTypes.object.isRequired }; const mapStateToProps = state => ({ plaid: state.plaid }); export default connect( mapStateToProps, { logoutUser, getTransactions, addAccount, deleteAccount } )(Template); <file_sep>/client/src/components/dashboard/Dash.js import React from "react"; import PlaidLinkButton from "react-plaid-link-button"; const Dash = (props) => { // Add account const handleOnSuccess = (token, metadata) => { const { accounts } = props; const plaidData = { public_token: token, metadata: metadata, accounts: accounts }; props.addAccount(plaidData); }; // Delete account const onDeleteClick = id => { const { accounts } = props; const accountData = { id: id, accounts: accounts }; props.deleteAccount(accountData); }; // Logout const { user, accounts } = props; let accountItems = accounts.map(account => ( <li key={account._id} style={{ marginTop: "1rem" }}> <button style={{ marginRight: "1rem" }} onClick={onDeleteClick.bind(this, account._id)} className="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-full" > <i className="material-icons">delete</i> </button> <b>{account.institutionName}</b> </li> )); return ( <div className="ml-20 mainc"> <br/><br/><br/><br/> <h2 > <p className="font-bold text-5xl text-indigo-500">Welcome!</p> </h2> <p className="text-2xl smol"> Hey there, {user.name.split(" ")[0]} </p> <h5> <b className="text-3xl text-indigo-800">Linked Accounts</b> </h5> <p className="text-xl smol"> Add or remove your bank accounts below </p> <ul 
className="text-lg">{accountItems}</ul> <br/> <PlaidLinkButton buttonProps={{ className: "rounded px-4 py-2 text-xs border-2 border-blue-500 text-blue-500 hover:bg-blue-500 hover:text-blue-100 duration-300" }} plaidLinkProps={{ clientName: "YOUR_APP_NAME", key: "4508d464022e7606f19a772439b37c", env: "sandbox", product: ["transactions"], onSuccess: handleOnSuccess }} // onScriptLoad={() => setState({ loaded: true })} > Add Account </PlaidLinkButton> </div> ) } export default Dash <file_sep>/config/keys.js module.exports = { mongoURI: "mongodb+srv://vatsal:vatsal@cluster0.ujvzx.mongodb.net/Cluster0?retryWrites=true&w=majority", secretOrKey: "secret" }; <file_sep>/client/src/components/dashboard/Accounts.js import React from "react"; import PropTypes from "prop-types"; import { connect } from "react-redux"; import { useEffect } from "react"; import { getTransactions, addAccount, deleteAccount } from "../../actions/accountActions"; import { logoutUser } from "../../actions/authActions"; import Header from "../layout/Navbar"; import { FiLogOut } from "react-icons/fi"; // AiOutlineUser import { BiUserCircle } from "react-icons/bi"; const Accounts = (props) => { useEffect( ()=> { //const { accounts } = props; //props.getTransactions(accounts); //axios.post("/api/plaid/accounts/transactions",accounts).then((response) => { //props=response.data; //transactionsLoading=false; // populate(); },[]) // Add account // Delete account // Logout const onLogoutClick = e => { e.preventDefault(); props.logoutUser(); }; //console.log(props.plaid); return ( <> <div className="flex flex-row "> {/* {accounts.length>=1 && <Template {...props}/> } */} <div className=" hhw"><Header {...props}/></div> {/* <div className="dnmic"> <Router> <Switch> <Route exact path="/transactions" component={showT} /> <Route path="/dshbrd" component={showD} /> </Switch> </Router> </div> */} {/* <hr style={{ marginTop: "2rem", opacity: ".2" }} /> <h5> <b className="text-3xl text-indigo-800">Transactions</b> 
</h5> {transactionsLoading ? ( <p className="text-xl text-gray-600">Fetching transactions...</p> ) : ( <> <p className="text-xl smol"> You have <b>{transactionsData.length}</b> transactions from your <b> {accounts.length}</b> linked {accounts.length > 1 ? ( <span> accounts </span> ) : ( <span> account </span> )} from the past 30 days </p> <MaterialTable columns={transactionsColumns} data={transactionsData} title="Search Transactions" /> </> */} <div className="ml-20 lgot"> <button onClick={onLogoutClick} className="inline-block mt-1 p-2 pl-5 pr-5 bg-transparent border-2 border-red-400 text-red-400 text-lg rounded-lg transition-colors duration-700 transform hover:bg-red-500 hover:text-gray-100 focus:border-4 focus:border-indigo-300" > <FiLogOut/> </button> <BiUserCircle className="w-10 h-10 mt-0 inline-block"/> </div> </div> </> ); } Accounts.propTypes = { logoutUser: PropTypes.func.isRequired, getTransactions: PropTypes.func.isRequired, addAccount: PropTypes.func.isRequired, deleteAccount: PropTypes.func.isRequired, accounts: PropTypes.array.isRequired, plaid: PropTypes.object.isRequired, user: PropTypes.object.isRequired }; const mapStateToProps = state => ({ plaid: state.plaid }); export default connect( mapStateToProps, { logoutUser, getTransactions, addAccount, deleteAccount } )(Accounts);
237f1551ed8f3c77cc4eabaccf6cf1088856d362
[ "JavaScript" ]
6
JavaScript
vatsalsharma376/plaidC
6500bb08904b36da7a4f9c6dadb8e7b375c766d0
3fee336aeee553ecf498df0f179c6681d99ee960
refs/heads/master
<repo_name>siniga/lete_drop<file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/adapters/CartAdapter.java package com.agnet.leteApp.fragments.main.adapters; import android.content.Context; import android.content.SharedPreferences; import android.graphics.Color; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.Button; import android.widget.ImageView; import android.widget.LinearLayout; import android.widget.RelativeLayout; import android.widget.TextView; import com.agnet.leteApp.R; import com.agnet.leteApp.activities.MainActivity; import com.agnet.leteApp.fragments.main.sales.CartFragment; import com.agnet.leteApp.helpers.DatabaseHandler; import com.agnet.leteApp.models.Cart; import com.google.android.material.snackbar.Snackbar; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Collections; import java.util.List; import androidx.recyclerview.widget.RecyclerView; /** * Created by alicephares on 8/5/16. 
*/ public class CartAdapter extends RecyclerView.Adapter<CartAdapter.ViewHolder> { private List<Cart> products = Collections.emptyList(); private LayoutInflater inflator; private Context c; private int locateId; private SharedPreferences _preferences; private SharedPreferences.Editor _editor; private List productlist = new ArrayList(); private int cartItemCounts = 0; private int index = -1; private DatabaseHandler _dbHandler; private CartFragment cartFragment; private static int SPLASH_TIME_OUT = 5000; // Provide a suitable constructor (depends on the kind of dataset) public CartAdapter(Context c, List<Cart> products, CartFragment cartFragment) { this.products = products; this.inflator = LayoutInflater.from(c); this.c = c; this.cartFragment = cartFragment; _preferences = c.getSharedPreferences("SharedProductsData", Context.MODE_PRIVATE); _editor = _preferences.edit(); _dbHandler = new DatabaseHandler(c); } // Create new views (invoked by the layout manager) @Override public ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) { // create a new view View v = inflator.inflate(R.layout.card_cart, parent, false); // set the view's size, margins, padding and layout parameters ViewHolder vh = new ViewHolder(c, v); return vh; } // Replace the contents of a view (invoked by the layout manager) @Override public void onBindViewHolder(final ViewHolder holder, final int position) { //get a position of a current saleItem final Cart currentProduct = products.get(position); final DecimalFormat formatter = new DecimalFormat("#,###,##0.00"); holder.mName.setText(currentProduct.getName()); holder.mPrice.setText("TZS:" +formatter.format(currentProduct.getAmount())); holder.mQnty.setText("" + currentProduct.getQuantity()); final int[] count = {currentProduct.getQuantity()}; //calculate total price for the product holder.mIncrementBtn.setOnClickListener((View.OnClickListener) v -> { holder.mQnty.setText("" + ++count[0]); Double total = (currentProduct.getItemPrice() * 
count[0]); _dbHandler.updateCart(new Cart(0,currentProduct.getName(),total,currentProduct.getProductId(),count[0],currentProduct.getItemPrice())); int totalQnty = _dbHandler.getTotalQnty(); ((CartFragment) cartFragment).setTotalCartAmnt(_dbHandler.getTotalPrice()); }); holder.mDecrementBtn.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (count[0] > 1) { holder.mQnty.setText("" + --count[0]); Double total = (currentProduct.getItemPrice() * count[0]); _dbHandler.updateCart(new Cart(0,currentProduct.getName(),total,currentProduct.getProductId(),count[0],currentProduct.getItemPrice())); ((CartFragment) cartFragment).setTotalCartAmnt(_dbHandler.getTotalPrice()); } } }); holder.mRemoveCartProductBtn.setColorFilter(Color.parseColor("#df352e")); holder.mRemoveCartProductBtn.setOnClickListener(new View.OnClickListener() { @Override public void onClick(final View v) { Snackbar snackbar = Snackbar .make(v, "Delete Product?", Snackbar.LENGTH_LONG).setActionTextColor(Color.parseColor("#fbbe02")) .setAction("Delete", new View.OnClickListener() { @Override public void onClick(View view) { Snackbar snackbar1 = Snackbar.make(view, "Product is deleted!", Snackbar.LENGTH_SHORT); snackbar1.show(); // _dbHandler.deleteCartById(currentProduct.getId()); ((CartFragment) cartFragment).setTotalCartAmnt(_dbHandler.getTotalPrice()); removeAt(position); int totalQnty = _dbHandler.getTotalQnty(); } }); snackbar.show(); } }); } public static class ViewHolder extends RecyclerView.ViewHolder { public LinearLayout mWrapper; public RelativeLayout mQntyChangeBtn; public TextView mName, mPrice; public ImageView mImg, mRemoveCartProductBtn; public TextView mQnty, mSku; public Button mDecrementBtn, mIncrementBtn; public ViewHolder(Context context, View view) { super(view); mWrapper = view.findViewById(R.id.shop_wrapper); mName = view.findViewById(R.id.name); mPrice = view.findViewById(R.id.price); mImg = view.findViewById(R.id.product_img); mQnty = 
view.findViewById(R.id.quantity); // mQntyChangeBtn = view.findViewById(R.id.qnty_change_btn_wrapper); mDecrementBtn = view.findViewById(R.id.quantity_view_remove); mIncrementBtn = view.findViewById(R.id.quantity_view_add); mRemoveCartProductBtn = view.findViewById(R.id.remove_cart_product); mSku = view.findViewById(R.id.sku); } } @Override public int getItemCount() { return products.size(); } public void filterList(List<Cart> filterdProducts) { this.products = filterdProducts; notifyDataSetChanged(); } public int getImage(String imageName) { int drawableResourceId = c.getResources().getIdentifier(imageName, "drawable", c.getPackageName()); return drawableResourceId; } public void removeAt(int position) { products.remove(position); notifyItemRemoved(position); notifyItemRangeChanged(position, products.size()); } }<file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/dialogs/QrcodeBtmSheet.java package com.agnet.leteApp.fragments.main.dialogs; import android.content.Context; import android.content.SharedPreferences; import android.os.Bundle; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.Button; import android.widget.EditText; import android.widget.LinearLayout; import android.widget.TextView; import android.widget.Toast; import androidx.annotation.Nullable; import com.agnet.leteApp.R; import com.agnet.leteApp.fragments.QrcodeScannerFragment; import com.agnet.leteApp.fragments.main.HomeFragment; import com.agnet.leteApp.fragments.main.outlets.OutletSuccessFragment; import com.agnet.leteApp.fragments.main.sales.OrderBarcodeFragment; import com.agnet.leteApp.fragments.main.sales.OutletPhoneNumberFragment; import com.agnet.leteApp.fragments.main.sales.ProductsFragment; import com.agnet.leteApp.helpers.DatabaseHandler; import com.agnet.leteApp.helpers.FragmentHelper; import com.agnet.leteApp.models.Outlet; import com.google.android.material.bottomsheet.BottomSheetDialogFragment; public class 
QrcodeBtmSheet extends BottomSheetDialogFragment { @Override public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) { View v = inflater.inflate(R.layout.qr_code_btm_sheet, container, false); SharedPreferences preferences = getActivity().getSharedPreferences("SharedData", Context.MODE_PRIVATE); SharedPreferences.Editor _editor = preferences.edit(); DatabaseHandler dbHandler = new DatabaseHandler(getContext()); Button qrcodeBtn = v.findViewById(R.id.with_qrcode_btn); Button phoneBtn = v.findViewById(R.id.with_phone_btn); TextView cancelBtn = v.findViewById(R.id.cancel_btn); LinearLayout cancelPhoneLayoutBtn = v.findViewById(R.id.cancel_phone_layout_btn); Button regByPhoneBtn = v.findViewById(R.id.register_byPhone_btn); EditText phoneTxt = v.findViewById(R.id.phone_input); EditText nameTxt = v.findViewById(R.id.name_input); qrcodeBtn.setOnClickListener(v12 -> { new FragmentHelper(getActivity()).replaceWithbackStack(new OrderBarcodeFragment(),"OrderBarcodeFragment", R.id.fragment_placeholder); // dismiss(); }); phoneBtn.setOnClickListener(v1 -> { new FragmentHelper(getActivity()).replaceWithbackStack(new OutletPhoneNumberFragment(),"OutletPhoneNumberFragment", R.id.fragment_placeholder); // dismiss(); }); regByPhoneBtn.setOnClickListener(view -> { String phone = phoneTxt.getText().toString(); String name = nameTxt.getText().toString(); if(name.isEmpty()){ Toast.makeText(getContext(), "Ingiza jina la mteja!", Toast.LENGTH_LONG).show(); }else if(phone.isEmpty()){ Toast.makeText(getContext(), "Ingiza namba ya simu!", Toast.LENGTH_LONG).show(); }else { dbHandler.createOutlet(new Outlet( 0,name,phone, "","" )); dismiss(); new FragmentHelper(getActivity()).replaceWithbackStack(new ProductsFragment(), "ProductsFragment", R.id.fragment_placeholder); } }); cancelPhoneLayoutBtn.setOnClickListener(view -> new FragmentHelper(getActivity()).replaceWithbackStack(new HomeFragment(),"HomeFragment", 
R.id.fragment_placeholder)); cancelBtn.setOnClickListener(view -> new FragmentHelper(getActivity()).replaceWithbackStack(new HomeFragment(),"HomeFragment", R.id.fragment_placeholder)); return v; } }<file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/mapping/MappingQuestionnaireFragment.java package com.agnet.leteApp.fragments.main.mapping; import android.annotation.SuppressLint; import android.content.Context; import android.content.SharedPreferences; import android.content.res.ColorStateList; import android.graphics.Color; import android.graphics.Typeface; import android.location.Address; import android.location.Geocoder; import android.os.Build; import android.os.Bundle; import android.text.Editable; import android.text.InputType; import android.text.TextWatcher; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.ArrayAdapter; import android.widget.Button; import android.widget.CheckBox; import android.widget.CompoundButton; import android.widget.EditText; import android.widget.LinearLayout; import android.widget.ProgressBar; import android.widget.RadioButton; import android.widget.RadioGroup; import android.widget.Spinner; import android.widget.TextView; import android.widget.Toast; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentActivity; import com.agnet.leteApp.R; import com.agnet.leteApp.application.mSingleton; import com.agnet.leteApp.fragments.main.SuccessFragment; import com.agnet.leteApp.fragments.main.adapters.FormAdapter; import com.agnet.leteApp.helpers.DateHelper; import com.agnet.leteApp.helpers.FragmentHelper; import com.agnet.leteApp.models.Answer; import com.agnet.leteApp.models.Form; import com.agnet.leteApp.models.Option; import com.agnet.leteApp.models.Quesionnaire; import com.agnet.leteApp.models.ResponseData; import com.agnet.leteApp.models.Sms; import com.agnet.leteApp.models.User; 
import com.agnet.leteApp.service.Endpoint; import com.android.volley.AuthFailureError; import com.android.volley.DefaultRetryPolicy; import com.android.volley.NetworkResponse; import com.android.volley.Request; import com.android.volley.Response; import com.android.volley.RetryPolicy; import com.android.volley.VolleyError; import com.android.volley.VolleyLog; import com.android.volley.toolbox.JsonObjectRequest; import com.android.volley.toolbox.StringRequest; import com.google.android.gms.vision.text.Line; import com.google.gson.Gson; import org.json.JSONException; import org.json.JSONObject; import java.text.NumberFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import static com.android.volley.VolleyLog.TAG; public class MappingQuestionnaireFragment extends Fragment { private FragmentActivity _c; private Gson _gson; private SharedPreferences _preferences; private SharedPreferences.Editor _editor; private ProgressBar _progressBar; private String Token; private User _user; private int _formId; private LinearLayout _questionnaireWrapper; private List post = new ArrayList(); private List<Answer> _answers = new ArrayList<>(); private Button _submitBtn; private LinearLayout _transparentLoader; private String _timeStarted; @SuppressLint("RestrictedApi") @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_mapping_questionnaire, container, false); _c = getActivity(); _gson = new Gson(); _preferences = _c.getSharedPreferences("SharedData", Context.MODE_PRIVATE); _editor = _preferences.edit(); _questionnaireWrapper = view.findViewById(R.id.questionnaire_main_wrapper); TextView formNameTxt = view.findViewById(R.id.form_name); TextView startedAtTxt = view.findViewById(R.id.started_at); _submitBtn = view.findViewById(R.id.submit_btn); _progressBar = view.findViewById(R.id.progress_bar); 
_transparentLoader = view.findViewById(R.id.transparent_loader); try { _user = _gson.fromJson(_preferences.getString("User", null), User.class); Token = _preferences.getString("TOKEN", null); _formId = _preferences.getInt("FORM_ID", 0); String formName = _preferences.getString("FORM_NAME", null); _timeStarted = _preferences.getString("TIME_STARTED", null); formNameTxt.setText(formName); startedAtTxt.setText(_timeStarted); } catch (NullPointerException e) { } _submitBtn.setOnClickListener(view1 -> { try{ for (Answer answer : _answers) { if (answer.getAnswer().isEmpty() || answer.getAnswer().equals(null)) { Toast.makeText(_c, "Ingiza " + answer.getQuestion() + "!", Toast.LENGTH_SHORT).show(); if(answer.getQuestion().equals("Longitude") || answer.getQuestion().equals("Latitude") || answer.getQuestion().equals("location")){ answer.setAnswer("NULL"); }else { Toast.makeText(_c, "Ingiza " + answer.getQuestion() + "!", Toast.LENGTH_SHORT).show(); } return; } } }catch (NullPointerException e){ try { sendSMS(e.getMessage()); } catch (JSONException jsonException) { jsonException.printStackTrace(); } } postFormResults(); _submitBtn.setClickable(false); _transparentLoader.setVisibility(View.VISIBLE); _progressBar.setVisibility(View.VISIBLE); }); getQuestionnare(); return view; } public void createQuestionnaire(List<Quesionnaire> questions) { for (int i = 0; i <= questions.size() - 1; i++) { _answers.add(new Answer("", questions.get(i).getId())); switch (questions.get(i).getTypeId()) { case 1: addTextBox(questions.get(i), i); break; case 2: addRadioBox(questions.get(i).getQuestion(), questions.get(i).getOptions(), i); break; case 3: addSelectBox(questions.get(i).getQuestion(), questions.get(i).getOptions(), i); break; case 4: addCheckbox(questions.get(i).getQuestion(), questions.get(i).getOptions(), i); break; case 5: addnumericBox(questions.get(i).getQuestion(), i); break; case 9: addLongitude(questions.get(i).getQuestion(), i); break; case 10: 
addLatitude(questions.get(i).getQuestion(), i); break; case 11: addStartTime(questions.get(i).getQuestion(), i); break; case 12: addCompleteTime(questions.get(i).getQuestion(), i); break; case 13: addLocation(questions.get(i).getQuestion(), i); break; case 14: addPostedBy(questions.get(i).getQuestion(), i); break; default: break; } } } public void addTextBox(Quesionnaire question, int indx) { LinearLayout parent = createControlParent(); parent.addView(createHeader(question.getQuestion())); LinearLayout.LayoutParams params = new LinearLayout.LayoutParams( LinearLayout.LayoutParams.MATCH_PARENT, 110 ); params.setMargins(0, 20, 0, 0); EditText textbox = new EditText(_c); textbox.setLayoutParams(params); textbox.setBackgroundResource(R.drawable.round_corners_with_stroke_grey); textbox.setHint("Ingiza jibu hapa"); textbox.setTextSize(14); textbox.setPadding(25, 10, 10, 10); _answers.get(indx).setQuestion(question.getQuestion()); textbox.addTextChangedListener(new TextWatcher() { @Override public void afterTextChanged(Editable mEdit) { String text = mEdit.toString(); _answers.get(indx).setAnswer(text); } public void beforeTextChanged(CharSequence s, int start, int count, int after) { } public void onTextChanged(CharSequence s, int start, int before, int count) { } }); parent.addView(textbox); } public void addRadioBox(String question, List<Option> options, int indx) { LinearLayout.LayoutParams modifyParent = new LinearLayout.LayoutParams( LinearLayout.LayoutParams.MATCH_PARENT, LinearLayout.LayoutParams.WRAP_CONTENT ); LinearLayout parent = createControlParent(); parent.setLayoutParams(modifyParent); parent.setMinimumHeight(300); parent.addView(createHeader(question)); RadioGroup btnWrapper = new RadioGroup(_c); btnWrapper.setLayoutParams(modifyParent); btnWrapper.setPadding(0, 0, 0, 60); for (Option option : options) { LinearLayout.LayoutParams radioParam = new LinearLayout.LayoutParams( LinearLayout.LayoutParams.WRAP_CONTENT, 100 ); radioParam.setMargins(0, 0, 0, 10); 
RadioButton rdbtn = new RadioButton(_c); rdbtn.setText(option.getOption()); rdbtn.setId(option.getId()); if (Build.VERSION.SDK_INT >= 21) { ColorStateList colorStateList = new ColorStateList( new int[][]{ new int[]{-android.R.attr.state_enabled}, //disabled new int[]{android.R.attr.state_enabled} //enabled }, new int[]{ Color.parseColor("#FFFFFF") //disabled , Color.parseColor("#001689") //enabled } ); rdbtn.setButtonTintList(colorStateList);//set the color tint list // radio.invalidate(); //could not be necessary } btnWrapper.addView(rdbtn); } _answers.get(indx).setQuestion(question); btnWrapper.setOnCheckedChangeListener(new RadioGroup.OnCheckedChangeListener() { @Override public void onCheckedChanged(RadioGroup radioGroup, int i) { radioGroup.findViewById(i); int index = radioGroup.indexOfChild(getView().findViewById(radioGroup.getCheckedRadioButtonId())); RadioButton r = (RadioButton) radioGroup.getChildAt(index); String selectedtext = r.getText().toString(); _answers.get(indx).setAnswer(selectedtext); } }); parent.addView(btnWrapper); } public void addSelectBox(String question, List<Option> options, int indx) { LinearLayout parent = createControlParent(); parent.addView(createHeader(question)); LinearLayout.LayoutParams params = new LinearLayout.LayoutParams( LinearLayout.LayoutParams.MATCH_PARENT, 110 ); params.setMargins(0, 30, 0, 10); Spinner spinner = new Spinner(_c); ArrayAdapter optionAdapter = new ArrayAdapter(_c, android.R.layout.simple_spinner_dropdown_item, options); spinner.setAdapter(optionAdapter); spinner.setBackgroundResource(R.drawable.round_corners_with_stroke_grey); spinner.setLayoutParams(params); spinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() { @Override public void onItemSelected(AdapterView<?> parent, View view, int position, long id) { // Get the value selected by the user // e.g. 
to store it as a field or immediately call a method Option option = (Option) parent.getSelectedItem(); _answers.get(indx).setAnswer(option.getOption()); } @Override public void onNothingSelected(AdapterView<?> parent) { } }); parent.addView(spinner); } public void addCheckbox(String question, List<Option> options, int indx) { LinearLayout parent = createControlParent(); parent.addView(createHeader(question)); for (Option option : options) { LinearLayout.LayoutParams checkboxParam = new LinearLayout.LayoutParams( LinearLayout.LayoutParams.WRAP_CONTENT, 100 ); CheckBox checkBox = new CheckBox(_c); checkBox.setText(option.getOption()); checkBox.setTextColor(Color.parseColor("#001689")); checkBox.setButtonDrawable(R.drawable.custom_checkbox_colors); parent.addView(checkBox); _answers.get(indx).setQuestion(question); checkBox.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() { @Override public void onCheckedChanged(CompoundButton compoundButton, boolean b) { _answers.get(indx).setAnswer(compoundButton.getText().toString()); } }); } } public void addnumericBox(String question, int indx) { LinearLayout parent = createControlParent(); parent.addView(createHeader(question)); LinearLayout.LayoutParams params = new LinearLayout.LayoutParams( LinearLayout.LayoutParams.MATCH_PARENT, 110 ); params.setMargins(0, 30, 0, 0); EditText textbox = new EditText(_c); textbox.setLayoutParams(params); textbox.setBackgroundResource(R.drawable.round_corners_with_stroke_grey); textbox.setInputType(InputType.TYPE_CLASS_NUMBER); textbox.setHint("Ingiza jibu hapa"); textbox.setTextSize(14); textbox.setPadding(25, 10, 10, 10); _answers.get(indx).setQuestion(question); textbox.addTextChangedListener(new TextWatcher() { @Override public void afterTextChanged(Editable mEdit) { String text = mEdit.toString(); _answers.get(indx).setAnswer(text); } public void beforeTextChanged(CharSequence s, int start, int count, int after) { } public void onTextChanged(CharSequence s, int 
start, int before, int count) { } }); parent.addView(textbox); } private void addLatitude(String question, int i) { _answers.get(i).setQuestion(question); _answers.get(i).setAnswer(_preferences.getString("mLONGITUDE", null)); } private void addLongitude(String question, int i) { _answers.get(i).setQuestion(question); _answers.get(i).setAnswer(_preferences.getString("mLATITUDE", null)); } private void addStartTime(String question, int i) { _answers.get(i).setQuestion(question); _answers.get(i).setAnswer(DateHelper.getCurrentDate()+" "+_timeStarted); } private void addCompleteTime(String question, int i) { _answers.get(i).setQuestion(question); _answers.get(i).setAnswer(DateHelper.getCurrentDate()+" "+DateHelper.getCurrentTime()); } private void addLocation(String question, int i) { //Get address base on location try { Geocoder geo = new Geocoder(_c.getApplicationContext(), Locale.getDefault()); List<Address> addresses = geo.getFromLocation(Double.parseDouble(_preferences.getString("mLATITUDE", null)), Double.parseDouble( _preferences.getString("mLONGITUDE", null)), 1); if (addresses.isEmpty()) { } else { if (addresses.size() > 0) { _answers.get(i).setQuestion(question); _answers.get(i).setAnswer(addresses.get(0).getSubAdminArea()); } } } catch (Exception e) { e.printStackTrace(); } } private void addPostedBy(String question, int i) { _answers.get(i).setQuestion(question); _answers.get(i).setAnswer(_user.getName()); } public TextView createHeader(String question) { LinearLayout.LayoutParams params = new LinearLayout.LayoutParams( LinearLayout.LayoutParams.WRAP_CONTENT, LinearLayout.LayoutParams.WRAP_CONTENT ); TextView textView = new TextView(_c); textView.setText(question); textView.setTextColor(Color.parseColor("#001689")); textView.setTextSize(20); textView.setTypeface(null, Typeface.BOLD); textView.setPadding(0, 0, 50, 0); textView.setLayoutParams(params); return textView; } public LinearLayout createControlParent() { LinearLayout.LayoutParams params = new 
LinearLayout.LayoutParams( LinearLayout.LayoutParams.MATCH_PARENT, 300 ); LinearLayout controlParent = new LinearLayout(_c); controlParent.setLayoutParams(params); controlParent.setPadding(25, 25, 25, 15); controlParent.setOrientation(LinearLayout.VERTICAL); _questionnaireWrapper.addView(controlParent); return controlParent; } public void getQuestionnare() { Endpoint.setUrl("form/" + _formId); String url = Endpoint.getUrl(); _transparentLoader.setVisibility(View.VISIBLE); _progressBar.setVisibility(View.VISIBLE); StringRequest postRequest = new StringRequest(Request.Method.GET, url, response -> { ResponseData res = _gson.fromJson(response, ResponseData.class); if (res.getCode() == 200) { Form formList = res.getForm(); List<Quesionnaire> questions = formList.getQuestions(); // Log.d("HERERESPONSE", _gson.toJson(formList.getQuestions())); createQuestionnaire(questions); _transparentLoader.setVisibility(View.GONE); _progressBar.setVisibility(View.GONE); } }, new Response.ErrorListener() { @Override public void onErrorResponse(VolleyError error) { error.printStackTrace(); _transparentLoader.setVisibility(View.GONE); _progressBar.setVisibility(View.GONE); NetworkResponse response = error.networkResponse; String errorMsg = ""; if (response != null && response.data != null) { String errorString = new String(response.data); Log.i("log error", errorString); //TODO: display errors based on the message from the server Toast.makeText(_c, "Kuna tatizo, angalia mtandao alafu jaribu tena", Toast.LENGTH_SHORT).show(); } } } ) { @Override public Map<String, String> getHeaders() { Map<String, String> params = new HashMap<String, String>(); params.put("Authorization", "Bearer " + "" + Token); return params; } }; mSingleton.getInstance(_c).addToRequestQueue(postRequest); postRequest.setRetryPolicy(new RetryPolicy() { @Override public int getCurrentTimeout() { return 50000; } @Override public int getCurrentRetryCount() { return 50000; } @Override public void retry(VolleyError error) 
throws VolleyError { } }); } public void postFormResults() { Endpoint.setUrl("results"); String url = Endpoint.getUrl(); StringRequest postRequest = new StringRequest(Request.Method.POST, url, response -> { ResponseData res = _gson.fromJson(response, ResponseData.class); if (res.getCode() == 201) { new FragmentHelper(_c).replace(new MappingSuccessFragment(), "MappingSuccessFragment", R.id.fragment_placeholder); } else { Toast.makeText(_c, "Kuna tatizo la mtandao, jaribu tena", Toast.LENGTH_SHORT).show(); } _submitBtn.setClickable(true); _transparentLoader.setVisibility(View.VISIBLE); _progressBar.setVisibility(View.VISIBLE); }, error -> { error.printStackTrace(); _submitBtn.setClickable(true); _transparentLoader.setVisibility(View.VISIBLE); _progressBar.setVisibility(View.VISIBLE); NetworkResponse response = error.networkResponse; String errorMsg = ""; if (response != null && response.data != null) { String errorString = new String(response.data); Log.i("log error", errorString); //TODO: display errors based on the message from the server Toast.makeText(_c, "Kuna tatizo, angalia mtandao alafu jaribu tena", Toast.LENGTH_SHORT).show(); } } ) { @Override public Map<String, String> getHeaders() { Map<String, String> params = new HashMap<String, String>(); params.put("Authorization", "Bearer " + "" + Token); return params; } @Override protected Map<String, String> getParams() { Map<String, String> params = new HashMap<String, String>(); params.put("results", _gson.toJson(_answers)); params.put("user_id", "" + _user.getId()); params.put("form_id", "" + _formId); params.put("lat", _preferences.getString("mLATITUDE", null)); params.put("lng", _preferences.getString("mLONGITUDE", null)); return params; } }; mSingleton.getInstance(_c).addToRequestQueue(postRequest); postRequest.setRetryPolicy(new RetryPolicy() { @Override public int getCurrentTimeout() { return 50000; } @Override public int getCurrentRetryCount() { return 50000; } @Override public void retry(VolleyError 
error) throws VolleyError { } }); } public void sendSMS(String crush) throws JSONException { String url = "https://messaging-service.co.tz/api/sms/v1/text/single"; Sms txtMsg = new Sms("NEXTSMS", "0763682987", crush); String smsAuth = getResources().getString(R.string.sms_auth); Log.d("HERSTRING", _gson.toJson(txtMsg)); JsonObjectRequest jsonObjReq = new JsonObjectRequest(Request.Method.POST, url, new JSONObject(_gson.toJson(txtMsg)), new Response.Listener<JSONObject>() { @Override public void onResponse(JSONObject response) { Log.d(TAG, response.toString()); } }, new Response.ErrorListener() { @Override public void onErrorResponse(VolleyError error) { VolleyLog.d(TAG, "Error: " + error.getMessage()); } }) { //headers @Override public Map<String, String> getHeaders() throws AuthFailureError { HashMap<String, String> headers = new HashMap<String, String>(); headers.put("Authorization", "Basic Um91dGVQcm86emV5MTIzMzIxUVE="); headers.put("Content-Type", "application/json"); headers.put("Accept", "application/json"); return headers; } }; mSingleton.getInstance(_c).addToRequestQueue(jsonObjReq); jsonObjReq.setRetryPolicy(new DefaultRetryPolicy(DefaultRetryPolicy.DEFAULT_TIMEOUT_MS * 2, DefaultRetryPolicy.DEFAULT_MAX_RETRIES, DefaultRetryPolicy.DEFAULT_BACKOFF_MULT)); } } <file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/outlets/NewBarcodeFragment.java package com.agnet.leteApp.fragments.main.outlets; import android.annotation.SuppressLint; import android.content.Context; import android.content.SharedPreferences; import android.location.Address; import android.location.Geocoder; import android.os.Bundle; import android.util.Log; import android.util.SparseArray; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.ArrayAdapter; import android.widget.Button; import android.widget.EditText; import android.widget.LinearLayout; import android.widget.RelativeLayout; 
import android.widget.Spinner; import android.widget.TextView; import android.widget.Toast; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentActivity; import com.agnet.leteApp.R; import com.agnet.leteApp.application.mSingleton; import com.agnet.leteApp.fragments.main.mapping.MappingFormListFragment; import com.agnet.leteApp.helpers.DatabaseHandler; import com.agnet.leteApp.helpers.FragmentHelper; import com.agnet.leteApp.models.CustomerType; import com.agnet.leteApp.models.ResponseData; import com.agnet.leteApp.models.User; import com.agnet.leteApp.service.Endpoint; import com.android.volley.DefaultRetryPolicy; import com.android.volley.NetworkResponse; import com.android.volley.Request; import com.android.volley.Response; import com.android.volley.VolleyError; import com.android.volley.toolbox.StringRequest; import com.google.android.gms.samples.vision.barcodereader.BarcodeCapture; import com.google.android.gms.samples.vision.barcodereader.BarcodeGraphic; import com.google.android.gms.vision.barcode.Barcode; import com.google.android.material.bottomnavigation.BottomNavigationView; import com.google.gson.Gson; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import xyz.belvi.mobilevisionbarcodescanner.BarcodeRetriever; public class NewBarcodeFragment extends Fragment implements BarcodeRetriever { private FragmentActivity _c; private SharedPreferences _preferences; private SharedPreferences.Editor _editor; private Gson _gson; private User _user; private String Token; private String _phone, _name, _vfdId; private int _vfdType, _outletTypeId; private BarcodeCapture barcodeCapture; private String _lng, _lat; private String _location; private int _projectId; @SuppressLint("RestrictedApi") @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_new_barcode, container, 
false); _c = getActivity(); barcodeCapture = (BarcodeCapture) getChildFragmentManager().findFragmentById(R.id.barcode); barcodeCapture.setRetrieval(this); _preferences = _c.getSharedPreferences("SharedData", Context.MODE_PRIVATE); _editor = _preferences.edit(); _gson = new Gson(); try { _user = _gson.fromJson(_preferences.getString("User", null), User.class); Token = _preferences.getString("TOKEN", null); _phone = _preferences.getString("PHONE", null); _name = _preferences.getString("NAME", null); _vfdType = _preferences.getInt("VFD_TYPE", 0); _vfdId = _preferences.getString("VFD_ID", null); _outletTypeId = _preferences.getInt("OUTLET_TYPE_ID", 0); _lng = _preferences.getString("mLONGITUDE", null); _lat = _preferences.getString("mLATITUDE", null); _projectId = _preferences.getInt("PROJECT_ID",0); } catch (NullPointerException e) { } //Get address base on location try { Geocoder geo = new Geocoder(_c.getApplicationContext(), Locale.getDefault()); List<Address> addresses = geo.getFromLocation(Double.parseDouble(_lat), Double.parseDouble(_lng), 1); if (addresses.isEmpty()) { } else { if (addresses.size() > 0) { _location = addresses.get(0).getSubAdminArea(); } } } catch (Exception e) { e.printStackTrace(); } return view; } @Override public void onRetrieved(Barcode barcode) { // Log.d(TAG, "Barcode read: " + barcode.displayValue); _c.runOnUiThread(new Runnable() { @Override public void run() { saveOutlet(barcode.displayValue); } }); } @Override public void onRetrievedMultiple(Barcode closetToClick, List<BarcodeGraphic> barcode) { } @Override public void onBitmapScanned(SparseArray<Barcode> sparseArray) { } @Override public void onRetrievedFailed(String reason) { } @Override public void onPermissionRequestDenied() { } public void saveOutlet(String qrcode) { Endpoint.setUrl("outlet"); String url = Endpoint.getUrl(); StringRequest postRequest = new StringRequest(Request.Method.POST, url, response -> { ResponseData res = _gson.fromJson(response, ResponseData.class); if 
(res.getCode() == 409) { Toast.makeText(_c, "QR code imeshasajiliwa!", Toast.LENGTH_LONG).show(); } else { _editor.remove("PHONE"); _editor.remove("NAME"); _editor.remove("VFD_TYPE"); _editor.remove("VFD_ID"); _editor.remove("OUTLET_TYPE_ID"); _editor.commit(); barcodeCapture.stopScanning(); new FragmentHelper(_c).replace(new OutletSuccessFragment(), " OutletSuccessFragment", R.id.fragment_placeholder); } }, error -> { NetworkResponse response = error.networkResponse; String errorMsg = ""; if (response != null && response.data != null) { String errorString = new String(response.data); Log.i("log error", errorString); //TODO: display errors based on the message from the server Toast.makeText(_c, "Kuna tatizo, angalia mtandao alafu jaribu tena", Toast.LENGTH_LONG).show(); } } ) { @Override public Map<String, String> getHeaders() { Map<String, String> params = new HashMap<String, String>(); params.put("Authorization", "Bearer " + "" + Token); return params; } @Override protected Map<String, String> getParams() { Map<String, String> params = new HashMap<String, String>(); params.put("lng", _lng); params.put("lat", _lat); params.put("phone", _phone); params.put("name", _name); params.put("outlet_type_id", "" + _outletTypeId); params.put("vfd_cust_type", "" + _vfdType); params.put("vfd_cust_id", "" + _vfdId); params.put("qr_code", qrcode); params.put("user_id", "" + _user.getId()); params.put("location", _location); params.put("projectId", "" + _projectId); return params; } }; mSingleton.getInstance(_c).addToRequestQueue(postRequest); postRequest.setRetryPolicy(new DefaultRetryPolicy(DefaultRetryPolicy.DEFAULT_TIMEOUT_MS * 2, DefaultRetryPolicy.DEFAULT_MAX_RETRIES, DefaultRetryPolicy.DEFAULT_BACKOFF_MULT)); } } <file_sep>/app/src/main/java/com/agnet/leteApp/activities/LocationActivity.java package com.agnet.leteApp.activities; import android.Manifest; import android.app.AlertDialog; import android.content.Context; import android.content.DialogInterface; import 
android.content.Intent;
import android.content.SharedPreferences;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.provider.Settings;
import android.view.View;
import android.widget.Button;
import android.widget.TextView;
import android.widget.Toast;

import com.agnet.leteApp.R;
import com.agnet.leteApp.helpers.OrderStatusHelper;
import com.google.android.material.bottomsheet.BottomSheetBehavior;
import com.karumi.dexter.Dexter;
import com.karumi.dexter.MultiplePermissionsReport;
import com.karumi.dexter.PermissionToken;
import com.karumi.dexter.listener.DexterError;
import com.karumi.dexter.listener.PermissionRequest;
import com.karumi.dexter.listener.PermissionRequestErrorListener;
import com.karumi.dexter.listener.multi.MultiplePermissionsListener;
import com.mapbox.android.core.permissions.PermissionsListener;
import com.mapbox.android.core.permissions.PermissionsManager;
import com.mapbox.mapboxsdk.Mapbox;
import com.mapbox.mapboxsdk.location.LocationComponent;
import com.mapbox.mapboxsdk.location.LocationComponentActivationOptions;
import com.mapbox.mapboxsdk.location.LocationComponentOptions;
import com.mapbox.mapboxsdk.location.OnCameraTrackingChangedListener;
import com.mapbox.mapboxsdk.location.OnLocationClickListener;
import com.mapbox.mapboxsdk.location.modes.CameraMode;
import com.mapbox.mapboxsdk.location.modes.RenderMode;
import com.mapbox.mapboxsdk.maps.MapView;
import com.mapbox.mapboxsdk.maps.MapboxMap;
import com.mapbox.mapboxsdk.maps.OnMapReadyCallback;
import com.mapbox.mapboxsdk.maps.Style;

import java.util.List;

import androidx.annotation.NonNull;
import androidx.annotation.RequiresApi;
import androidx.appcompat.app.AppCompatActivity;

/**
 * Activity that shows the device's live location on a Mapbox map together with
 * a bottom sheet carrying the active order's number/status and call/cancel
 * actions. Implements the Mapbox location-component and permission callbacks.
 */
public class LocationActivity extends AppCompatActivity implements
        OnMapReadyCallback, OnLocationClickListener, PermissionsListener, OnCameraTrackingChangedListener {

    private BottomSheetBehavior _bottomSheetBehavior;      // order-details bottom sheet
    private static final String TAG = "LocationActivity";
    private PermissionsManager permissionsManager;
    private MapView mapView;
    private MapboxMap mapboxMap;
    private LocationComponent locationComponent;
    private boolean isInTrackingMode;                      // true while the camera follows the user

    @RequiresApi(api = Build.VERSION_CODES.M)
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);

        //Mapbox access token is configured here.This needs to be called either in your application
        // object or in the same activity which contains the mapview.
        Mapbox.getInstance(this, getString(R.string.mapbox_access_token));

        // This contains the MapView in XML and needs to be called after the access token is configured.
        setContentView(R.layout.activity_location);
        mapView = findViewById(R.id.mapView);
        mapView.onCreate(savedInstanceState);
        mapView.getMapAsync(this);

        SharedPreferences preferences = getSharedPreferences("SharedData", Context.MODE_PRIVATE);

        //Find bottom Sheet ID
        View bottomSheet = findViewById(R.id.bottom_sheet);
        // FIX: assign the field (the original declared a shadowing local, which
        // left the field null for handleBottomSheetBehaviors()).
        _bottomSheetBehavior = BottomSheetBehavior.from(bottomSheet);

        //By default set BottomSheet Behavior as Collapsed and Height 0
        _bottomSheetBehavior.setState(BottomSheetBehavior.STATE_COLLAPSED);
        _bottomSheetBehavior.setPeekHeight(0);
        _bottomSheetBehavior.setState(BottomSheetBehavior.STATE_EXPANDED);
        _bottomSheetBehavior.setPeekHeight(600);

        OrderStatusHelper orderStatusHelper = new OrderStatusHelper(this);

        // FIX: the original used !getString(...).equals(null), which throws an NPE
        // when the pref is missing and is always true otherwise; a plain null
        // check preserves the non-null behavior without the crash.
        if (preferences.getString("NEW_ORDER_NO", null) != null) {
            String orderNo = preferences.getString("NEW_ORDER_NO", null);
            int status = preferences.getInt("NEW_ORDER_STATUS", 0);
            TextView orderNoView = findViewById(R.id.order_no);
            orderNoView.setText(orderNo);
            Button orderStatus = findViewById(R.id.status_btn);
            orderStatusHelper.showButtonByStatus(orderStatus,status);
        }

        Button callBtn = findViewById(R.id.call_btn);
        callBtn.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // Dial the customer's stored number (requires CALL_PHONE permission).
                String phoneNo = preferences.getString("PHONE_NO", null);
                Intent callIntent = new Intent(Intent.ACTION_CALL);
                callIntent.setData(Uri.parse("tel:" + phoneNo));//change the number
                startActivity(callIntent);
            }
        });

        Button _cancelBtn = findViewById(R.id.cancel_order);
        _cancelBtn.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // TODO: cancel-order action not implemented yet.
            }
        });
    }

    /** Optional state-change logging hooks for the bottom sheet (currently unused). */
    private void handleBottomSheetBehaviors() {
        //If you want to handle callback of Sheet Behavior you can use below code
        _bottomSheetBehavior.setBottomSheetCallback(new BottomSheetBehavior.BottomSheetCallback() {
            @Override
            public void onStateChanged(@NonNull View bottomSheet, int newState) {
                switch (newState) {
                    case BottomSheetBehavior.STATE_COLLAPSED:
                        //Log.d(TAG, "State Collapsed");
                        break;
                    case BottomSheetBehavior.STATE_DRAGGING:
                        // Log.d(TAG, "State Dragging");
                        break;
                    case BottomSheetBehavior.STATE_EXPANDED:
                        // Log.d(TAG, "State Expanded");
                        break;
                    case BottomSheetBehavior.STATE_HIDDEN:
                        // Log.d(TAG, "State Hidden");
                        break;
                    case BottomSheetBehavior.STATE_SETTLING:
                        // Log.d(TAG, "State Settling");
                        break;
                }
            }

            @Override
            public void onSlide(@NonNull View bottomSheet, float slideOffset) {
            }
        });
    }

    @Override
    public void onBackPressed() {
        super.onBackPressed();
        // Always return to MainActivity rather than whatever launched us.
        Intent intent = new Intent(this, MainActivity.class);
        startActivity(intent);
        finish();
    }

    @Override
    public void onMapReady(@NonNull MapboxMap mapboxMap) {
        this.mapboxMap = mapboxMap;
        mapboxMap.setStyle(Style.LIGHT, new Style.OnStyleLoaded() {
            @Override
            public void onStyleLoaded(@NonNull Style style) {
                enableLocationComponent(style);
            }
        });
    }

    /**
     * Activates the Mapbox location component on the loaded style, or kicks off
     * the permission request flow when location permission is missing.
     */
    @SuppressWarnings( {"MissingPermission"})
    private void enableLocationComponent(@NonNull Style loadedMapStyle) {
        // Check if permissions are enabled and if not request
        if (PermissionsManager.areLocationPermissionsGranted(this)) {
            // Create and customize the LocationComponent's options
            LocationComponentOptions customLocationComponentOptions = LocationComponentOptions.builder(this)
                    .elevation(5)
                    .accuracyAlpha(.6f)
                    // .accuracyColor(Color.RED)
                    .foregroundDrawable(R.drawable.ic_add_black_24dp)
                    .build();

            // Get an instance of the component
            locationComponent = mapboxMap.getLocationComponent();

            LocationComponentActivationOptions locationComponentActivationOptions =
                    LocationComponentActivationOptions.builder(this, loadedMapStyle)
                            .locationComponentOptions(customLocationComponentOptions)
                            .build();

            // Activate with options
            locationComponent.activateLocationComponent(locationComponentActivationOptions);

            // Enable to make component visible
            locationComponent.setLocationComponentEnabled(true);

            // Set the component's camera mode
            locationComponent.setCameraMode(CameraMode.TRACKING);

            // Set the component's render mode
            locationComponent.setRenderMode(RenderMode.COMPASS);

            // Add the location icon click listener
            locationComponent.addOnLocationClickListener(this);

            // Add the camera tracking listener. Fires if the map camera is manually moved.
            locationComponent.addOnCameraTrackingChangedListener(this);

            findViewById(R.id.back_to_camera_tracking_mode).setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View view) {
                    if (!isInTrackingMode) {
                        isInTrackingMode = true;
                        locationComponent.setCameraMode(CameraMode.TRACKING);
                        locationComponent.zoomWhileTracking(16f);
                    } else {
                        Toast.makeText(LocationActivity.this, "Track has already been enabled", Toast.LENGTH_SHORT).show();
                    }
                }
            });
        } else {
            permissionsManager = new PermissionsManager(this);
            permissionsManager.requestLocationPermissions(this);
        }
    }

    @SuppressWarnings( {"MissingPermission"})
    @Override
    public void onLocationComponentClick() {
        if (locationComponent.getLastKnownLocation() != null) {
            // NOTE(review): format string has no placeholders, so the lat/lng args are ignored.
            Toast.makeText(this, String.format("my current location",
                    locationComponent.getLastKnownLocation().getLatitude(),
                    locationComponent.getLastKnownLocation().getLongitude()), Toast.LENGTH_LONG).show();
        }
    }

    @Override
    public void onCameraTrackingDismissed() {
        isInTrackingMode = false;
    }

    @Override
    public void onCameraTrackingChanged(int currentMode) {
        // Empty on purpose
    }

    @Override
    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
        permissionsManager.onRequestPermissionsResult(requestCode, permissions, grantResults);
    }

    @Override
    public void onExplanationNeeded(List<String> permissionsToExplain) {
        //TODO: explain why you need location permission
        // Toast.makeText(this,"user location permission explainaion", Toast.LENGTH_LONG).show();
    }

    @Override
    public void onPermissionResult(boolean granted) {
        if (granted) {
            mapboxMap.getStyle(new Style.OnStyleLoaded() {
                @Override
                public void onStyleLoaded(@NonNull Style style) {
                    enableLocationComponent(style);
                }
            });
        } else {
            Toast.makeText(this, "Location permission is not granted", Toast.LENGTH_LONG).show();
            finish();
        }
    }

    // MapView lifecycle forwarding — required by the Mapbox SDK.
    @SuppressWarnings( {"MissingPermission"})
    protected void onStart() {
        super.onStart();
        mapView.onStart();
    }

    @Override
    protected void onResume() {
        super.onResume();
        mapView.onResume();
    }

    @Override
    protected void onPause() {
        super.onPause();
        mapView.onPause();
    }

    @Override
    protected void onStop() {
        super.onStop();
        mapView.onStop();
    }

    @Override
    protected void onSaveInstanceState(Bundle outState) {
        super.onSaveInstanceState(outState);
        mapView.onSaveInstanceState(outState);
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        mapView.onDestroy();
    }

    @Override
    public void onLowMemory() {
        super.onLowMemory();
        mapView.onLowMemory();
    }

    /** Requests storage/location/call permissions via Dexter (currently unreferenced). */
    private void requestPermissions() {
        Dexter.withActivity(this)
                .withPermissions(
                        Manifest.permission.READ_EXTERNAL_STORAGE,
                        Manifest.permission.WRITE_EXTERNAL_STORAGE,
                        Manifest.permission.ACCESS_FINE_LOCATION,
                        Manifest.permission.CALL_PHONE)
                .withListener(new MultiplePermissionsListener() {
                    @Override
                    public void onPermissionsChecked(MultiplePermissionsReport report) {
                        // check if all permissions are granted
                        if (report.areAllPermissionsGranted()) {
                            Toast.makeText(getApplicationContext(), "All permissions are granted!", Toast.LENGTH_SHORT).show();
                        }
                        // check for permanent denial of any permission
                        if (report.isAnyPermissionPermanentlyDenied()) {
                            // show alert dialog navigating to Settings
                            // showSettingsDialog();
                        }
                    }

                    @Override
                    public void onPermissionRationaleShouldBeShown(List<PermissionRequest> permissions, PermissionToken token) {
                        token.continuePermissionRequest();
                    }
                }).
                withErrorListener(new PermissionRequestErrorListener() {
                    @Override
                    public void onError(DexterError error) {
                        Toast.makeText(getApplicationContext(), "Error occurred! ", Toast.LENGTH_SHORT).show();
                    }
                })
                .onSameThread()
                .check();
    }

    /** Explains why permissions are needed and offers a jump to app settings. */
    private void showSettingsDialog() {
        AlertDialog.Builder builder = new AlertDialog.Builder(LocationActivity.this);
        builder.setTitle("Need Permissions");
        builder.setMessage("This app needs permission to use this feature. You can grant them in app settings.");
        builder.setPositiveButton("GOTO SETTINGS", new DialogInterface.OnClickListener() {
            @Override
            public void onClick(DialogInterface dialog, int which) {
                dialog.cancel();
                openSettings();
            }
        });
        builder.setNegativeButton("Cancel", new DialogInterface.OnClickListener() {
            @Override
            public void onClick(DialogInterface dialog, int which) {
                dialog.cancel();
            }
        });
        builder.show();
    }

    // navigating user to app settings
    private void openSettings() {
        Intent intent = new Intent(Settings.ACTION_APPLICATION_DETAILS_SETTINGS);
        Uri uri = Uri.fromParts("package", getPackageName(), null);
        intent.setData(uri);
        startActivityForResult(intent, 101);
    }
}
<file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/sales/CartFragment.java
package com.agnet.leteApp.fragments.main.sales;

import android.Manifest;
import android.annotation.SuppressLint;
import android.app.Activity;
import android.app.AlertDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.content.SharedPreferences;
import android.content.pm.PackageManager;
import android.os.Bundle;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button; import android.widget.LinearLayout; import android.widget.ProgressBar; import android.widget.RelativeLayout; import android.widget.TextView; import android.widget.Toast; import androidx.core.app.ActivityCompat; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentActivity; import androidx.recyclerview.widget.LinearLayoutManager; import androidx.recyclerview.widget.RecyclerView; import com.agnet.leteApp.R; import com.agnet.leteApp.application.mSingleton; import com.agnet.leteApp.fragments.main.adapters.CartAdapter; import com.agnet.leteApp.helpers.DatabaseHandler; import com.agnet.leteApp.helpers.DateHelper; import com.agnet.leteApp.helpers.FragmentHelper; import com.agnet.leteApp.models.Cart; import com.agnet.leteApp.models.Invoice; import com.agnet.leteApp.models.InvoiceDetail; import com.agnet.leteApp.models.Order; import com.agnet.leteApp.models.Outlet; import com.agnet.leteApp.models.Receipt; import com.agnet.leteApp.models.ResponseData; import com.agnet.leteApp.models.User; import com.agnet.leteApp.models.Vfd; import com.agnet.leteApp.service.Endpoint; import com.android.volley.AuthFailureError; import com.android.volley.DefaultRetryPolicy; import com.android.volley.NetworkResponse; import com.android.volley.Request; import com.android.volley.Response; import com.android.volley.RetryPolicy; import com.android.volley.VolleyError; import com.android.volley.VolleyLog; import com.android.volley.toolbox.JsonObjectRequest; import com.android.volley.toolbox.StringRequest; import com.google.android.material.bottomnavigation.BottomNavigationView; import com.google.gson.Gson; import com.google.gson.GsonBuilder; import org.json.JSONException; import org.json.JSONObject; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; import static com.android.volley.VolleyLog.TAG; public class CartFragment extends Fragment { 
private FragmentActivity _c; private RecyclerView _cartlist; private LinearLayoutManager _layoutManager; private DatabaseHandler _dbHandler; private TextView _cartTotalAmnt; private DecimalFormat _formatter; private List<Cart> _products; private LinearLayout _errorMsg; private Button _placeOrderBtn; private AlertDialog _alertDialog; private BottomNavigationView _navigation; private LinearLayout _btnHome; private RelativeLayout _openCartBtm; private SharedPreferences.Editor _editor; private SharedPreferences _preferences; private String Token; private User _user; private Gson _gson; private ProgressBar _progressBar; private LinearLayout _transparentLoader; private Button _placeOrderNoQrCodeBtn; private List<Order> _orders; private List<Outlet> _outlet; @SuppressLint("RestrictedApi") @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_cart, container, false); _c = getActivity(); //initialize _preferences = getActivity().getSharedPreferences("SharedData", Context.MODE_PRIVATE); _editor = _preferences.edit(); _progressBar = view.findViewById(R.id.progress_bar); _transparentLoader = view.findViewById(R.id.transparent_loader); _dbHandler = new DatabaseHandler(_c); _formatter = new DecimalFormat("#,###,##0.00"); _gson = new Gson(); //binding _cartTotalAmnt = view.findViewById(R.id.total_cart_amount); _openCartBtm = _c.findViewById(R.id.open_cart_wrapper); _errorMsg = view.findViewById(R.id.error_msg); _cartlist = view.findViewById(R.id.cart_list); _placeOrderBtn = view.findViewById(R.id.place_order_btn); _placeOrderNoQrCodeBtn = view.findViewById(R.id.place_order_no_qrcode); _layoutManager = new LinearLayoutManager(_c, RecyclerView.VERTICAL, false); _cartlist.setLayoutManager(_layoutManager); _products = _dbHandler.getCart(); try { _user = _gson.fromJson(_preferences.getString("User", null), User.class); Token = _preferences.getString("TOKEN", null); _outlet = 
_dbHandler.getOutlets(); _orders =_dbHandler.getOrders(); } catch (NullPointerException e) { } CartAdapter adapter = new CartAdapter(_c, _products, this); _cartlist.setAdapter(adapter); _placeOrderBtn.setOnClickListener(view12 -> { if (_products.size() > 0) { // _progressBar.setVisibility(View.VISIBLE); // _transparentLoader.setVisibility(View.VISIBLE); // _placeOrderBtn.setClickable(false); /* if (PackageManager.PERMISSION_GRANTED == ActivityCompat.checkSelfPermission(_c, Manifest.permission.CAMERA)) { new FragmentHelper(_c).replaceWithbackStack(new OrderBarcodeFragment(), "OrderBarcodeFragment", R.id.fragment_placeholder); } else { requestWritePermission(_c); }*/ saveOrder(); } else { Toast.makeText(_c, "Kikapu hakina bidhaa!", Toast.LENGTH_SHORT).show(); } }); _placeOrderNoQrCodeBtn.setOnClickListener(view1 -> { if (_products.size() > 0) { _placeOrderNoQrCodeBtn.setClickable(false); new FragmentHelper(_c).replace(new OutletPhoneNumberFragment(), "OutletNumberFragment", R.id.fragment_placeholder); } else { Toast.makeText(_c, "Kikapu hakina bidhaa!", Toast.LENGTH_SHORT).show(); } }); return view; } public void setTotalCartAmnt(int totalCartAmnt) { _cartTotalAmnt.setText("" + _formatter.format(totalCartAmnt)); } @Override public void onResume() { super.onResume(); int totalPrice = _dbHandler.getTotalPrice(); _cartTotalAmnt.setText("" + _formatter.format(totalPrice)); } @Override public void onPause() { super.onPause(); _progressBar.setVisibility(View.GONE); _transparentLoader.setVisibility(View.GONE); } private static void requestWritePermission(final Context context) { if (ActivityCompat.shouldShowRequestPermissionRationale((Activity) context, Manifest.permission.CAMERA)) { new AlertDialog.Builder(context).setMessage("This app needs permission to use The phone Camera in order to activate the Scanner") .setPositiveButton("Allow", new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { 
ActivityCompat.requestPermissions((Activity) context, new String[]{Manifest.permission.CAMERA}, 1); } }).show(); } else { ActivityCompat.requestPermissions((Activity) context, new String[]{Manifest.permission.CAMERA}, 1); } } public void saveOrder() { _transparentLoader.setVisibility(View.VISIBLE); _progressBar.setVisibility(View.VISIBLE); Endpoint.setUrl("order"); String url = Endpoint.getUrl(); StringRequest postRequest = new StringRequest(Request.Method.POST, url, response -> { _transparentLoader.setVisibility(View.GONE); _progressBar.setVisibility(View.GONE); ResponseData res = _gson.fromJson(response, ResponseData.class); Toast.makeText(_c, "App inapakua Subiri...", Toast.LENGTH_SHORT).show(); if (res.getCode() == 201) { try { sendVfd(); } catch (JSONException e) { e.printStackTrace(); } } _placeOrderBtn.setClickable(true); }, error -> { error.printStackTrace(); _transparentLoader.setVisibility(View.GONE); _progressBar.setVisibility(View.GONE); _placeOrderBtn.setClickable(true); NetworkResponse response = error.networkResponse; String errorMsg = ""; if (response != null && response.data != null) { String errorString = new String(response.data); Log.i("log error", errorString); //TODO: display errors based on the message from the server Toast.makeText(_c, "Kuna tatizo, angalia mtandao alafu jaribu tena", Toast.LENGTH_SHORT).show(); } } ) { @Override public Map<String, String> getHeaders() { Map<String, String> params = new HashMap<String, String>(); params.put("Authorization", "Bearer " + "" + Token); return params; } @Override protected Map<String, String> getParams() { Map<String, String> params = new HashMap<String, String>(); params.put("createdDate", DateHelper.getCurrentDate()); params.put("deviceTime", DateHelper.getCurrentDate() + " " + DateHelper.getCurrentTime()); params.put("userId", "" + _orders.get(0).getUserId()); params.put("orderNo", "" + _orders.get(0).getOrderNo()); params.put("lat",""+_orders.get(0).getLat()); params.put("lng", 
""+_orders.get(0).getLng()); params.put("products", _gson.toJson(_dbHandler.getCart())); params.put("outletId", "1"); params.put("projectId", "" + _orders.get(0).getProjectId()); return params; } }; mSingleton.getInstance(_c).addToRequestQueue(postRequest); postRequest.setRetryPolicy(new RetryPolicy() { @Override public int getCurrentTimeout() { return 50000; } @Override public int getCurrentRetryCount() { return 50000; } @Override public void retry(VolleyError error) throws VolleyError { } }); } public void sendVfd() throws JSONException { String url = "http://tra.aggreyapps.com/apis/receive.php";// testing // String url = "http://vfd.aggreyapps.com/maxvfd-api/apis/receive.php";//live List<InvoiceDetail> invoiceDetails = new ArrayList<>(); for (Cart cart : _products) { //tax code 1 is equal to taxable 18%, 3 is equal to none taxable invoiceDetails.add(new InvoiceDetail(cart.getName(), "" + cart.getQuantity(), "3", "" + cart.getAmount())); } // _preferences.getString("NEW_ORDER_NO", null) List<Invoice> invoiceList = new ArrayList<>(); invoiceList.add( new Invoice( DateHelper.getCurrentDate(), DateHelper.getCurrentTime(), ""+ _orders.get(0).getOrderNo(), 6, null, _outlet.get(0).getName(), _outlet.get(0).getPhone(), "RoutePro", invoiceDetails ) ); Log.d("HERERECEIPT", _gson.toJson(invoiceList)); Vfd vfd = new Vfd(invoiceList); Gson gson = new GsonBuilder().create(); // Log.d("CUSTOMEROBJECT", _gson.toJson(vfd)); JsonObjectRequest jsonObjReq = new JsonObjectRequest(Request.Method.POST, url, new JSONObject(gson.toJson(vfd)), response -> { // Log.d(TAG, response.toString()); Receipt res = _gson.fromJson(String.valueOf(response), Receipt.class); _editor.putString("VFD_RECEIPT",_gson.toJson(res)); _editor.putString("OUTLET_NAME", _outlet.get(0).getName()); _editor.commit(); Log.d("LOGHAPAPOAVFD", "" + _gson.toJson(res)); new FragmentHelper(_c).replace(new ReceiptFragment(),"ReceiptFragment",R.id.fragment_placeholder); }, new Response.ErrorListener() { @Override public 
// sendVfd() error handler tail (logs the Volley error, hides the spinner, shows a Swahili toast),
// plus the JSON Content-Type header override and a DefaultRetryPolicy with doubled timeout.
// After the first <file_sep>: complete Outlet model — plain Gson DTO; note lat/lng/location are
// never set by this constructor, so they are populated only via Gson deserialization (presumably
// from the API response — confirm). After the second <file_sep>: head of the Order model
// (constructor continues on the next line).
void onErrorResponse(VolleyError error) { VolleyLog.d(TAG, "Error: " + error.getMessage()); _progressBar.setVisibility(View.GONE); Toast.makeText(_c, "Kuna tatizo, kama linaendelea wasiliana na IT!", Toast.LENGTH_LONG).show(); } }) { //headers @Override public Map<String, String> getHeaders() throws AuthFailureError { HashMap<String, String> headers = new HashMap<String, String>(); headers.put("Content-Type", "application/json; charset=utf-8"); return headers; } }; mSingleton.getInstance(_c).addToRequestQueue(jsonObjReq); jsonObjReq.setRetryPolicy(new DefaultRetryPolicy(DefaultRetryPolicy.DEFAULT_TIMEOUT_MS * 2, DefaultRetryPolicy.DEFAULT_MAX_RETRIES, DefaultRetryPolicy.DEFAULT_BACKOFF_MULT)); } } <file_sep>/app/src/main/java/com/agnet/leteApp/models/Outlet.java package com.agnet.leteApp.models; import com.google.gson.annotations.SerializedName; public class Outlet { private int id; private String name,phone; private Double lat, lng; private String location; @SerializedName("qr_code") private String qrCode; public Outlet(int id, String name,String phone, String location, String qrCode){ this.id = id; this.name = name; this.phone = phone; this.location = location; this.qrCode = qrCode; } public int getId() { return id; } public String getName() { return name; } public Double getLat() { return lat; } public Double getLng() { return lng; } public String getLocation() { return location; } public String getPhone() { return phone; } public String getQrCode() { return qrCode; } } <file_sep>/app/src/main/java/com/agnet/leteApp/models/Order.java package com.agnet.leteApp.models; import com.google.gson.annotations.SerializedName; public class Order { private int id,status, userId, projectId,outletId; private String deviceTime, createdDate; private String lat,lng; @SerializedName("order_no") private int orderNo; public Order(int id, String deviceTime, int orderNo, int status, String lat, String lng, String createdDate, int userId, int projectId, int outletId){ this.id = id;
// Order model tail: remaining constructor assignments plus plain getters (immutable DTO; "order_no"
// JSON key maps to orderNo via @SerializedName). Then the complete OutletImage model — a tiny
// two-field DTO (image path/URL + display name; exact semantics of `img` not visible here).
// Then the head of MappingSuccessFragment.java: package declaration and its Android/AndroidX imports
// (continued on the next line).
this.deviceTime = deviceTime; this.orderNo = orderNo; this.status = status; this.userId = userId; this.createdDate = createdDate; this.lat = lat; this.lng = lng; this.projectId = projectId; this.outletId = outletId; } public int getId() { return id; } public int getOrderNo() { return orderNo; } public int getUserId() { return userId; } public String getCreatedDate() { return createdDate; } public int getStatus() { return status; } public String getDeviceTime() { return deviceTime; } public String getLat() { return lat; } public String getLng() { return lng; } public int getOutletId() { return outletId; } public int getProjectId() { return projectId; } } <file_sep>/app/src/main/java/com/agnet/leteApp/models/OutletImage.java package com.agnet.leteApp.models; public class OutletImage { private String name, img; public OutletImage(String img, String name) { this.name = name; this.img = img; } public String getImg() { return img; } public String getName() { return name; } } <file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/mapping/MappingSuccessFragment.java package com.agnet.leteApp.fragments.main.mapping; import android.annotation.SuppressLint; import android.content.Context; import android.content.SharedPreferences; import android.os.Bundle; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.Button; import android.widget.ProgressBar; import android.widget.TextView; import android.widget.Toast; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentActivity; import androidx.recyclerview.widget.LinearLayoutManager; import androidx.recyclerview.widget.RecyclerView; import com.agnet.leteApp.R; import com.agnet.leteApp.application.mSingleton; import com.agnet.leteApp.fragments.main.adapters.FormAdapter; import com.agnet.leteApp.helpers.FragmentHelper; import com.agnet.leteApp.models.ResponseData; import com.agnet.leteApp.models.User; import
// MappingSuccessFragment: post-mapping success screen. onCreateView inflates
// fragment_mapping_success, wires SharedPreferences ("SharedData"), and the "continue" button
// simply navigates to MappingFormListFragment. Several declared fields (_progressBar, _user,
// _projectId, _formList, ...) are never used in the visible code — likely leftovers; confirm
// before removing. Then the head of ProductsFragment.java: package declaration and import run
// (continued on the next line).
com.agnet.leteApp.service.Endpoint; import com.android.volley.NetworkResponse; import com.android.volley.Request; import com.android.volley.Response; import com.android.volley.RetryPolicy; import com.android.volley.VolleyError; import com.android.volley.toolbox.StringRequest; import com.google.gson.Gson; import java.util.HashMap; import java.util.Map; public class MappingSuccessFragment extends Fragment { private FragmentActivity _c; private Gson _gson; private SharedPreferences _preferences; private SharedPreferences.Editor _editor; private ProgressBar _progressBar; private String Token; private User _user; private int _projectId; private String _projectName; private RecyclerView _formList; private LinearLayoutManager _formLayoutManager; private Button _continueBtn; @SuppressLint("RestrictedApi") @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_mapping_success, container, false); _c = getActivity(); _gson = new Gson(); _preferences = _c.getSharedPreferences("SharedData", Context.MODE_PRIVATE); _editor = _preferences.edit(); _continueBtn = view.findViewById(R.id.continue_btn); _continueBtn.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { new FragmentHelper(_c).replace(new MappingFormListFragment(),"MappingFormListFragment", R.id.fragment_placeholder); } }); return view; } } <file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/sales/ProductsFragment.java package com.agnet.leteApp.fragments.main.sales; import android.annotation.SuppressLint; import android.content.Context; import android.content.SharedPreferences; import android.os.Bundle; import android.util.Log; import android.view.KeyEvent; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.LinearLayout; import android.widget.RelativeLayout; import android.widget.TextView; import
// ProductsFragment (continued): product-catalog screen for the sales flow. Remaining imports,
// fields, and the start of onCreateView — reads the cached User/token/project from
// SharedPreferences and kicks off getCategories(clientId).
android.widget.Toast; import androidx.annotation.Nullable; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentActivity; import androidx.recyclerview.widget.GridLayoutManager; import androidx.recyclerview.widget.LinearLayoutManager; import androidx.recyclerview.widget.RecyclerView; import com.agnet.leteApp.R; import com.agnet.leteApp.application.mSingleton; import com.agnet.leteApp.fragments.main.ProjectFragment; import com.agnet.leteApp.fragments.main.adapters.CategoryAdapter; import com.agnet.leteApp.fragments.main.adapters.ProductsAdapter; import com.agnet.leteApp.helpers.DatabaseHandler; import com.agnet.leteApp.helpers.FragmentHelper; import com.agnet.leteApp.models.Category; import com.agnet.leteApp.models.ResponseData; import com.agnet.leteApp.models.User; import com.agnet.leteApp.service.Endpoint; import com.android.volley.NetworkResponse; import com.android.volley.Request; import com.android.volley.RetryPolicy; import com.android.volley.VolleyError; import com.android.volley.toolbox.StringRequest; import com.facebook.shimmer.ShimmerFrameLayout; import com.google.gson.Gson; import java.util.HashMap; import java.util.List; import java.util.Map; public class ProductsFragment extends Fragment { private FragmentActivity _c; private RecyclerView _productsList, _categorytList; private LinearLayoutManager _productsLayoutManager, _categoryLayoutManager; private String Token; private SharedPreferences.Editor _editor; private SharedPreferences _preferences; private Gson _gson; private ShimmerFrameLayout _shimmerLoader; private User _user; private DatabaseHandler _dbHandler; @SuppressLint("RestrictedApi") @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_products, container, false); _c = getActivity(); _preferences = getActivity().getSharedPreferences("SharedData", Context.MODE_PRIVATE); _editor = _preferences.edit(); _gson = new Gson();
// onCreateView continued: horizontal category list + 2-column product grid; cart badge shows the
// local-DB quantity total; the empty catch(NullPointerException) silently tolerates missing prefs
// (first-run state — confirm that is intentional). onPause stops the shimmer loader;
// onActivityCreated intercepts BACK to return to ProjectFragment. getCategories(id) starts here.
_dbHandler = new DatabaseHandler(_c); TextView username = view.findViewById(R.id.user_name); _categorytList= view.findViewById(R.id.category_list); _productsList= view.findViewById(R.id.product_list); _shimmerLoader = view.findViewById(R.id.shimmer_view_container); LinearLayout userAcc = view.findViewById(R.id.view_user_account_btn); TextView totalQnty = view.findViewById(R.id.total_qnty); RelativeLayout openCartBtn = view.findViewById(R.id.open_cart); try { _user = _gson.fromJson(_preferences.getString("User", null), User.class); Token = _preferences.getString("TOKEN", null); String projectName = _preferences.getString("PROJECT_NAME", null); username.setText(projectName); int clientId = _preferences.getInt("CLIENT_ID", 0); getCategories(clientId); totalQnty.setText(""+_dbHandler.getTotalQnty()); } catch (NullPointerException e) { } _categoryLayoutManager= new LinearLayoutManager(_c, RecyclerView.HORIZONTAL, false); _categorytList.setLayoutManager(_categoryLayoutManager); _productsLayoutManager = new GridLayoutManager(_c, 2); _productsList.setLayoutManager(_productsLayoutManager); openCartBtn.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { new FragmentHelper(_c).replaceWithbackStack(new CartFragment(),"CartFragment", R.id.fragment_placeholder); } }); return view; } @Override public void onPause() { super.onPause(); _shimmerLoader.setVisibility(View.GONE); _shimmerLoader.stopShimmerAnimation(); } @Override public void onActivityCreated(@Nullable Bundle savedInstanceState) { super.onActivityCreated(savedInstanceState); getView().setFocusableInTouchMode(true); getView().requestFocus(); getView().setOnKeyListener((v, keyCode, event) -> { if (event.getAction() == KeyEvent.ACTION_DOWN) { if (keyCode == KeyEvent.KEYCODE_BACK) { new FragmentHelper(_c).replaceWithbackStack(new ProjectFragment(),"ProjectFragment", R.id.fragment_placeholder); return true; } } return false; }); } public void getCategories(int id) { Log.d("CLIENT_ID",
// getCategories continued: GET categories/client/{id} with Bearer auth; renders the category strip
// and auto-loads products for the FIRST category — will throw IndexOutOfBounds if the server
// returns an empty list (no guard visible; flag for review). Same 50000 retry-count oddity as above.
""+id); _shimmerLoader.setVisibility(View.VISIBLE); _shimmerLoader.startShimmerAnimation(); Endpoint.setUrl("categories/client/"+id); String url = Endpoint.getUrl(); StringRequest postRequest = new StringRequest(Request.Method.GET, url, response -> { ResponseData res = _gson.fromJson(response, ResponseData.class); List<Category> categories = res.getCategories(); CategoryAdapter productsAdapter = new CategoryAdapter(_c,categories, this); _categorytList.setAdapter(productsAdapter); _shimmerLoader.setVisibility(View.GONE); _shimmerLoader.stopShimmerAnimation(); getProducts(categories.get(0).getId()); }, error -> { error.printStackTrace(); _shimmerLoader.setVisibility(View.GONE); _shimmerLoader.stopShimmerAnimation(); NetworkResponse response = error.networkResponse; String errorMsg = ""; if (response != null && response.data != null) { String errorString = new String(response.data); Log.i("log error", errorString); //TODO: display errors based on the message from the server Toast.makeText(_c, "Kuna tatizo, angalia mtandao alafu jaribu tena", Toast.LENGTH_SHORT).show(); } } ) { @Override public Map<String, String> getHeaders() { Map<String, String> params = new HashMap<String, String>(); params.put("Authorization", "Bearer " + "" + Token); return params; } }; mSingleton.getInstance(_c).addToRequestQueue(postRequest); postRequest.setRetryPolicy(new RetryPolicy() { @Override public int getCurrentTimeout() { return 50000; } @Override public int getCurrentRetryCount() { return 50000; } @Override public void retry(VolleyError error) throws VolleyError { } }); } public void getProducts(int cid) { Endpoint.setUrl("products/category/"+cid); String url = Endpoint.getUrl(); StringRequest postRequest = new StringRequest(Request.Method.GET, url, response -> { ResponseData res = _gson.fromJson(response, ResponseData.class); ProductsAdapter productsAdapter = new ProductsAdapter(_c,res.getProducts(), this); _productsList.setAdapter(productsAdapter); // Log.d("RESPONSEHERE",
// getProducts continued: error handler + auth header + retry policy (same pattern as
// getCategories). After the <file_sep>: head of NewOutletFragment.java imports (continued below).
_gson.toJson(res.getProducts().get(0).getPrice())); }, error -> { error.printStackTrace(); _shimmerLoader.setVisibility(View.GONE); _shimmerLoader.stopShimmerAnimation(); NetworkResponse response = error.networkResponse; String errorMsg = ""; if (response != null && response.data != null) { String errorString = new String(response.data); Log.i("log error", errorString); //TODO: display errors based on the message from the server Toast.makeText(_c, "Kuna tatizo, angalia mtandao alafu jaribu tena", Toast.LENGTH_SHORT).show(); } } ) { @Override public Map<String, String> getHeaders() { Map<String, String> params = new HashMap<String, String>(); params.put("Authorization", "Bearer " + "" + Token); return params; } }; mSingleton.getInstance(_c).addToRequestQueue(postRequest); postRequest.setRetryPolicy(new RetryPolicy() { @Override public int getCurrentTimeout() { return 50000; } @Override public int getCurrentRetryCount() { return 50000; } @Override public void retry(VolleyError error) throws VolleyError { } }); } } <file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/outlets/NewOutletFragment.java package com.agnet.leteApp.fragments.main.outlets; import android.annotation.SuppressLint; import android.content.Context; import android.content.SharedPreferences; import android.os.Bundle; import android.util.Log; import android.util.SparseArray; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.ArrayAdapter; import android.widget.Button; import android.widget.EditText; import android.widget.LinearLayout; import android.widget.RelativeLayout; import android.widget.Spinner; import android.widget.TextView; import android.widget.Toast; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentActivity; import com.agnet.leteApp.R; import com.agnet.leteApp.application.mSingleton; import com.agnet.leteApp.helpers.DatabaseHandler; import
// NewOutletFragment (continued): outlet-registration form — name/phone inputs, shop-type spinner,
// VFD customer-type spinner, VFD customer id. Remaining imports, fields, and the start of
// onCreateView.
com.agnet.leteApp.helpers.FragmentHelper; import com.agnet.leteApp.models.CustomerType; import com.agnet.leteApp.models.User; import com.agnet.leteApp.service.Endpoint; import com.android.volley.DefaultRetryPolicy; import com.android.volley.NetworkResponse; import com.android.volley.Request; import com.android.volley.Response; import com.android.volley.VolleyError; import com.android.volley.toolbox.StringRequest; import com.google.android.gms.samples.vision.barcodereader.BarcodeCapture; import com.google.android.gms.samples.vision.barcodereader.BarcodeGraphic; import com.google.android.gms.vision.barcode.Barcode; import com.google.android.material.bottomnavigation.BottomNavigationView; import com.google.gson.Gson; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import xyz.belvi.mobilevisionbarcodescanner.BarcodeRetriever; public class NewOutletFragment extends Fragment { private FragmentActivity _c; private SharedPreferences _preferences; private SharedPreferences.Editor _editor; private EditText _phone, _name; private LinearLayout _progressBar; private TextView _shopQrCode; private LinearLayout _newOuletBtn; private Spinner _custTypesSpinner, _vfdCustTypeSpinner; private String _shopType, _vfdType; private int _typeId, _vfdTypeId; private List<CustomerType> _customerTypes; private List<CustomerType> _vfdCustTypes; private EditText _vfdCustId; private Gson _gson; private User _user; private String Token; @SuppressLint("RestrictedApi") @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_new_outlet, container, false); _c = getActivity(); _preferences = _c.getSharedPreferences("SharedData", Context.MODE_PRIVATE); _editor = _preferences.edit(); _gson = new Gson(); _phone = view.findViewById(R.id.phone_input); _name = view.findViewById(R.id.name_input); _progressBar = view.findViewById(R.id.progress_bar_wrapper);
// onCreateView continued: spinner listeners — selecting a non-TIN VFD type auto-fills "NIL" and
// hides the TIN field; submit button validates name/phone/shop-type/TIN (Swahili error toasts),
// then stashes the form in SharedPreferences (phone is prefixed "+255" — Tanzania) and navigates to
// NewBarcodeFragment to scan the outlet's QR code.
_shopQrCode = view.findViewById(R.id.shop_qr_code); _newOuletBtn = view.findViewById(R.id.new_outlet_btn); _custTypesSpinner = view.findViewById(R.id.cust_types_spinner); _vfdCustTypeSpinner = view.findViewById(R.id.vfd_cust_type_spinner); _vfdCustId = view.findViewById(R.id.vfd_cust_id_input); try { _user = _gson.fromJson(_preferences.getString("User", null), User.class); Token = _preferences.getString("TOKEN", null); } catch (NullPointerException e) { } _custTypesSpinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() { @Override public void onItemSelected(AdapterView<?> adapterView, View view, int position, long l) { _shopType = adapterView.getItemAtPosition(position).toString(); _typeId = _customerTypes.get(position).getId(); } @Override public void onNothingSelected(AdapterView<?> adapterView) { } }); _vfdCustTypeSpinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() { @Override public void onItemSelected(AdapterView<?> adapterView, View view, int position, long l) { if (position != 0) { _vfdCustId.setText("NIL"); _vfdCustId.setVisibility(View.GONE); } else { _vfdCustId.setVisibility(View.VISIBLE); } _vfdType = adapterView.getItemAtPosition(position).toString(); _vfdTypeId = _vfdCustTypes.get(position).getId(); } @Override public void onNothingSelected(AdapterView<?> adapterView) { } }); _newOuletBtn.setOnClickListener(view1 -> { String phone = _phone.getText().toString(); String name = _name.getText().toString(); String vfdId = _vfdCustId.getText().toString(); if(name.isEmpty()){ Toast.makeText(_c, "Ingiza jina la mteja!", Toast.LENGTH_LONG).show(); }else if(phone.isEmpty()){ Toast.makeText(_c, "Ingiza namba ya simu!", Toast.LENGTH_LONG).show(); }else if(_typeId == 0){ Toast.makeText(_c, "Chagua aina ya duka", Toast.LENGTH_LONG).show(); }else if(vfdId.isEmpty()){ Toast.makeText(_c, "Ingiza TIN", Toast.LENGTH_LONG).show(); }else { _progressBar.setVisibility(View.VISIBLE); _editor.putString("PHONE", "+255" + phone);
// Form stash continued, then onPause hides the progress bar. getCustomerType()/getVfdCustomerType()
// populate the two spinners from hard-coded CustomerType lists (shop types 0-6; VFD types: TIN=1,
// NILL=6). After the <file_sep>: head of OrderBarcodeFragment.java (continued below).
_editor.putString("NAME", name); _editor.putInt("VFD_TYPE", _vfdTypeId); _editor.putString("VFD_ID", vfdId); _editor.putInt("OUTLET_TYPE_ID", _typeId); _editor.commit(); new FragmentHelper(_c).replaceWithbackStack(new NewBarcodeFragment(), "NewBarcodeFragment", R.id.fragment_placeholder); } }); getCustomerType(); getVfdCustomerType(); return view; } @Override public void onPause() { super.onPause(); _progressBar.setVisibility(View.GONE); } private void getCustomerType() { _customerTypes = new ArrayList<>(); _customerTypes.add(new CustomerType(0, "Chagua aina ya duka")); _customerTypes.add(new CustomerType(1, "Mini Supermarket")); _customerTypes.add(new CustomerType(2, "Bar")); _customerTypes.add(new CustomerType(3, "WholeSale")); _customerTypes.add(new CustomerType(4, "Duka")); _customerTypes.add(new CustomerType(5, "Supermarket")); _customerTypes.add(new CustomerType(6, "Kiosk")); // Creating adapter for spinner ArrayAdapter<CustomerType> dataAdapter = new ArrayAdapter<CustomerType>(_c, android.R.layout.simple_spinner_item, _customerTypes); // Drop down layout style - list view with radio button dataAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); // attaching data adapter to spinner _custTypesSpinner.setAdapter(dataAdapter); } private void getVfdCustomerType() { _vfdCustTypes = new ArrayList<>(); _vfdCustTypes.add(new CustomerType(1, "TIN NUMBER")); _vfdCustTypes.add(new CustomerType(6, "NILL")); // Creating adapter for spinner ArrayAdapter<CustomerType> dataAdapter = new ArrayAdapter<CustomerType>(_c, android.R.layout.simple_spinner_item, _vfdCustTypes); // Drop down layout style - list view with radio button dataAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); // attaching data adapter to spinner _vfdCustTypeSpinner.setAdapter(dataAdapter); } } <file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/sales/OrderBarcodeFragment.java package com.agnet.leteApp.fragments.main.sales; import
// OrderBarcodeFragment (continued): scans an outlet QR code and, if the outlet exists, submits the
// current cart as an order. Remaining imports and fields.
android.annotation.SuppressLint; import android.content.Context; import android.content.SharedPreferences; import android.os.Bundle; import android.util.Log; import android.util.SparseArray; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.Button; import android.widget.LinearLayout; import android.widget.ProgressBar; import android.widget.Toast; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentActivity; import com.agnet.leteApp.R; import com.agnet.leteApp.application.mSingleton; import com.agnet.leteApp.fragments.main.outlets.NewOutletFragment; import com.agnet.leteApp.helpers.DatabaseHandler; import com.agnet.leteApp.helpers.DateHelper; import com.agnet.leteApp.helpers.FragmentHelper; import com.agnet.leteApp.models.Outlet; import com.agnet.leteApp.models.ResponseData; import com.agnet.leteApp.models.User; import com.agnet.leteApp.service.Endpoint; import com.android.volley.NetworkResponse; import com.android.volley.Request; import com.android.volley.RetryPolicy; import com.android.volley.VolleyError; import com.android.volley.toolbox.StringRequest; import com.google.android.gms.samples.vision.barcodereader.BarcodeCapture; import com.google.android.gms.samples.vision.barcodereader.BarcodeGraphic; import com.google.android.gms.vision.barcode.Barcode; import com.google.gson.Gson; import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Random; import xyz.belvi.mobilevisionbarcodescanner.BarcodeRetriever; public class OrderBarcodeFragment extends Fragment implements BarcodeRetriever { private FragmentActivity _c; private SharedPreferences _preferences; private SharedPreferences.Editor _editor; private Gson _gson; private User _user; private String Token; private String _phone, _name, _vfdId; private int _vfdType, _outletTypeId; private BarcodeCapture barcodeCapture; private String _lng, _lat; 
// onCreateView wires the embedded BarcodeCapture child fragment and a "register outlet" shortcut.
// onRetrieved runs on the scanner thread, so the QR lookup is posted to the UI thread.
// BarcodeRetriever callbacks other than onRetrieved are intentionally no-ops. isQrCodeAvailable
// GETs outlet/qrcode/{code}; NOTE(review): code 409 is treated as "outlet FOUND" here — unusual use
// of the conflict status; confirm the API contract.
private String _location; private ProgressBar _progressBar; private LinearLayout _transparentLoader; private DatabaseHandler _dbHandler; private int _projectId; @SuppressLint("RestrictedApi") @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_barcode, container, false); _c = getActivity(); barcodeCapture = (BarcodeCapture) getChildFragmentManager().findFragmentById(R.id.barcode); barcodeCapture.setRetrieval(this); _preferences = _c.getSharedPreferences("SharedData", Context.MODE_PRIVATE); _editor = _preferences.edit(); _dbHandler = new DatabaseHandler(_c); _gson = new Gson(); _progressBar = view.findViewById(R.id.progress_bar); _transparentLoader = view.findViewById(R.id.transparent_loader); Button registerOutletBt = view.findViewById(R.id.register_outlet_btn); try { _user = _gson.fromJson(_preferences.getString("User", null), User.class); Token = _preferences.getString("TOKEN", null); _projectId = _preferences.getInt("PROJECT_ID",0); } catch (NullPointerException e) { } registerOutletBt.setOnClickListener(view1 -> new FragmentHelper(_c).replace(new NewOutletFragment(), "NewOutletFragment", R.id.fragment_placeholder)); return view; } @Override public void onRetrieved(Barcode barcode) { // Log.d(TAG, "Barcode read: " + barcode.displayValue); _c.runOnUiThread(new Runnable() { @Override public void run() { isQrCodeAvailable(barcode.displayValue); } }); } @Override public void onRetrievedMultiple(Barcode closetToClick, List<BarcodeGraphic> barcode) { } @Override public void onBitmapScanned(SparseArray<Barcode> sparseArray) { } @Override public void onRetrievedFailed(String reason) { } @Override public void onPermissionRequestDenied() { } private void isQrCodeAvailable(String qrcode) { Endpoint.setUrl("outlet/qrcode/"+qrcode); String url = Endpoint.getUrl(); StringRequest postRequest = new StringRequest(Request.Method.GET, url, response -> {
// isQrCodeAvailable response handling: on 409 caches the Outlet JSON and calls saveOrder(outletId);
// otherwise prompts (Swahili) to register the QR code first. saveOrder() begins below —
// NOTE(review): `rand.nextInt((9999 - 100) + 1) + 10` yields 10..9909; the (9999 - 100) + 1 bound
// strongly suggests the offset was meant to be + 100 (range 100..9999) — confirm and fix upstream.
_transparentLoader.setVisibility(View.GONE); _progressBar.setVisibility(View.GONE); ResponseData res = _gson.fromJson(response, ResponseData.class); if (res.getCode() == 409) { // Log.d("HEREHAPA", response); Outlet outlet = res.getOutlet(); saveOrder(outlet.getId()); _editor.putString("OUTLET_OBJ", _gson.toJson(outlet)); _editor.commit(); } else { Toast.makeText(_c, "Qr code haipo, sajili qr code", Toast.LENGTH_LONG).show(); } // }, error -> { error.printStackTrace(); _transparentLoader.setVisibility(View.GONE); _progressBar.setVisibility(View.GONE); NetworkResponse response = error.networkResponse; String errorMsg = ""; if (response != null && response.data != null) { String errorString = new String(response.data); Log.i("log error", errorString); //TODO: display errors based on the message from the server Toast.makeText(_c, "Kuna tatizo, angalia mtandao alafu jaribu tena", Toast.LENGTH_SHORT).show(); } } ) { @Override public Map<String, String> getHeaders() { Map<String, String> params = new HashMap<String, String>(); params.put("Authorization", "Bearer " + "" + Token); return params; } }; mSingleton.getInstance(_c).addToRequestQueue(postRequest); postRequest.setRetryPolicy(new RetryPolicy() { @Override public int getCurrentTimeout() { return 50000; } @Override public int getCurrentRetryCount() { return 50000; } @Override public void retry(VolleyError error) throws VolleyError { } }); } public void saveOrder(int outletId) { _transparentLoader.setVisibility(View.VISIBLE); _progressBar.setVisibility(View.VISIBLE); Random rand = new Random(); int orderNoRandom = rand.nextInt((9999 - 100) + 1) + 10; Endpoint.setUrl("order"); String url = Endpoint.getUrl(); StringRequest postRequest = new StringRequest(Request.Method.POST, url, response -> { _transparentLoader.setVisibility(View.GONE); _progressBar.setVisibility(View.GONE); //Log.d("CARTPRODUCT", response); ResponseData res = _gson.fromJson(response, ResponseData.class); if (res.getCode() == 201) {
// saveOrder continued: on 201 persists the server-assigned order number and navigates to
// ReceiptFragment; POST body includes device GPS from prefs (mLATITUDE/mLONGITUDE), the serialized
// local cart, the scanned outletId (unlike the hard-coded "1" in the earlier fragment), and the
// selected projectId. Same Bearer-auth header and 50000/50000 retry-policy pattern as elsewhere.
_editor.putInt("ORDER_NO",res.getOrder().getOrderNo()); _editor.commit(); Log.d("CARTPRODUCT", _gson.toJson(res.getOrder().getOrderNo())); new FragmentHelper(_c).replace(new ReceiptFragment(), "ReceiptFragment", R.id.fragment_placeholder); // new FragmentHelper(_c).replace(new SalesSuccessFragment(), "SalesSuccessFragment", R.id.fragment_placeholder); } }, error -> { error.printStackTrace(); _transparentLoader.setVisibility(View.GONE); _progressBar.setVisibility(View.GONE); NetworkResponse response = error.networkResponse; String errorMsg = ""; if (response != null && response.data != null) { String errorString = new String(response.data); Log.i("log error", errorString); //TODO: display errors based on the message from the server Toast.makeText(_c, "Kuna tatizo, angalia mtandao alafu jaribu tena", Toast.LENGTH_LONG).show(); } } ) { @Override public Map<String, String> getHeaders() { Map<String, String> params = new HashMap<String, String>(); params.put("Authorization", "Bearer " + "" + Token); return params; } @Override protected Map<String, String> getParams() { Map<String, String> params = new HashMap<String, String>(); params.put("createdDate", DateHelper.getCurrentDate()); params.put("deviceTime", DateHelper.getCurrentDate() + " " + DateHelper.getCurrentTime()); params.put("userId", "" + _user.getId()); params.put("orderNo", "" + orderNoRandom); params.put("lat", _preferences.getString("mLATITUDE", null)); params.put("lng", _preferences.getString("mLONGITUDE", null)); params.put("products", _gson.toJson(_dbHandler.getCart())); params.put("outletId", "" + outletId); params.put("projectId", "" + _projectId); return params; } }; mSingleton.getInstance(_c).addToRequestQueue(postRequest); postRequest.setRetryPolicy(new RetryPolicy() { @Override public int getCurrentTimeout() { return 50000; } @Override public int getCurrentRetryCount() { return 50000; } @Override public void retry(VolleyError error) throws VolleyError { } }); } }
// ProjectTypeAdapter.java: RecyclerView adapter for the home screen's project-type cards
// (imports and original author header follow).
<file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/adapters/ProjectTypeAdapter.java package com.agnet.leteApp.fragments.main.adapters; import android.content.Context; import android.content.SharedPreferences; import android.graphics.Color; import android.graphics.drawable.Drawable; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageView; import android.widget.LinearLayout; import android.widget.RelativeLayout; import android.widget.TextView; import android.widget.Toast; import androidx.annotation.Nullable; import androidx.fragment.app.Fragment; import androidx.recyclerview.widget.RecyclerView; import com.agnet.leteApp.R; import com.agnet.leteApp.fragments.main.HomeFragment; import com.agnet.leteApp.fragments.main.ProjectFragment; import com.agnet.leteApp.fragments.main.mapping.MappingFormListFragment; import com.agnet.leteApp.fragments.main.merchandise.MerchandiseFormFragment; import com.agnet.leteApp.fragments.main.sales.ProductsFragment; import com.agnet.leteApp.helpers.FragmentHelper; import com.agnet.leteApp.models.Project; import com.agnet.leteApp.models.ProjectType; import com.agnet.leteApp.service.Endpoint; import com.bumptech.glide.Glide; import com.bumptech.glide.load.DataSource; import com.bumptech.glide.load.engine.GlideException; import com.bumptech.glide.request.RequestListener; import com.bumptech.glide.request.target.Target; import com.google.android.gms.vision.text.Line; import java.text.DecimalFormat; import java.util.Collections; import java.util.List; /** * Created by alicephares on 8/5/16.
// Adapter body: holds the ProjectType list plus a HomeFragment back-reference for dialog callbacks;
// duplicate preferences/editor field pairs look like leftovers (only _preferences/_editor are used).
// onBindViewHolder (starts here) binds name/stats and loads the icon via Glide; the selection-
// highlight branch is commented out (dead code kept by the author).
*/ public class ProjectTypeAdapter extends RecyclerView.Adapter<ProjectTypeAdapter.ViewHolder> { private List<ProjectType> types = Collections.emptyList(); private LayoutInflater inflator; private Context c; private SharedPreferences preferences; private SharedPreferences.Editor editor; private SharedPreferences _preferences; private SharedPreferences.Editor _editor; private int selected_position = 0; private HomeFragment fragment; // Provide a suitable constructor (depends on the kind of dataset) public ProjectTypeAdapter(Context c, List<ProjectType> types, HomeFragment fragment) { this.types = types; this.inflator = LayoutInflater.from(c); this.c = c; this.fragment = fragment; _preferences = c.getSharedPreferences("SharedData", Context.MODE_PRIVATE); _editor = _preferences.edit(); } // Create new views (invoked by the layout manager) @Override public ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) { // create a new view View v = inflator.inflate(R.layout.card_project_type, parent, false); // set the view's size, margins, padding and layout parameters ViewHolder vh = new ViewHolder(c, v); return vh; } int count = 0; // Replace the contents of a view (invoked by the layout manager) @Override public void onBindViewHolder(final ViewHolder holder, final int position) { //get a position of a current saleItem final ProjectType currentType = types.get(position); holder.mName.setText(currentType.getName()); holder.mStats.setText(""+currentType.getStats()); glide(holder.mIcon, currentType.getIcon()); /* if (selected_position == position) { holder.mWrapper.setBackgroundResource(R.drawable.round_corners_blue); holder.mName.setTextColor(Color.parseColor("#ffffff")); holder.mIconWrapper.setBackgroundResource(R.drawable.round_corners_blue); holder.mWrapper.setPadding(5, 5, 5, 5); // fragment.setProjectType(currentType.getName()); glide(holder.mIcon, currentType.getSelectedIcon()); } else { holder.mWrapper.setBackgroundResource(R.drawable.round_corners_white);
// Click handler dispatches on the type NAME string (Swahili: "Uwepo"=presence, "Vipeperushi"=
// flyers, "Mauzo"=sales) — string-matching on display names is fragile; flag for review. glide()
// resolves a drawable-resource name to an id and loads it with a placeholder-on-error listener.
holder.mName.setTextColor(Color.parseColor("#000000")); holder.mIconWrapper.setBackgroundResource(R.drawable.round_corners_with_grey_bg); // holder.mWrapper.setPadding(10,10,10,10); glide(holder.mIcon, currentType.getIcon()); }*/ holder.mWrapper.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { if(currentType.getName().equals("Uwepo")){ new FragmentHelper(c).replace(new ProjectFragment(),"ProjectFragment", R.id.fragment_placeholder); }else if(currentType.getName().equals("Vipeperushi")){ new FragmentHelper(c).replace(new ProjectFragment(),"ProjectFragment", R.id.fragment_placeholder); }else if(currentType.getName().equals("Mauzo")){ fragment.showDIalog(); }else { Toast.makeText(c, "Huna ruhusa ya kuingia hapa!", Toast.LENGTH_SHORT).show(); } //store type of the project _editor.putString("SELECTED_PROJECT_TYPE",currentType.getName()); _editor.commit(); } }); } private void glide(ImageView view, String url) { Glide.with(c).load(displayImg(view, url)) .override(Target.SIZE_ORIGINAL, Target.SIZE_ORIGINAL) .error(R.drawable.ic_place_holder) .listener(new RequestListener<Drawable>() { @Override public boolean onLoadFailed(@Nullable GlideException e, Object model, Target<Drawable> target, boolean isFirstResource) { return false; } @Override public boolean onResourceReady(Drawable resource, Object model, Target<Drawable> target, DataSource dataSource, boolean isFirstResource) { return false; } }).into(view); } private int displayImg(ImageView view, String url) { Context context = view.getContext(); int id = context.getResources().getIdentifier(url, "drawable", context.getPackageName()); return id; } public static class ViewHolder extends RecyclerView.ViewHolder { public LinearLayout mWrapper, mIconWrapper; public TextView mName; public ImageView mIcon; public TextView mStats; public ViewHolder(Context context, View view) { super(view); mWrapper = view.findViewById(R.id.type_wrapper); mName = view.findViewById(R.id.type_name); mIcon
// ViewHolder tail and getItemCount. Then the complete ProjectType model (toString returns the name
// so ArrayAdapter/spinners render it directly), and the head of ResponseData.java — a catch-all
// API-envelope DTO whose constructor continues on the next line.
= view.findViewById(R.id.icon); mIconWrapper = view.findViewById(R.id.icon_wrapper); mStats = view.findViewById(R.id.stats); } } // Return the size of your dataset (invoked by the layout manager) @Override public int getItemCount() { return types.size(); } }<file_sep>/app/src/main/java/com/agnet/leteApp/models/ProjectType.java package com.agnet.leteApp.models; import com.google.gson.annotations.SerializedName; public class ProjectType { int id; String name, icon; int stats; public ProjectType(int id, String name, String icon, int stats){ this.id = id; this.name =name; this.icon = icon; this.stats = stats; } public int getId() { return id; } public String getName() { return name; } public String getIcon() { return icon; } @Override public String toString() { return name; } public int getStats() { return stats; } } <file_sep>/app/src/main/java/com/agnet/leteApp/models/ResponseData.java package com.agnet.leteApp.models; import com.google.gson.annotations.SerializedName; import java.util.List; public class ResponseData { User user; List<Street> streets; List<Customer> customers; // @SerializedName("data") List<Product> products; List<Category> categories; List<Sku> skus; List<History> orders; Order order; List<Partner> partners; List<Outlet> outlets; Customer customer; Outlet outlet; int code; String flag; String token; Success success; List<Project> projects; List<Form> forms; Form form; String error; Stat stats; public ResponseData(List streets, List products, List categories, List skus, List<History> orders, List<Partner> partners, User user, Customer customer,Order order, int code, String flag, Outlet outlet, String token, Success success, List<Project> projects, List<Form> forms, Form form, List<Quesionnaire> questions, List<Outlet> outlets, String error, Stat stats) { this.streets = streets; this.products = products; this.categories = categories; this.skus = skus; this.orders = orders; this.partners = partners; this.user = user; this.customer = customer;
this.code = code; this.flag = flag; this.outlet = outlet; this.token = token; this.success = success; this.projects = projects; this.forms = forms; this.form = form; this.outlets = outlets; this.error = error; this.stats = stats; this.order = order; } public List<Partner> getPartners() { return partners; } public List<Street> getStreets() { return streets; } public List<Product> getProducts() { return products; } public List<Customer> getCustomers() { return customers; } public List<Category> getCategories() { return categories; } public List<Sku> getSkus() { return skus; } public List<History> getOrders() { return orders; } public User getUser() { return user; } public Customer getCustomer() { return customer; } public int getCode() { return code; } public String getFlag() { return flag; } public Outlet getOutlet() { return outlet; } public String getToken() { return token; } public Success getSuccess() { return success; } public List<Project> getProjects() { return projects; } public List<Form> getForms() { return forms; } public Form getForm() { return form; } public List<Outlet> getOutlets() { return outlets; } public String getError() { return error; } public Stat getStats() { return stats; } public Order getOrder() { return order; } } <file_sep>/app/src/main/java/com/agnet/leteApp/helpers/RecyclerViewerAdapter.java package com.agnet.leteApp.helpers; public class RecyclerViewerAdapter { } <file_sep>/app/src/main/java/com/agnet/leteApp/models/Quesionnaire.java package com.agnet.leteApp.models; import com.google.gson.annotations.SerializedName; import java.util.List; public class Quesionnaire { private int id; @SerializedName("control_type_id") private int typeId; private String question; @SerializedName("question_options") private List<Option> options; public Quesionnaire(int id, int typeId, String question,List<Option> options){ this.typeId = typeId; this.question = question; this.options = options; this.id = id; } public int getId() { return id; } public int 
getTypeId() { return typeId; } public String getQuestion() { return question; } public List<Option> getOptions() { return options; } } <file_sep>/app/src/main/java/com/agnet/leteApp/models/Customer.java package com.agnet.leteApp.models; import com.google.gson.annotations.SerializedName; public class Customer { private int id; String name, district, lat, lon, url, phone, qrCode, vfd_cust_id,vfd_cust_type; @SerializedName("customer_type") private String type; private int serverId; @SerializedName("street_id") private int streetId; public Customer(int id, String name, int streetId, String district, String url, String lat, String lon, int serverId, String phone, String qrCode, String type, String vfd_cust_id, String vfd_cust_type){ this.id = id; this.name = name; this.streetId = streetId; this.district = district; this.url = url; this.lat = lat; this.lon = lon; this.serverId = serverId; this.phone = phone; this.qrCode = qrCode; this.type = type; this.vfd_cust_id = vfd_cust_id; this.vfd_cust_type = vfd_cust_type; } public int getId() { return id; } public String getName() { return name; } public int getStreetId() { return streetId; } public String getUrl() { return url; } public String getDistrict() { return district; } public String getLat() { return lat; } public String getLon() { return lon; } public String getPhone() { return phone; } public String getQrCode() { return qrCode; } public String getType() { return type; } public int getServerId() { return serverId; } public String getVfdCustId() { return vfd_cust_id; } public String getVfdCustType() { return vfd_cust_type; } } <file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/HomeFragment.java package com.agnet.leteApp.fragments.main; import android.annotation.SuppressLint; import android.content.Context; import android.content.DialogInterface; import android.content.Intent; import android.content.SharedPreferences; import android.graphics.Color; import android.os.Bundle; import android.os.Handler; import 
android.util.Log; import android.view.KeyEvent; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ArrayAdapter; import android.widget.Button; import android.widget.EditText; import android.widget.LinearLayout; import android.widget.Spinner; import android.widget.TextView; import android.widget.Toast; import androidx.annotation.NonNull; import androidx.annotation.Nullable; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentActivity; import androidx.recyclerview.widget.GridLayoutManager; import androidx.recyclerview.widget.LinearLayoutManager; import androidx.recyclerview.widget.RecyclerView; import com.agnet.leteApp.R; import com.agnet.leteApp.application.mSingleton; import com.agnet.leteApp.fragments.main.adapters.ProjectAdapter; import com.agnet.leteApp.fragments.main.adapters.ProjectTypeAdapter; import com.agnet.leteApp.fragments.main.dialogs.QrcodeBtmSheet; import com.agnet.leteApp.helpers.AndroidDatabaseManager; import com.agnet.leteApp.helpers.DatabaseHandler; import com.agnet.leteApp.helpers.DateHelper; import com.agnet.leteApp.helpers.FragmentHelper; import com.agnet.leteApp.models.CustomerType; import com.agnet.leteApp.models.Outlet; import com.agnet.leteApp.models.Project; import com.agnet.leteApp.models.ProjectType; import com.agnet.leteApp.models.ResponseData; import com.agnet.leteApp.models.Stat; import com.agnet.leteApp.models.User; import com.agnet.leteApp.service.Endpoint; import com.android.volley.NetworkResponse; import com.android.volley.Request; import com.android.volley.RetryPolicy; import com.android.volley.VolleyError; import com.android.volley.toolbox.StringRequest; import com.facebook.shimmer.ShimmerFrameLayout; import com.google.android.material.bottomsheet.BottomSheetBehavior; import com.google.gson.Gson; import com.mikhaellopez.circularprogressbar.CircularProgressBar; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import 
java.util.Map; public class HomeFragment extends Fragment { private FragmentActivity _c; private RecyclerView _projectTypeList, _projectList, _outletList; private GridLayoutManager _projectTypeLayoutManager; private String Token; private SharedPreferences.Editor _editor; private SharedPreferences _preferences; private Gson _gson; private User _user; private Spinner _projectTypesSpinner; private List<ProjectType> _projectTypesData; private TextView _revenue, _revenueTarget, _mappingCount, _mappingTarget, _merchandiseCount, _merchandiseTarget, _outletCount, _outletTarget; private ShimmerFrameLayout _shimmer; private CircularProgressBar _circularSales, _circularMapping, _circularMerchandise, _circularOutlets; private BottomSheetBehavior behavior; private DatabaseHandler _dbHandler; @SuppressLint("RestrictedApi") @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_home, container, false); _c = getActivity(); _preferences = getActivity().getSharedPreferences("SharedData", Context.MODE_PRIVATE); _editor = _preferences.edit(); _gson = new Gson(); _dbHandler = new DatabaseHandler(_c); //binding TextView username = view.findViewById(R.id.user_name); _projectTypeList = view.findViewById(R.id.project_type_list); _projectList = view.findViewById(R.id.project_list); _outletList = view.findViewById(R.id.outlet_list); _revenue = view.findViewById(R.id.revenue); _revenueTarget = view.findViewById(R.id.revenue_target); _mappingCount = view.findViewById(R.id.mapping_count); _mappingTarget = view.findViewById(R.id.mapping_target); _merchandiseCount = view.findViewById(R.id.merchandise_count); _merchandiseTarget = view.findViewById(R.id.merchandise_target); _outletCount = view.findViewById(R.id.outlet_count); _outletTarget = view.findViewById(R.id.outlet_target); _shimmer = view.findViewById(R.id.shimmer_view_container); _circularSales = view.findViewById(R.id.circular_bar_sales); 
_circularMapping = view.findViewById(R.id.circular_bar_mapping); _circularMerchandise = view.findViewById(R.id.circular_bar_merchandise); _circularOutlets = view.findViewById(R.id.circular_bar_outlets); //register shop dialog Button regByPhoneBtn = _c.findViewById(R.id.register_byPhone_btn); EditText phoneTxt = _c.findViewById(R.id.phone_input); EditText nameTxt = _c.findViewById(R.id.name_input); try { _user = _gson.fromJson(_preferences.getString("User", null), User.class); Token = _preferences.getString("TOKEN", null); username.setText(_user.getName()); } catch (NullPointerException e) { } // _projectTypeLayoutManager = new LinearLayoutManager(_c, RecyclerView.HORIZONTAL, false); _projectTypeLayoutManager = new GridLayoutManager(_c, 3); _projectTypeList.setLayoutManager(_projectTypeLayoutManager); ProjectTypeAdapter typeAdapter = new ProjectTypeAdapter(_c, getProjectTypes(), this); _projectTypeList.setAdapter(typeAdapter); username.setOnLongClickListener(new View.OnLongClickListener() { @Override public boolean onLongClick(View view) { Intent intent = new Intent(_c, AndroidDatabaseManager.class); _c.startActivity(intent); return false; } }); View bottomSheet = _c.findViewById(R.id.bottom_sheet); behavior = BottomSheetBehavior.from(bottomSheet); behavior.setBottomSheetCallback(new BottomSheetBehavior.BottomSheetCallback() { @Override public void onStateChanged(@NonNull View bottomSheet, int newState) { // React to state change if (newState == BottomSheetBehavior.STATE_SETTLING) { phoneTxt.setText(""); nameTxt.setText(""); } } @Override public void onSlide(@NonNull View bottomSheet, float slideOffset) { // React to dragging events } }); regByPhoneBtn.setOnClickListener(view1 -> { String phone = phoneTxt.getText().toString(); String name = nameTxt.getText().toString(); if(name.isEmpty()){ Toast.makeText(getContext(), "Ingiza jina la mteja!", Toast.LENGTH_LONG).show(); }else if(phone.isEmpty()){ Toast.makeText(getContext(), "Ingiza namba ya simu!", 
Toast.LENGTH_LONG).show(); }else { _dbHandler.createOutlet(new Outlet( 0,name,"255"+phone, "","" )); Toast.makeText(_c, "App Inapakua subiri....", Toast.LENGTH_LONG).show(); final Handler handler = new Handler(); handler.postDelayed(new Runnable() { @Override public void run() { // Do something after 5s = 5000ms behavior.setState(BottomSheetBehavior.STATE_COLLAPSED); new FragmentHelper(getActivity()).replace(new ProjectFragment(),"ProjectFragment", R.id.fragment_placeholder); } }, 5000); } }); getAgentStats(); return view; } @Override public void onActivityCreated(@Nullable Bundle savedInstanceState) { super.onActivityCreated(savedInstanceState); getView().setFocusableInTouchMode(true); getView().requestFocus(); getView().setOnKeyListener((v, keyCode, event) -> { if (event.getAction() == KeyEvent.ACTION_DOWN) { if (keyCode == KeyEvent.KEYCODE_BACK) { _c.finish(); return true; } } return false; }); } @Override public void onPause() { super.onPause(); _shimmer.setVisibility(View.GONE); _shimmer.stopShimmerAnimation(); } private List<ProjectType> getProjectTypes() { List<ProjectType> list = new ArrayList<>(); list.add(new ProjectType(1, "Mauzo", "ic_truck", 100)); list.add(new ProjectType(2, "Uwepo", "ic_mapping", 12)); list.add(new ProjectType(3, "Vipeperushi", "ic_merchandise", 10)); list.add(new ProjectType(4, "Maduka", "ic_outlets", 0)); list.add(new ProjectType(4, "Risiti", "ic_receipts", 0)); list.add(new ProjectType(4, "Pata ujumbe", "ic_notifications", 0)); return list; } public void showDIalog() { behavior.setState(BottomSheetBehavior.STATE_EXPANDED); } private void setupCircularBar(CircularProgressBar bar, float progress, float max, String progressColor, String bgColor) { // Set Progress // _circularProgressBar.setProgress(65f); // or with animation bar.setProgressWithAnimation(progress, Long.valueOf(1000)); // =1s // Set Progress Max bar.setProgressMax(max); // Set ProgressBar Color bar.setProgressBarColor(Color.parseColor(progressColor)); // or with 
gradient /* _circularProgressBar.setProgressBarColorStart(Color.GRAY); _circularProgressBar.setProgressBarColorEnd(Color.parseColor("#001689")); _circularProgressBar.setProgressBarColorDirection(CircularProgressBar.GradientDirection.TOP_TO_BOTTOM);*/ // Set background ProgressBar Color bar.setBackgroundProgressBarColor(Color.parseColor(bgColor)); // or with gradient /* _circularProgressBar.setBackgroundProgressBarColorStart(Color.WHITE); _circularProgressBar.setBackgroundProgressBarColorEnd(Color.parseColor("#001689")); _circularProgressBar.setBackgroundProgressBarColorDirection(CircularProgressBar.GradientDirection.TOP_TO_BOTTOM);*/ // Set Width bar.setProgressBarWidth(6f); // in DP bar.setBackgroundProgressBarWidth(6f); // in DP // Other bar.setRoundBorder(true); // _circularProgressBar.setStartAngle(180f); bar.setProgressDirection(CircularProgressBar.ProgressDirection.TO_RIGHT); } private void getAgentStats() { String start = DateHelper.getCurrentDate(); String end = DateHelper.getCurrentDate(); Endpoint.setUrl("agent/" + _user.getId() + "/stats?start=" + start + "&end=" + end); String url = Endpoint.getUrl(); _shimmer.setVisibility(View.VISIBLE); _shimmer.startShimmerAnimation(); ; StringRequest postRequest = new StringRequest(Request.Method.GET, url, response -> { ResponseData res = _gson.fromJson(response, ResponseData.class); // Log.d("HEREHERESANA", _gson.toJson(res)); if (res.getCode() == 200) { Stat stat = res.getStats(); _revenue.setText(stat.getRevenueFormatted()); _revenueTarget.setText(stat.getRevenueTargetFormatted()); _mappingCount.setText("" + stat.getMappingCount()); _mappingTarget.setText("" + stat.getMappingTarget()); _merchandiseCount.setText("" + stat.getMerchandiseCount()); _merchandiseTarget.setText("" + stat.getMerchandiseTarget()); _outletCount.setText("" + stat.getOutletCount()); _outletTarget.setText("" + stat.getOutletTarget()); setupCircularBar(_circularSales, stat.getRevenue(), stat.getRevenueTarget(), "#001689", "#ffffff"); 
setupCircularBar(_circularMapping, stat.getMappingCount(), stat.getMappingTarget(), "#ffb400", "#ffffff"); setupCircularBar(_circularMerchandise, +stat.getMerchandiseCount(), stat.getMerchandiseTarget(), "#34b0c3", "#ffffff"); setupCircularBar(_circularOutlets, stat.getOutletCount(), stat.getOutletTarget(), "#ed1c24", "#ffffff"); _editor.putString("STAT", _gson.toJson(stat)); _editor.commit(); } _shimmer.setVisibility(View.GONE); _shimmer.stopShimmerAnimation(); }, error -> { error.printStackTrace(); _shimmer.setVisibility(View.GONE); _shimmer.stopShimmerAnimation(); NetworkResponse response = error.networkResponse; String errorMsg = ""; if (response != null && response.data != null) { String errorString = new String(response.data); Log.i("log error", errorString); //TODO: display errors based on the message from the server Toast.makeText(_c, "Kuna tatizo, angalia mtandao alafu jaribu tena", Toast.LENGTH_SHORT).show(); } } ) { @Override public Map<String, String> getHeaders() { Map<String, String> params = new HashMap<String, String>(); params.put("Authorization", "Bearer " + "" + Token); return params; } }; mSingleton.getInstance(_c).addToRequestQueue(postRequest); postRequest.setRetryPolicy(new RetryPolicy() { @Override public int getCurrentTimeout() { return 50000; } @Override public int getCurrentRetryCount() { return 50000; } @Override public void retry(VolleyError error) throws VolleyError { } }); } } <file_sep>/app/src/main/java/com/agnet/leteApp/fragments/main/sales/OutletPhoneNumberFragment.java package com.agnet.leteApp.fragments.main.sales; import android.annotation.SuppressLint; import android.content.Context; import android.content.SharedPreferences; import android.os.Bundle; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.ArrayAdapter; import android.widget.EditText; import android.widget.LinearLayout; import android.widget.Spinner; import 
android.widget.TextView; import android.widget.Toast; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentActivity; import com.agnet.leteApp.R; import com.agnet.leteApp.fragments.main.mapping.MappingFormListFragment; import com.agnet.leteApp.fragments.main.merchandise.MerchandiseFormFragment; import com.agnet.leteApp.fragments.main.outlets.NewBarcodeFragment; import com.agnet.leteApp.helpers.FragmentHelper; import com.agnet.leteApp.models.CustomerType; import com.agnet.leteApp.models.User; import com.google.gson.Gson; import java.util.ArrayList; import java.util.List; public class OutletPhoneNumberFragment extends Fragment { private FragmentActivity _c; private SharedPreferences _preferences; private SharedPreferences.Editor _editor; private EditText _phone, _name; private LinearLayout _progressBar; private LinearLayout _newOuletBtn; private Gson _gson; private User _user; private String Token,_projectType; @SuppressLint("RestrictedApi") @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.fragment_outlet_phone_no, container, false); _c = getActivity(); _preferences = _c.getSharedPreferences("SharedData", Context.MODE_PRIVATE); _editor = _preferences.edit(); _gson = new Gson(); _phone = view.findViewById(R.id.phone_input); _name = view.findViewById(R.id.name_input); _newOuletBtn = view.findViewById(R.id.new_outlet_btn); _progressBar = view.findViewById(R.id.progress_bar_wrapper); try { _user = _gson.fromJson(_preferences.getString("User", null), User.class); Token = _preferences.getString("TOKEN", null); _projectType = _preferences.getString("PROJECT_TYPE", null); } catch (NullPointerException e) { } _newOuletBtn.setOnClickListener(view1 -> { String phone = _phone.getText().toString(); String name = _name.getText().toString(); if(name.isEmpty()){ Toast.makeText(_c, "Ingiza jina la mteja!", Toast.LENGTH_LONG).show(); }else if(phone.isEmpty()){ 
Toast.makeText(_c, "Ingiza namba ya simu!", Toast.LENGTH_LONG).show(); }else { _progressBar.setVisibility(View.VISIBLE); _editor.putString("PHONE", "00255"+phone); _editor.putString("NAME", name); _editor.commit(); new FragmentHelper(_c).replaceWithbackStack(new ProductsFragment(), "ProductsFragment", R.id.fragment_placeholder); } }); return view; } @Override public void onPause() { super.onPause(); _progressBar.setVisibility(View.GONE); } }
80175618741bac2ba5b6f03ad266df0558003071
[ "Java" ]
21
Java
siniga/lete_drop
2062176880eee2c20c13e85a75c10a2b1fc4490b
ee2fe86f0f570c02672f0548f17190eae7d676ab
refs/heads/master
<file_sep><?php namespace App\Controller; use App\Controller\AppController; class ComandoController extends AppController { public function index() { $this->loadModel('Comandos'); $comando = $this->Comandos->get(1); echo $comando->comando; die; } }<file_sep><?php namespace App\Controller; use App\Controller\AppController; /** * Commands Controller * * @property \App\Model\Table\CommandsTable $Commands * * @method \App\Model\Entity\Command[]|\Cake\Datasource\ResultSetInterface paginate($object = null, array $settings = []) */ class CommandsController extends AppController { /** * Index method * * @return \Cake\Http\Response|null */ public function index() { $commands = $this->paginate($this->Commands); $this->set(compact('commands')); } /** * View method * * @param string|null $id Command id. * @return \Cake\Http\Response|null * @throws \Cake\Datasource\Exception\RecordNotFoundException When record not found. */ public function view($id = null) { $command = $this->Commands->get($id, [ 'contain' => ['Schedules'], ]); $this->set('command', $command); } /** * Add method * * @return \Cake\Http\Response|null Redirects on successful add, renders view otherwise. */ public function add() { $command = $this->Commands->newEntity(); if ($this->request->is('post')) { $command = $this->Commands->patchEntity($command, $this->request->getData()); if ($this->Commands->save($command)) { $this->Flash->success(__('The command has been saved.')); return $this->redirect(['action' => 'index']); } $this->Flash->error(__('The command could not be saved. Please, try again.')); } $this->set(compact('command')); } /** * Edit method * * @param string|null $id Command id. * @return \Cake\Http\Response|null Redirects on successful edit, renders view otherwise. * @throws \Cake\Datasource\Exception\RecordNotFoundException When record not found. 
*/ public function edit($id = null) { $command = $this->Commands->get($id, [ 'contain' => [], ]); if ($this->request->is(['patch', 'post', 'put'])) { $command = $this->Commands->patchEntity($command, $this->request->getData()); if ($this->Commands->save($command)) { $this->Flash->success(__('The command has been saved.')); return $this->redirect(['action' => 'index']); } $this->Flash->error(__('The command could not be saved. Please, try again.')); } $this->set(compact('command')); } /** * Delete method * * @param string|null $id Command id. * @return \Cake\Http\Response|null Redirects to index. * @throws \Cake\Datasource\Exception\RecordNotFoundException When record not found. */ public function delete($id = null) { $this->request->allowMethod(['post', 'delete']); $command = $this->Commands->get($id); if ($this->Commands->delete($command)) { $this->Flash->success(__('The command has been deleted.')); } else { $this->Flash->error(__('The command could not be deleted. Please, try again.')); } return $this->redirect(['action' => 'index']); } public function send($id){ $command_send = ''; $command = $this->Commands->find() ->where([ 'id' => $id, 'executed' => 'n' ]) ->first(); if($command){ $command_send = $command->command; $command->executed = 'y'; $this->Commands->save($command); } echo ($command_send); die; } public function alter(){ if ($this->request->is('post')) { $data = $this->request->getData(); // debug($data);die; $command = $this->Commands->get(1); $command->command = $data['command']; $command->executed = 'n'; $this->Commands->save($command); // $this->Commands->updateAll([ // 'command' => $data['command'] // ],[ // 'id' => 1 // ]); }; $command = $this->Commands->newEntity(); $this->set(compact('command')); } } <file_sep><?php namespace App\Model\Entity; use Cake\ORM\Entity; /** * Schedule Entity * * @property int $id * @property int|null $command_id * @property string|null $command_send * @property string|null $type * @property 
\Cake\I18n\FrozenTime|null $date_time * @property string|null $specific_date * @property string|null $days_week * @property \Cake\I18n\FrozenTime|null $created * @property \Cake\I18n\FrozenTime|null $modified * * @property \App\Model\Entity\Command $command */ class Schedule extends Entity { /** * Fields that can be mass assigned using newEntity() or patchEntity(). * * Note that when '*' is set to true, this allows all unspecified fields to * be mass assigned. For security purposes, it is advised to set '*' to false * (or remove it), and explicitly make individual fields accessible as needed. * * @var array */ protected $_accessible = [ 'command_id' => true, 'command_send' => true, 'type' => true, 'date_time' => true, 'specific_date' => true, 'days_week' => true, 'created' => true, 'modified' => true, 'command' => true, ]; } <file_sep><?php /** * @var \App\View\AppView $this * @var \App\Model\Entity\Horario $horario */ ?> <nav class="large-3 medium-4 columns" id="actions-sidebar"> <ul class="side-nav"> <li class="heading"><?= __('Actions') ?></li> <li><?= $this->Html->link(__('Edit Horario'), ['action' => 'edit', $horario->id]) ?> </li> <li><?= $this->Form->postLink(__('Delete Horario'), ['action' => 'delete', $horario->id], ['confirm' => __('Are you sure you want to delete # {0}?', $horario->id)]) ?> </li> <li><?= $this->Html->link(__('List Horarios'), ['action' => 'index']) ?> </li> <li><?= $this->Html->link(__('New Horario'), ['action' => 'add']) ?> </li> </ul> </nav> <div class="horarios view large-9 medium-8 columns content"> <h3><?= h($horario->id) ?></h3> <table class="vertical-table"> <tr> <th scope="row"><?= __('Comando') ?></th> <td><?= h($horario->comando) ?></td> </tr> <tr> <th scope="row"><?= __('Id') ?></th> <td><?= $this->Number->format($horario->id) ?></td> </tr> <tr> <th scope="row"><?= __('Execucao') ?></th> <td><?= h($horario->execucao) ?></td> </tr> <tr> <th scope="row"><?= __('Created') ?></th> <td><?= h($horario->created) ?></td> </tr> <tr> 
<th scope="row"><?= __('Modified') ?></th> <td><?= h($horario->modified) ?></td> </tr> </table> </div> <file_sep><?php /** * @var \App\View\AppView $this * @var \App\Model\Entity\Command $command */ ?> <nav class="large-3 medium-4 columns" id="actions-sidebar"> <ul class="side-nav"> <li class="heading"><?= __('Actions') ?></li> <li><?= $this->Html->link(__('Edit Command'), ['action' => 'edit', $command->id]) ?> </li> <li><?= $this->Form->postLink(__('Delete Command'), ['action' => 'delete', $command->id], ['confirm' => __('Are you sure you want to delete # {0}?', $command->id)]) ?> </li> <li><?= $this->Html->link(__('List Commands'), ['action' => 'index']) ?> </li> <li><?= $this->Html->link(__('New Command'), ['action' => 'add']) ?> </li> <li><?= $this->Html->link(__('List Schedules'), ['controller' => 'Schedules', 'action' => 'index']) ?> </li> <li><?= $this->Html->link(__('New Schedule'), ['controller' => 'Schedules', 'action' => 'add']) ?> </li> </ul> </nav> <div class="commands view large-9 medium-8 columns content"> <h3><?= h($command->id) ?></h3> <table class="vertical-table"> <tr> <th scope="row"><?= __('Command') ?></th> <td><?= h($command->command) ?></td> </tr> <tr> <th scope="row"><?= __('Executed') ?></th> <td><?= h($command->executed) ?></td> </tr> <tr> <th scope="row"><?= __('Id') ?></th> <td><?= $this->Number->format($command->id) ?></td> </tr> </table> <div class="related"> <h4><?= __('Related Schedules') ?></h4> <?php if (!empty($command->schedules)): ?> <table cellpadding="0" cellspacing="0"> <tr> <th scope="col"><?= __('Id') ?></th> <th scope="col"><?= __('Command Id') ?></th> <th scope="col"><?= __('Command Send') ?></th> <th scope="col"><?= __('Type') ?></th> <th scope="col"><?= __('Date Time') ?></th> <th scope="col"><?= __('Specific Date') ?></th> <th scope="col"><?= __('Days Week') ?></th> <th scope="col"><?= __('Created') ?></th> <th scope="col"><?= __('Modified') ?></th> <th scope="col" class="actions"><?= __('Actions') ?></th> </tr> <?php 
foreach ($command->schedules as $schedules): ?> <tr> <td><?= h($schedules->id) ?></td> <td><?= h($schedules->command_id) ?></td> <td><?= h($schedules->command_send) ?></td> <td><?= h($schedules->type) ?></td> <td><?= h($schedules->date_time) ?></td> <td><?= h($schedules->specific_date) ?></td> <td><?= h($schedules->days_week) ?></td> <td><?= h($schedules->created) ?></td> <td><?= h($schedules->modified) ?></td> <td class="actions"> <?= $this->Html->link(__('View'), ['controller' => 'Schedules', 'action' => 'view', $schedules->id]) ?> <?= $this->Html->link(__('Edit'), ['controller' => 'Schedules', 'action' => 'edit', $schedules->id]) ?> <?= $this->Form->postLink(__('Delete'), ['controller' => 'Schedules', 'action' => 'delete', $schedules->id], ['confirm' => __('Are you sure you want to delete # {0}?', $schedules->id)]) ?> </td> </tr> <?php endforeach; ?> </table> <?php endif; ?> </div> </div> <file_sep><a href="/schedules"><h1>Horários</h1></a> <a href="/schedules/timer"><h1>Temporizador</h1></a> <a href="/commands/alter"><h1>Enviar comando</h1></a><file_sep><?php namespace App\Controller; use App\Controller\AppController; class TesteFormularioController extends AppController { public function index() { if($_POST){ echo '<table cellspacing="0" cellpadding="0" border="1" width="40%"> <thead> <tr> <th>Campo</th> <th>Valor</th> </tr> </thead> <tbody>'; foreach($_POST as $key => $value){ echo "<tr> <td>{$key}</td> <td>{$value}</td> </tr>"; } echo '</tbody> </table>'; } die; } }<file_sep><?php /** * @var \App\View\AppView $this * @var \App\Model\Entity\Schedule $schedule */ ?> <nav class="large-3 medium-4 columns" id="actions-sidebar"> <ul class="side-nav"> <li class="heading"><?= __('Actions') ?></li> <li><?= $this->Html->link(__('Edit Schedule'), ['action' => 'edit', $schedule->id]) ?> </li> <li><?= $this->Form->postLink(__('Delete Schedule'), ['action' => 'delete', $schedule->id], ['confirm' => __('Are you sure you want to delete # {0}?', $schedule->id)]) ?> </li> 
<li><?= $this->Html->link(__('List Schedules'), ['action' => 'index']) ?> </li> <li><?= $this->Html->link(__('New Schedule'), ['action' => 'add']) ?> </li> <li><?= $this->Html->link(__('List Commands'), ['controller' => 'Commands', 'action' => 'index']) ?> </li> <li><?= $this->Html->link(__('New Command'), ['controller' => 'Commands', 'action' => 'add']) ?> </li> </ul> </nav> <div class="schedules view large-9 medium-8 columns content"> <h3><?= h($schedule->id) ?></h3> <table class="vertical-table"> <tr> <th scope="row"><?= __('Command') ?></th> <td><?= $schedule->has('command') ? $this->Html->link($schedule->command->id, ['controller' => 'Commands', 'action' => 'view', $schedule->command->id]) : '' ?></td> </tr> <tr> <th scope="row"><?= __('Command Send') ?></th> <td><?= h($schedule->command_send) ?></td> </tr> <tr> <th scope="row"><?= __('Type') ?></th> <td><?= h($schedule->type) ?></td> </tr> <tr> <th scope="row"><?= __('Specific Date') ?></th> <td><?= h($schedule->specific_date) ?></td> </tr> <tr> <th scope="row"><?= __('Days Week') ?></th> <td><?= h($schedule->days_week) ?></td> </tr> <tr> <th scope="row"><?= __('Id') ?></th> <td><?= $this->Number->format($schedule->id) ?></td> </tr> <tr> <th scope="row"><?= __('Date Time') ?></th> <td><?= h($schedule->date_time) ?></td> </tr> <tr> <th scope="row"><?= __('Created') ?></th> <td><?= h($schedule->created) ?></td> </tr> <tr> <th scope="row"><?= __('Modified') ?></th> <td><?= h($schedule->modified) ?></td> </tr> </table> </div> <file_sep><?php namespace App\Controller; use App\Controller\AppController; /** * Schedules Controller * * @property \App\Model\Table\SchedulesTable $Schedules * * @method \App\Model\Entity\Schedule[]|\Cake\Datasource\ResultSetInterface paginate($object = null, array $settings = []) */ class SchedulesController extends AppController { /** * Index method * * @return \Cake\Http\Response|null */ public function index() { $this->paginate = [ 'contain' => ['Commands'], ]; $schedules = 
$this->paginate($this->Schedules); $this->set(compact('schedules')); } /** * View method * * @param string|null $id Schedule id. * @return \Cake\Http\Response|null * @throws \Cake\Datasource\Exception\RecordNotFoundException When record not found. */ public function view($id = null) { $schedule = $this->Schedules->get($id, [ 'contain' => ['Commands'], ]); $this->set('schedule', $schedule); } /** * Add method * * @return \Cake\Http\Response|null Redirects on successful add, renders view otherwise. */ public function add() { $schedule = $this->Schedules->newEntity(); if ($this->request->is('post')) { $schedule = $this->Schedules->patchEntity($schedule, $this->request->getData()); if ($this->Schedules->save($schedule)) { $this->Flash->success(__('The schedule has been saved.')); return $this->redirect(['action' => 'index']); } $this->Flash->error(__('The schedule could not be saved. Please, try again.')); } $commands = $this->Schedules->Commands->find('list', ['limit' => 200]); $this->set(compact('schedule', 'commands')); } /** * Edit method * * @param string|null $id Schedule id. * @return \Cake\Http\Response|null Redirects on successful edit, renders view otherwise. * @throws \Cake\Datasource\Exception\RecordNotFoundException When record not found. */ public function edit($id = null) { $schedule = $this->Schedules->get($id, [ 'contain' => [], ]); if ($this->request->is(['patch', 'post', 'put'])) { $schedule = $this->Schedules->patchEntity($schedule, $this->request->getData()); if ($this->Schedules->save($schedule)) { $this->Flash->success(__('The schedule has been saved.')); return $this->redirect(['action' => 'index']); } $this->Flash->error(__('The schedule could not be saved. Please, try again.')); } $commands = $this->Schedules->Commands->find('list', ['limit' => 200]); $this->set(compact('schedule', 'commands')); } /** * Delete method * * @param string|null $id Schedule id. * @return \Cake\Http\Response|null Redirects to index. 
* @throws \Cake\Datasource\Exception\RecordNotFoundException When record not found. */ public function delete($id = null) { $this->request->allowMethod(['post', 'delete']); $schedule = $this->Schedules->get($id); if ($this->Schedules->delete($schedule)) { $this->Flash->success(__('The schedule has been deleted.')); } else { $this->Flash->error(__('The schedule could not be deleted. Please, try again.')); } return $this->redirect(['action' => 'index']); } public function upCommand(){ set_time_limit(0); for($i = 1; $i <= 15; $i++){ $this->loadmodel('Commands'); $data_inicio = date('Y-m-d H:i:00'); $data_fim = date('Y-m-d H:i:59'); $hora_inicio = date('H:i:00'); $hora_fim = date('H:i:59'); $dia_semana = date('w'); //die($dia_semana); // echo "$data_inicio $data_fim "; $schedules = $this->Schedules->find() ->where([ 'date_time >= ' => $data_inicio, 'date_time <= ' => $data_fim, ]); foreach($schedules as $schedule){ $this->Commands->updateAll([ 'command' => $schedule->command_send, 'executed' => 'n' ], [ 'id' => $schedule->command_id ]); // debug($schedule);die; } // $schedules = $this->Schedules->find() // ->where([ // 'date_time >= ' => $data_inicio, // 'date_time <= ' => $data_fim, // ]); // die; sleep(60); //debug($schedule);die; } die; } public function timer() { if ($this->request->is('post')) { $data = $this->request->getData(); //print_r($data); $date_time = date('Y-m-d H:i:00', strtotime("+{$data['tempo']} minutes", strtotime(date('Y-m-d H:i')))); $this->Schedules->updateAll([ 'date_time' => $date_time, 'command_send' => $data['command'] ], [ 'command_id' => 1, 'type' => 't' ]); } } } <file_sep><?php /** * @var \App\View\AppView $this * @var \App\Model\Entity\Horario $horario */ ?> <nav class="large-3 medium-4 columns" id="actions-sidebar"> <ul class="side-nav"> <li class="heading"><?= __('Actions') ?></li> <li><?= $this->Form->postLink( __('Delete'), ['action' => 'delete', $horario->id], ['confirm' => __('Are you sure you want to delete # {0}?', $horario->id)] 
) ?></li> <li><?= $this->Html->link(__('List Horarios'), ['action' => 'index']) ?></li> </ul> </nav> <div class="horarios form large-9 medium-8 columns content"> <?= $this->Form->create($horario) ?> <fieldset> <legend><?= __('Edit Horario') ?></legend> <?php echo $this->Form->control('comando'); //echo $this->Form->control('execucao', ['type' => 'date', 'value' => '2020-10-10']); ?> <div class="input text"> <label for="comando">Execução Data</label> <input type="date" name="execucao_data" maxlength="50" id="execucao_data" value="<?=date('Y-m-d', strtotime($horario->execucao))?>"> </div> <div class="input text"> <label for="comando">Execução Hora</label> <input type="time" name="execucao_hora" maxlength="50" id="execucao_hora" value="<?=date('H:i', strtotime($horario->execucao))?>"> </div> </fieldset> <?= $this->Form->button(__('Submit')) ?> <?= $this->Form->end() ?> </div> <file_sep><?php /** * @var \App\View\AppView $this * @var \App\Model\Entity\Command $command */ ?> <div class="row"> <aside class="column"> <div class="side-nav"> <h4 class="heading"><?= __('Actions') ?></h4> <?= $this->Html->link(__('Home'), ['controller' => 'pages', 'action' => 'painel'], ['class' => 'side-nav-item']) ?> </div> </aside> <div class="column-responsive column-80"> <div class="commands form content"> <?= $this->Form->create($command, ['id' => 'frm']) ?> <fieldset> <legend><?= __('Enviar comando') ?></legend> <?php echo $this->Form->control('command', ['type' => 'hidden']); ?> <?= $this->Form->button(__('On'), ['type' => 'button', 'onclick' => 'sendCommand(\'on\')']) ?> <?= $this->Form->button(__('Off'), ['type' => 'button', 'onclick' => 'sendCommand(\'off\')']) ?> </fieldset> <?= $this->Form->end() ?> </div> </div> </div> <script> function sendCommand(command){ document.getElementById('command').value = command document.getElementById('frm').submit() } </script><file_sep><?php namespace App\Controller; use App\Controller\AppController; /** * Remedios Controller * * @property 
\App\Model\Table\RemediosTable $Remedios * * @method \App\Model\Entity\Remedio[]|\Cake\Datasource\ResultSetInterface paginate($object = null, array $settings = []) */ class RemediosController extends AppController { /** * Index method * * @return \Cake\Http\Response|null */ public function index() { if ($this->request->is('post')) { $remedio = $this->Remedios->newEntity(); $this->Remedios->save($remedio); $this->redirect(['action' => 'index']); } $this->paginate = [ 'order' => [ 'id' => 'DESC' ] ]; $remedios = $this->paginate($this->Remedios); $this->set(compact('remedios')); } /** * View method * * @param string|null $id Remedio id. * @return \Cake\Http\Response|null * @throws \Cake\Datasource\Exception\RecordNotFoundException When record not found. */ public function view($id = null) { $remedio = $this->Remedios->get($id, [ 'contain' => [], ]); $this->set('remedio', $remedio); } /** * Add method * * @return \Cake\Http\Response|null Redirects on successful add, renders view otherwise. */ public function add() { $remedio = $this->Remedios->newEntity(); if ($this->request->is('post')) { $remedio = $this->Remedios->patchEntity($remedio, $this->request->getData()); if ($this->Remedios->save($remedio)) { $this->Flash->success(__('The remedio has been saved.')); return $this->redirect(['action' => 'index']); } $this->Flash->error(__('The remedio could not be saved. Please, try again.')); } $this->set(compact('remedio')); } /** * Edit method * * @param string|null $id Remedio id. * @return \Cake\Http\Response|null Redirects on successful edit, renders view otherwise. * @throws \Cake\Datasource\Exception\RecordNotFoundException When record not found. 
*/ public function edit($id = null) { $remedio = $this->Remedios->get($id, [ 'contain' => [], ]); if ($this->request->is(['patch', 'post', 'put'])) { $remedio = $this->Remedios->patchEntity($remedio, $this->request->getData()); if ($this->Remedios->save($remedio)) { $this->Flash->success(__('The remedio has been saved.')); return $this->redirect(['action' => 'index']); } $this->Flash->error(__('The remedio could not be saved. Please, try again.')); } $this->set(compact('remedio')); } /** * Delete method * * @param string|null $id Remedio id. * @return \Cake\Http\Response|null Redirects to index. * @throws \Cake\Datasource\Exception\RecordNotFoundException When record not found. */ public function delete($id = null) { $this->request->allowMethod(['post', 'delete']); $remedio = $this->Remedios->get($id); if ($this->Remedios->delete($remedio)) { $this->Flash->success(__('The remedio has been deleted.')); } else { $this->Flash->error(__('The remedio could not be deleted. Please, try again.')); } return $this->redirect(['action' => 'index']); } }
147ab7d2e718172ec9f54d9b1404bcb656da476b
[ "PHP" ]
12
PHP
profrodrigoaffonso/fotos
472876dc20b31ff74e33296795350669d3fc1b16
8b0d16aea9387e550abfe23d1c5a18ebd8f6191f
refs/heads/master
<repo_name>MANJUL99/hacktober<file_sep>/a.cpp #include<bits/stdc++.h> using namespace std; #define ll long long int void fast(){ ios_base::sync_with_stdio(false); cin.tie(NULL); } int main() { fast(); ll q; cin>>q; while(q--){ ll n,a,b; cin>>n>>a>>b; ll inters = a+b-n; ll ans = max(a-inters ,b-inters) +1; cout<<ans<<"\n"; } return 0; }<file_sep>/CR197dp.cpp z #include<bits / stdc++.h> using namespace std; #define fast ios_base::sync_with_stdio(false), cin.tie(NULL), cout.tie(NULL) #define ld long double #define fr first #define sc second #define pb push_back #define eb emplace_back #define pii pair<int, int> #define umap unordered_map<ll, ll> #define prq priority_queue<int> #define inf 1e18 #define test cout << "abcd\n"; #define show1(x) cout << x << "\n" #define show2(x, y) cout << x << " " << y << "\n" #define show3(x, y, z) cout << x << " " << y << " " << z << "\n" #define endl "\n" #define mod (int)1000000007 #define N 2005 int main() { fast; // long long t; long long t, n, p_ = 1, p, x, ans, cost; cin >> t; // cin >> n; // vector<long long> v(n), dp(n); // for (int i = 0; i < n; i++) // { // cin >> v[i]; // } // long long t, n, p = 1, x, ans, cost; while (t--) { // p = p_; s // long long t, n, p, x, ans, cost; cin >> n >> p; vector<long long> v(n), dp(n); for (int i = 0; i < n; i++) { cin >> v[i]; } dp[0] = v[0]; for (int i = 0; i < n - 1; i++) { dp[i + 1] = min(v[i + 1], 2LL * dp[i]); } ans = (long long)4e18; cost = 0; for (long long i = n - 1; i >= 0; i--) { x = p / (1LL << i); cost += (long long)x * dp[i]; p -= x << i; ans = min(ans, cost + (p > 0) * dp[i]); } cout << ans << endl; p_ += 1; } return 0; }<file_sep>/d.cpp #include <bits/stdc++.h> using namespace std; #define ll long long #define ar array const int mxN=3e5; int n, a[mxN], b[mxN]; struct segtree { int a[2*mxN]; void upd(int i, int x) { for(a[i+=n]=x; i/=2; ) a[i]=min(a[2*i], a[2*i+1]); } int qry(int l, int r) { int b=1e9; for(l+=n, r+=n; l<r; ++l/=2, r/=2) { if(l&1) b=min(a[l], b); if(r&1) 
b=min(a[r-1], b); } return b; } } st; void solve() { cin >> n; set<ar<int, 2>> s; for(int i=0; i<n; ++i) { cin >> a[i]; s.insert({a[i], i}); st.upd(i, a[i]); } for(int i=0; i<n; ++i) cin >> b[i]; for(int i=0; i<n; ++i) { auto it=s.lower_bound({b[i], 0}); if((*it)[0]!=b[i]||st.qry(0, (*it)[1]+1)<b[i]) { cout << "NO\n"; return; } st.upd((*it)[1], 1e9); s.erase(it); } cout << "YES\n"; } int main() { ios::sync_with_stdio(0); cin.tie(0); int t; cin >> t; while(t--) solve(); }<file_sep>/comb.cpp #include<bits/stdc++.h> using namespace std; int modinverse(int a, int m){ // fermat mod prime number(m) return pow(a,m-2); } int main(){ int n,k; cin>>n>>k; // int dp[n+1]; // dp[0]=1; // for(int i=1;i<=n;i++) dp[i] = dp[i-1]*i; // int val = dp[n]/(dp[k]*dp[n-k]); // cout<<val<<endl; // int dp[n+1][n+1]; // memset(dp,0,sizeof(dp)); // for(int i=0;i<=n;i++){ // for(int j=0;j<=i;j++){ // if(i==0||j==0) { // dp[i][j]=1; // continue; // } // dp[i][j] = dp[i-1][j]+dp[i-1][j-1]; // } // } // for(int i=0;i<=n;i++){ // for(int j=0;j<=i;j++){ // cout<<dp[i][j]<<" "; // } // cout<<endl; // } // cout<<dp[n][k]<<"\n"; return 0; }<file_sep>/ANUDTC.cpp #include <iostream> #include <bits/stdc++.h> using namespace std; int main(){ int T=0; cin>>T; while(T--){ int N=0; cin>>N; if(360%N==0){ cout<<"y "; } else cout<<"n "; if(N<=360){ cout<<"y "; } else cout<<"n "; if(360-(N-1+1)*(N-1)/2>N-1)cout<<"y\n"; else cout<<"n\n"; } return 0; } <file_sep>/e.cpp #include <bits/stdc++.h> using namespace std; typedef long long ll; const int mx = 200050; vector<int> vec[mx]; ll arr[mx]; ll dp[mx]; ll ans ; int n; int dfs(int u , int from) { int i,v,l=vec[u].size(); int sum = 1; for(i = 0 ; i < l ; i++) { v = vec[u][i]; if(v == from)continue; sum += dfs(v,u); } return arr[u] = sum; } void dfs2(int u , int from) { int i,j,k,v,l=vec[u].size(); ll here = arr[u]; if(u==1) dp[u]=ans; else { dp[u] = dp[from] - arr[u] + arr[from]; } ans = max(ans,dp[u]); for(i = 0 ; i < l ; i++) { v = vec[u][i]; if(v == from)continue; 
arr[u] = n-arr[v]; dfs2(v,u); } arr[u] = here; } int main() { int m,i,j,k,u,v,p,q,r; scanf("%d",&n); for(i = 1 ; i < n ; i++) { scanf("%d %d",&u,&v); vec[u].push_back(v); vec[v].push_back(u); } ans = dfs(1,-1); ans = 0; for(i = 1 ;i <= n; i++) ans += arr[i]; dfs2(1,0); printf("%lld\n",ans); return 0; }<file_sep>/ALEXNUMB.cpp #include <iostream> #include <bits/stdc++.h> using namespace std; int main(){ int T=0; cin>>T; while(T--){ long long int n=0; cin>>n; //vector<int>a(n); int i=0; int temp=0; while(i<n){ cin>>temp; i++; } long long int ans=n*(n-1)/2; cout<<ans<<"\n"; } return 0; } <file_sep>/AMMEAT.cpp #include <iostream> #include <bits/stdc++.h> using namespace std; int main(){ int T=0; cin>>T; while(T--){ long long int N=0,M=0; cin>>N>>M; vector<long long int>plate(N); int i=0; int flag=0; while(i<N){ cin>>plate[i];i++; } sort(plate.begin(),plate.end()); long long int sum=0; for(i=N-1;i>=0;i--){ sum+=plate[i]; if(sum>=M){ cout<<N-1-i+1<<"\n"; flag=1;break; } } if(flag==0)cout<<-1<<"\n"; } return 0; } <file_sep>/EDGE_DETECTION.py #!/usr/bin/env python # coding: utf-8 # In[1]: # sobel.py import cv2 import numpy as np import math import matplotlib.pyplot as plt # In[2]: img = cv2.imread('objects.png') img[:, :, 0], img[:, :, 2] = img[:, :, 2], img[:, :, 0].copy() plt.imshow(img) # In[3]: img = cv2.imread('objects.png', cv2.IMREAD_GRAYSCALE) img = np.array(img) rows, cols = img.shape plt.imshow(img, cmap='gray') # In[4]: # Function to save new image def save_gray(img, file_name): z = img.copy() cv2.imwrite(file_name + ".png", z, [0]) # In[5]: Gx = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]] Gy = [[-1, -2, -1], [0, 0, 0], [1, 2, 1]] Gx = np.array(Gx) Gy = np.array(Gy) # In[6]: shape = img.shape rows = shape[0] columns = shape[1] mag = np.zeros((img.shape)) magx = np.zeros((img.shape)) magy = np.zeros((img.shape)) angle = np.zeros((img.shape)) # In[7]: for i in range(1, rows-1): for j in range(1, columns - 1): slice = img[i-1:i+2, j-1:j+2] S1 = sum(sum(slice * Gx)) S2 = 
sum(sum(slice * Gy)) magx[i+1][j+1] = S1 magy[i+1][j+1] = S2 if S1 != 0: angle[i+1][j+1] = S2/S1 mag[i+1][j+1] = max(70, math.sqrt(S1**2+S2**2)) if S2 != 0 or angle[i+1][j+1] != 0: mag[i+1][j+1] = mag[i+1][j+1] * (angle[i+1][j+1] * (S1/S2)) # In[8]: save_gray(magx, 'sobelx') save_gray(magy, 'sobely') save_gray(mag, 'sobel') # In[9]: magx = cv2.imread('sobelx.png', cv2.IMREAD_GRAYSCALE) plt.imshow(magx, cmap='gray') # In[10]: magy = cv2.imread('sobely.png', cv2.IMREAD_GRAYSCALE) plt.imshow(magy, cmap='gray') # In[11]: mag = cv2.imread('sobel.png', cv2.IMREAD_GRAYSCALE) plt.imshow(mag, cmap='gray') # In[12]: X = int(input("Enter the coordinate X at which angle is required: ")) Y = int(input("Enter the coordinate Y at which angle is required: ")) print(angle[X][Y]) <file_sep>/coinchange.cpp #include<bits/stdc++.h> using namespace std; int coinchange(int a[],int m,int n) { int table[n+1]; memset(table,0,sizeof(table)); table[0]=1; for(int i=0;i<m;i++) { for(int j=a[i];j<=n;j++) { table[j]+=table[j-a[i]]; } } for(int i=0;i<n+1;i++) cout<<table[i]<<endl; return table[n]; } int main() { int t; cin>>t; while(t--) { int n=20, m=3; int a[3]={3,5,10}; cout<<coinchange(a,3,20); } }<file_sep>/c.cpp #include<bits/stdc++.h> using namespace std; int main() { int n,m; cin>>n>>m; int dp[n]; memset(dp,0,sizeof(dp)); vector<pair<int,int> >vec; for(int i=0;i<m;i++){ int t,l,r; cin>>t>>l>>r; l--; r--; if(t==1) { dp[l]++; dp[r]--; } else vec.push_back({l,r}); } for(int i=1;i<n;i++) dp[i]+=dp[i-1]; for(int i=0;i<vec.size();i++){ int l = vec[i].first; int r = vec[i].second; int cnt =0; for(int j=l;j<r;j++){ if(dp[j]<=0) cnt++; } if(cnt==0){ cout<<"NO\n"; return 0; } } cout<<"YES\n"; int ans = n+1; cout<<ans<<" "; for(int i=1;i<n;i++){ if(dp[i-1]<=0) ans--; else ans++; cout<<ans<<" "; } cout<<endl; return 0; }<file_sep>/countsales.cpp #include <bits/stdc++.h> using namespace std; #define ll long long const ll N = 1e5 + 5; ll n; vector<vector<ll>> adj(N); vector<bool> vis(N); 
unordered_map<ll, unordered_map<char, ll>> ans; string a; void clean() { ans.clear(); for(ll i=0 ; i<=n ; i++) { adj[i].clear(); vis[i] = false; } } unordered_map<char, ll> dfs(ll src=1) { vis[src] = true; unordered_map<char,ll> cnt; cnt[a[src-1]]++; for(auto it:adj[src]) { if(!vis[it]) { unordered_map<char, ll> next = dfs(it); for(auto c: next) cnt[c.first] += c.second; } } ans[src] = cnt; return cnt; } int main() { ll t; cin >> t; while(t--) { cin >> n; clean(); ll q; cin >> q; cin >> a; for(ll i=0 ; i<n-1 ; i++) { ll x, y; cin >> x >> y; adj[x].push_back(y); adj[y].push_back(x); } dfs(); while(q--) { ll x; char c; cin >> x >> c; cout << ans[x][c] << endl; } } return 0; }<file_sep>/AlltraversalsTree.cpp #include<bits/stdc++.h> using namespace std; struct TreeNode { int val; TreeNode*left, *right; TreeNode(int data) { val=data; left=right=NULL; } }; void Inorder(TreeNode*root) { if(root == NULL) return ; // indent code properly Inorder(root->left); cout<<root->val; Inorder(root->right); } void Preorder(TreeNode*root) { if(root == NULL) return ; // indent code properly cout<<root->val; Inorder(root->left); Inorder(root->right); } void Postorder(TreeNode*root) { if(!root) return ; Inorder(root->left); Inorder(root->right); cout<<root->val; } void LevelOrder(TreeNode*root) { if(!root) return; // indent code properly queue<TreeNode*>q; q.push(root); while(!q.empty()) { TreeNode * t=q.front(); cout<<t->val<<" "; q.pop(); if(t->left) q.push(t->left); if(t->right) q.push(t->right); } } int main() { TreeNode *root =new TreeNode(1); // indent code properly root->left = new TreeNode(2); root->right =new TreeNode(3); root->left->left =new TreeNode(4); root->left->right =new TreeNode(5); cout << "\nPreorder traversal of binary tree is \n"; Preorder(root); cout << "\nInorder traversal of binary tree is \n"; Inorder(root); cout << "\nPostorder traversal of binary tree is \n"; Postorder(root); cout << "\nLevelOrder traversal of binary tree is \n"; LevelOrder(root); } 
<file_sep>/bottomview1.cpp void bottomview(Node*root) { if(!root) return; int hd=0; root->hd=0; while(!q.empty()) { Node *temp=q.front(); q.pop(); m[hd]=temp->data; if(temp->left) { temp->left->hd=hd-1; q.push(temp->left); } if(temp->right) { temp->right->hd=hd+1; q.push(temp->right); } } for(auto it:m) { cout<<it->second; } }<file_sep>/linebresenham.cpp //LINE-BRESENHAM GENERALIZED #include<graphics.h> #include<iostream> using namespace std; #define abs(x) (x>0?x:x*-1)//but()can be ignored in this case void bresenham(int x1,int y1,int x2,int y2) { int dx,dy,p,x,y,temp,swap=0,xend; dx=abs(x2-x1); dy=abs(y2-y1); if(x1>x2){ x=x2; y=y2; xend=x1; } else { x=x1;y=y1;xend=x2;} if(dy>dx) { temp=dx; dx=dy; dy=temp; swap=1; } p=2*dy-dx; while(x<=xend) { putpixel(x,y,WHITE); delay(1); if(swap==0) x++; else y++; if(p>0) { p=p+2*dy-2*dx; if(swap==0) y++; else x++; } else p=p+2*dy; } } int main() { int gd=DETECT,gm,x1,y1,x2,y2; cin>>x1>>y1>>x2>>y2; initgraph(&gd,&gm,NULL); bresenham(x1,y1,x2,y2); getch(); closegraph(); return 0; } <file_sep>/eggdrop.cpp #include<bits/stdc++.h> using namespace std; void doEggDropping(int n,int k) { int d[n+1][k+1]; for(int i=0;i<n+1;i++) d[i][1]=i; for(int i=0;i<k+1;i++) d[1][i]=1; for(int i=2;i<n+1;i++) { for(int j=2;j<k+1;j++) { d[i][j]=INT_MAX; for(int x=1;x<i;x++) { int t=max(d[i-x][j],d[x-1][j-1])+1; if(t<d[i][j]) d[i][j]=t; } } } // for( int a[]:d[n][k]) // cout<<to_string(a)<<endl; cout<<d[n][k]; } int main() { int n,k; cin>>n,k; doEggDropping(n,k); }<file_sep>/bayesdecisiontheory.cpp // bayesdecisiontheory.cpp #include <bits/stdc++.h> using namespace std; double cond_prob(int fval, int j, int cls, vector<vector<int>> &f, vector<int> &c, int t) { double num = 0, denom = 0; int i; for (i = 0; i < t; i++) { denom += (c[i] == cls); num += (f[i][j] == fval && c[i] == cls); } if (denom == 0) return 0; return num / denom; } int main() { int n, x, t, i, j; cout << "Enter x number of features and n number of classes\n"; cin >> x >> n; cout << 
"Enter number of test sets\n"; cin >> t; vector<vector<int>> f(t, vector<int>(x)); vector<int> c(t); cout << "Enter features for training sets followed by their class\n"; for (i = 0; i < t; i++) { // cout<<"Enter features for "<< i <<"training set\n"; for (j = 0; j < x; j++) cin >> f[i][j]; // cout<<"Enter class for the specific training set (0 to n-1)\n"; cin >> c[i]; } vector<double> pc(n); // probability of classes P(c) for (i = 0; i < t; i++) pc[c[i]]++; for (i = 0; i < n; i++) pc[i] /= t; cout << "Now, Enter Features of the new object and its classes will be determined\n"; vector<int> fo(x); for (i = 0; i < x; i++) cin >> fo[i]; vector<double> cp(n, 1); // product of p(f1/ci)*p(f2/ci)*..*p(ci) for (i = 0; i < n; cp[i] *= pc[i], i++) for (j = 0; j < x; j++) cp[i] *= cond_prob(fo[j], j, i, f, c, t); double ma = 0, mai; for (i = 0; i < n; i++) { if (cp[i] > ma) { ma = cp[i]; mai = i; } cout << cp[i] << ' '; } cout << '\n'; cout << "Class is: " << mai << " with probaility " << ma << '\n'; return 0; } /* Enter x number of features and n number of classes 3 2 Enter number of test sets 10 Enter features for training sets followed by their class 0 0 0 1 0 0 0 0 0 0 0 1 1 0 0 0 1 0 1 1 1 1 1 0 1 1 1 1 1 1 0 0 0 1 1 0 0 1 1 1 Now, Enter Features of the new object and its classes will be determined 1 0 1 0.048 0.072 Class is: 1 with probaility 0.072 */ <file_sep>/midsquare.py """ Created on Fri Apr 12 12:51:10 2019 @author: psp_643a """ # midsquare.py seed_number = int(input("Enter a 4 digit number:\n[####] ")) n = seed_number vis = set() c = 0 ans=[] while n not in vis: c += 1 vis.add(n) n = int(str(n * n).zfill(8)[2:6]) print(c, ":", n) ans.append(n); print("We began with ", seed_number, "and have repeated ourselves after ", c, "steps with ", n) print("Random numbers are: ") print(ans)<file_sep>/NITIKA.cpp #include <iostream> #include <bits/stdc++.h> using namespace std; string toup(char ch){ char ch1; string S=""; if(int(ch)>=97&&int(ch)<=122){ ch1=char(int(ch)-25-7); 
S+=ch1; } else{ S+=ch; } return S; } string tolow(char ch){ char ch1; string S=""; if(int(ch)>=65&&int(ch)<=90){ ch1=char(int(ch)+25+7); S+=ch1; } else S+=ch; return S; } int main(){ int T=0; cin>>T; cin.ignore(); string S; while(T--){ getline(cin,S); // cout<<S; string S1="";int count=0; for(int i=S.length()-1;i>=0;i--){ if(S[i]==' ')continue; if(i-1>=0){ if(S[i-1]==' '){ if(count==0){ S1=toup(S[i])+S1; count++; } else{ S1=toup(S[i])+". "+S1; count++; } } else{ if(count==0){ S1=tolow(S[i])+S1; } // else{ // } } } else{ if(count==0){ S1=toup(S[i])+S1; } else{ S1=toup(S[i])+". "+S1; } } } cout<<S1<<"\n"; } } <file_sep>/distancevectorrouting.cpp //DISTANCE VECTOR ROUTING - bellman ford algorithm #include<iostream> #include<vector> #include<utility> #include<climits> using namespace std; typedef long long LL; typedef pair<LL,int> PI; class graph { int nodes,edges; vector<PI> *a; vector<LL> *d; vector<int> *par,*rout; void printpath(int,int); public: graph(){} graph(int nodes,int edges) { this->nodes=nodes; this->edges=edges; a=new vector<PI>[nodes+1]; d=new vector<LL>[nodes+1]; par=new vector<int>[nodes+1]; rout=new vector<int>[nodes+1]; for(int i=0 ; i<=nodes ; i++) { d[i].assign(nodes+1,LLONG_MAX); par[i].assign(nodes+1,-1); rout[i].assign(nodes+1,-1); } } void addedges() { int x,y,i; LL w; for(i=1;i<=edges;i++) { cin>>x>>y>>w; a[x].push_back({w,y}); //a[y].push_back({w,x}); } } void bellmanford(int); void display(int); }; void graph::bellmanford(int s) { d[s][s]=0; int i,j,k,u,v,flag=0; LL w; for(i=1;i<=nodes-1 ; i++) { for(j=0;j<nodes;j++)//for all edges for(k=0;k<a[j].size();k++) { u=j; v=a[j][k].second; w=a[j][k].first; if(d[s][u]!=LLONG_MAX && d[s][u] + w < d[s][v]) { d[s][v]=d[s][u]+w; par[s][v]=u; } } } display(s); } void graph::printpath(int i,int s) { vector<int> z; int x=i; while(i!=-1) { z.push_back(i); i=par[s][i]; } for(i=z.size()-1;i>=0;i--) cout<<z[i]<<" "; if(z.size()>1) rout[s][x]=z[z.size()-2]; } void graph::display(int s) { cout<<"\n\nFOR VERTEX 
"<<s<<"\n"; cout<<"vertex\tdistance\tpath\t\tNEXT HOP\n"; for(int i=0;i<nodes;i++) { if(d[s][i]!=LLONG_MAX) { cout<<i<<"\t"<<d[s][i]<<"\t\t"; printpath(i,s); cout<<"\t\t"<<rout[s][i]; } else cout<<i<<"\t"<<"INF"; cout<<"\n"; } } int main() { graph g; int n,m; cin>>n>>m; g=graph(n,m); g.addedges(); for(int i=0 ; i<n ;i++) g.bellmanford(i); return 0; } /* 5 8 0 1 -1 0 2 4 1 2 3 1 3 2 1 4 2 3 2 5 3 1 1 4 3 -3 */ <file_sep>/b.cpp #include<bits/stdc++.h> using namespace std; #define ll long long int void fast(){ ios_base::sync_with_stdio(false); cin.tie(NULL); } int main() { // fast(); ll n; cin>>n; string s; cin>>s; ll dp[n][26]; memset(dp,0,sizeof(dp)); int x = s[0]-'a'; dp[0][x]++; for(int i=1;i<n;i++){ x = s[i]-'a'; dp[i][x]++; for(int j=0;j<26;j++){ dp[i][j]+=dp[i-1][j]; } } ll q; cin>>q; while(q--){ string t; cin>>t; ll a[26]; memset(a,0,sizeof(a)); for(int i=0;i<t.size();i++){ x = t[i]-'a'; a[x]++; } ll low = 0,high =n-1; ll ans = -1; while(low<=high){ ll mid = (low+high)/2; ll flag=0; for(int i=0;i<26;i++){ if(dp[mid][i]<a[i]){ flag=1; break; } } if(flag==0){ high =mid-1; ans = mid; } else low =mid+1; } cout<<ans+1<<"\n"; } return 0; }
9282cc44b86efeda4491dd4a55f6f8cb47eec51d
[ "Python", "C++" ]
21
C++
MANJUL99/hacktober
6871217e7e5ad7787614cad98256cd1134edcdcc
f2889438b7f86d118a5b661e1afece628f93c508
refs/heads/master
<repo_name>sycomix/AI_IDS<file_sep>/run_HAST_I_noise_V3.py # Run HAST I with noises + creating a graph for datatest accuracy for each epoch import os # self made functions and classes import AI_IDS.create_dataset_v2 as dataset from AI_IDS.HAST_I_NN import HAST_I from AI_IDS.test_NN_wo_load import test_wo_load as twol # Pytorch imports import torch import torch.nn as nn import torch.optim as optim import torchvision.transforms as transforms import matplotlib.ticker as mtick import matplotlib.pyplot as plt import numpy as np # Defining the data for the NN batch_size = 64 train_dir = os.path.join(os.getcwd(), "..", "Dataset_HAST_I_0_big") # The location of the Dataset folder train_set = dataset.create_dataset(train_dir, "EoS.npy") # Creating the dataset trainloader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0) classes = ('safe', 'exploit') # 0 = safe, 1 = exploit # Defining the NN # The lr, momentum and transform are based on pytorch example net = HAST_I() criterion = nn.CrossEntropyLoss() # optimizer = optim.RMSprop(net.parameters(), lr=0.001, momentum=0.9) optimizer = optim.SGD(net.parameters(), lr=0.001, weight_decay=0) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.5) transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) # NN Variables definition epochs = 10 packets = 128 mtu = 1514 cols = 32 rows = int(np.ceil(mtu / cols)) extra_packets = 3 samples = len(os.listdir(train_dir)) batch_per_epoch = np.ceil(samples / batch_size) accuracy_check_step = 4 # List of accuracy results for different test sets and through the training. 
# Used for the print statistics part at the end Datatest_100_accuracy = [] Datatest_75_accuracy = [] Datatest_50_accuracy = [] Datatest_25_accuracy = [] Datatrain_accuracy = [] # Training the NN for epoch in range(epochs): # loop over the dataset multiple times print("epoch = ", str(epoch)) print("lr = ", scheduler.get_last_lr()[0]) running_loss = 0.0 correct_train = 0 total_train = 0 for i, data in enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data # zero the parameter gradients optimizer.zero_grad() # Move the stream at random amount of packets. The indentation of the stream is in the range [0, 3] indent = np.random.randint(0, extra_packets + 1) cropped_inputs = inputs[:, :, :, (cols * (indent + 1)):((packets * cols) + (cols * (indent + 1)))] # forward + backward + optimize outputs = net(cropped_inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # calculate statistics _, predicted_train = torch.max(outputs.data, 1) total_train += labels.size(0) correct_train += (predicted_train == labels).sum().item() running_loss += loss.item() # print statistics after each batch of samples if i % accuracy_check_step == accuracy_check_step - 1: # print every accuracy_check_step mini-batches print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 4)) accuracy_train = 100.0 * correct_train / total_train Datatrain_accuracy.append(accuracy_train) print('accuracy = %.2f %%' % accuracy_train) running_loss = 0.0 # Validation after each epoch net.eval() with torch.no_grad(): Datatest_100_accuracy.append(twol("Datatest_HAST_I_0", net.state_dict())) Datatest_75_accuracy.append(twol("Datatest_HAST_I_diff_safe&safe", net.state_dict())) Datatest_50_accuracy.append(twol("Datatest_HAST_I_0xeb&safe", net.state_dict())) Datatest_25_accuracy.append(twol("Datatest_HAST_I_0xeb&safe&diff_safe", net.state_dict())) net.train() # update lr scheduler.step() print('Finished Training') # Saving NN state print('Saving 
state') torch.save({ 'epoch': epochs, 'model_state_dict': net.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': loss }, os.path.join(os.getcwd(), "metasploit_post-training_NN", "NN_post_training_HAST_I_6_e_V2")) # Printing the Statistics graph print("Printings statistics") # Defining the plots train_accuracy_precision = accuracy_check_step / batch_per_epoch plt.plot(np.arange(1, len(Datatest_100_accuracy) + 1), Datatest_100_accuracy, label="100% similar", marker='o') plt.plot(np.arange(1, len(Datatest_75_accuracy) + 1), Datatest_75_accuracy, label="75% similar", marker='o') plt.plot(np.arange(1, len(Datatest_50_accuracy) + 1), Datatest_50_accuracy, label="50% similar", marker='o') plt.plot(np.arange(1, len(Datatest_25_accuracy) + 1), Datatest_25_accuracy, label="25% similar", marker='o') plt.plot(np.arange(train_accuracy_precision, (len(Datatrain_accuracy) * train_accuracy_precision) + train_accuracy_precision, step=train_accuracy_precision), Datatrain_accuracy, "k--", label="Datatrain accuracy") # Defining the grids plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter()) plt.xticks(np.arange(1, epochs + 1, 1)) # Creating labels and titles plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.title('Accuracy Vs. 
Epochs - SGD') plt.legend() # Saving and showing the plot plot_name = "Accuracy Vs epochs for" + str(epochs) + " SGD.png" plt.savefig(os.path.join(os.getcwd(), plot_name)) # plt.show() <file_sep>/parsing_scripts/rdp_connect.py import os # TODO make the code suitable for ubuntu terminal rdp to windows (maybe via metasploit) def rdp_connect(ip_addr): cmd = "mstsc /control /admin /V:" + str(ip_addr) os.system(cmd) <file_sep>/create_packets_scripts/create_data.py import os import time from metasploit import * from tcpdump import * from remmina import * from _0xeb import * from rdesktop import * packet_num = 50 delay_exploit = 2 folder_exploit = "Data_exploit" folder_safe = "Data_safe" folder_0xeb = "Data_0xeb" folder_rdesktop = "Data_rdesktop" index_file_0xeb = open(folder_0xeb + "/index", "r+") index_file_safe = open(folder_safe + "/index", "r+") index_file_exploit = open(folder_exploit + "/index", "r+") index_file_rdesktop = open(folder_rdesktop + "/index", "r+") print("Reading indexes from index files...") index_safe = int(index_file_safe.read()) index_exploit = int(index_file_exploit.read()) index_0xeb = int(index_file_0xeb.read()) index_rdesktop = int(index_file_rdesktop.read()) print("Starting 0xeb packets from index " + str(index_0xeb)) print("Starting exploit packets from index " + str(index_exploit)) print("Starting safe packets from index " + str(index_safe)) print("Starting rdesktop packets from index " + str(index_rdesktop)) """ # Create exploit packets print("Creating exploit packets...") print("Opening Metasploit...") openMetasploit() print("Setting exploit parameters...") setupExploit() for index in range(index_exploit, index_exploit + packet_num): print("Packet no. 
" + str(index)) packet_file = folder_exploit + "/exploit" + str(index) runTcpdump(packet_file) runExploit() time.sleep(delay_exploit) exitMetasploit() # Write new index to index file index_file_exploit.write(str(index_exploit + packet_num)) # Create safe packets print("\nCreating safe packets...") for index in range(index_safe, index_safe + packet_num): print("Packet no. " + str(index)) packet_file = folder_safe + "/safe" + str(index) runTcpdump(packet_file) runRemmina() stopTcpdump() # Write new index to index file index_file_safe.write(str(index_safe + packet_num)) for index in range(index_0xeb, index_0xeb + packet_num): print("Packet no. " + str(index)) packet_file = folder_0xeb + "/0xeb" + str(index) runTcpdump(packet_file) run0xeb() time.sleep(1) """ for index in range(index_rdesktop, index_rdesktop + packet_num): time.sleep(1) print("Packet no. " + str(index)) packet_file = folder_rdesktop + "/rdesktop" + str(index) runTcpdump(packet_file) runRDesktop() stopTcpdump() print("Done... 
:)") <file_sep>/parsing_datatest_draft.py import os from AI_IDS.parsing_scripts.sniff2img_hast_i import sniff2img exploit_dir = os.listdir("../Dataset/exploit") safe_dir = os.listdir("../Dataset/safe") for f in range(len(exploit_dir)): sniff2img(os.path.join("../Dataset/exploit", exploit_dir[f]), os.path.join("../Dataset/exploit_parsed_big", str(f)), 132, "n", 0) print("finished", f) for f in range(len(safe_dir)): sniff2img(os.path.join("../Dataset/safe", safe_dir[f]), os.path.join("../Dataset/safe_parsed_big", str(f + len(exploit_dir))), 132, "o", 0) print("finished", f) #################################################################################################################################### import os from AI_IDS.parsing_scripts.sniff2img_hast_i import sniff2img exploit_dir = os.listdir("../Datatest/exploit") safe_dir = os.listdir("../Datatest/safe") for f in range(len(safe_dir)): sniff2img(os.path.join("../Datatest/safe", safe_dir[f]), os.path.join("../Datatest/safe_moved_l_1", str(f + len(exploit_dir))), 128, "l", 1) print("finished",f) for f in range(len(safe_dir)): sniff2img(os.path.join("../Datatest/safe", safe_dir[f]), os.path.join("../Datatest/safe_moved_l_2", str(f + len(exploit_dir))), 128, "l", 2) print("finished",f) for f in range(len(safe_dir)): sniff2img(os.path.join("../Datatest/safe", safe_dir[f]), os.path.join("../Datatest/safe_moved_l_3", str(f + len(exploit_dir))), 128, "l", 3) print("finished",f) #################################################################################################################################### import os from AI_IDS.parsing_scripts.sniff2img_hast_i import sniff2img exploit_dir = os.listdir("../Datatest/exploit") safe_dir = os.listdir("../Datatest/safe_diff") for f in range(len(safe_dir)): sniff2img(os.path.join("../Datatest/safe_diff", safe_dir[f]), os.path.join("../Datatest/safe_diff_parsed", str(f + len(exploit_dir))), 128, "l", 0) print("finished",f) <file_sep>/HAST_I_NN.py # HAST I NN. 
# Based on HAST-I from https://ieeexplore.ieee.org/document/8171733
from abc import ABC

import torch.nn as nn
import torch.nn.functional as f


class HAST_I(nn.Module, ABC):
    """HAST-I convolutional classifier.

    Two conv+max-pool stages followed by a three-layer fully-connected head.
    The head was narrowed to 2 output classes (binary exploit/safe decision).
    Input is a batch of single-channel images whose spatial size must flatten
    to exactly 64896 values after the second pooling stage.
    """

    def __init__(self):
        super(HAST_I, self).__init__()
        # Stage 1: 1 input channel -> 128 feature maps, 32x32 kernel.
        self.conv1 = nn.Conv2d(1, 128, 32)  # 128 Channels
        # One shared 2x2 max-pool, applied after each convolution.
        self.pool = nn.MaxPool2d(2, 2)
        # Stage 2: narrow 128 maps down to 32 with a 4x4 kernel.
        self.conv2 = nn.Conv2d(128, 32, 4)
        # Classifier head; 64896 = 32 channels * flattened spatial size.
        self.fc1 = nn.Linear(64896, 128)
        self.fc2 = nn.Linear(128, 32)
        self.fc3 = nn.Linear(32, 2)  # Changed to 2 classes

    def forward(self, x):
        """Return (N, 2) class logits for a batch of 1-channel images."""
        stage_one = self.pool(f.relu(self.conv1(x)))
        stage_two = self.pool(f.relu(self.conv2(stage_one)))
        flattened = stage_two.view(-1, 64896)
        hidden = f.relu(self.fc2(f.relu(self.fc1(flattened))))
        return self.fc3(hidden)
fe8d022709e47ede114dc66f5f8123c50bdabdd6
[ "Python" ]
5
Python
sycomix/AI_IDS
9c41942ac408ed1f8012f8bf5ac9f9ee2be4bd26
9614100e478e1dd0713234df49a188c2e08e705c
refs/heads/master
<file_sep>from libmproxy import script, flow
import tutils


class TestScript:
    # Unit tests for libmproxy's script-loading machinery (script.Script).

    def test_simple(self):
        # Load a well-formed script and exercise its exported hooks.
        s = flow.State()
        fm = flow.FlowMaster(None, s)
        ctx = flow.ScriptContext(fm)

        p = script.Script(tutils.test_data.path("scripts/a.py"), ctx)
        p.load()
        assert "here" in p.ns
        # run() returns (success_flag, value); "here" increments per call.
        assert p.run("here") == (True, 1)
        assert p.run("here") == (True, 2)

        # Calling a hook with the wrong arity fails and reports two details.
        ret = p.run("errargs")
        assert not ret[0]
        assert len(ret[1]) == 2

        # Check reload
        p.load()
        assert p.run("here") == (True, 1)

    def test_duplicate_flow(self):
        # A script that duplicates flows yields one original and one replay.
        s = flow.State()
        fm = flow.FlowMaster(None, s)
        fm.load_script(tutils.test_data.path("scripts/duplicate_flow.py"))
        r = tutils.treq()
        fm.handle_request(r)
        assert fm.state.flow_count() == 2
        assert not fm.state.view[0].request.is_replay()
        assert fm.state.view[1].request.is_replay()

    def test_err(self):
        # Each broken script (missing file, directory, syntax error, load
        # error) must surface a descriptive failure instead of loading.
        s = flow.State()
        fm = flow.FlowMaster(None, s)
        ctx = flow.ScriptContext(fm)

        s = script.Script("nonexistent", ctx)
        tutils.raises(
            "no such file",
            s.load
        )

        s = script.Script(tutils.test_data.path("scripts"), ctx)
        tutils.raises(
            "not a file",
            s.load
        )

        s = script.Script(tutils.test_data.path("scripts/syntaxerr.py"), ctx)
        tutils.raises(
            script.ScriptError,
            s.load
        )

        s = script.Script(tutils.test_data.path("scripts/loaderr.py"), ctx)
        tutils.raises(
            script.ScriptError,
            s.load
        )
<file_sep>
How to install the __mitmproxy__ certificate authority in Windows 7:

<ol class="tlist">
    <li>
        Copy the ~/.mitmproxy/mitmproxy-ca-cert.p12 file to the target system.
    </li>
    <li>
        Double-click the certificate file. You should see a certificate import
        wizard:
        <img src="@!urlTo('win7-wizard.png')!@"/>
    </li>
    <li>
        Click "Next" until you're prompted for the certificate store:
        <img src="@!urlTo('win7-certstore.png')!@"/>
    </li>
    <li>
        <p>Select "Place all certificates in the following store", and select
        "Trusted Root Certification Authorities":</p>
        <img src="@!urlTo('win7-certstore-trustedroot.png')!@"/>
    </li>
    <li>
        Click "Next" and "Finish".
</li> </ol> <file_sep>from libmproxy.flow import Response from netlib.odict import ODictCaseless """ This example shows two ways to redirect flows to other destinations. """ def request(context, flow): if flow.request.host.endswith("example.com"): resp = Response(flow.request, [1,1], 200, "OK", ODictCaseless([["Content-Type","text/html"]]), "helloworld", None) flow.request.reply(resp) if flow.request.host.endswith("example.org"): flow.request.host = "mitmproxy.org" flow.request.headers["Host"] = ["mitmproxy.org"] <file_sep>#!/usr/bin/env python """ This example builds on mitmproxy's base proxying infrastructure to implement functionality similar to the "sticky cookies" option. This is at a lower level than the Flow mechanism, so we're dealing directly with request and response objects. """ from libmproxy import controller, proxy import os class StickyMaster(controller.Master): def __init__(self, server): controller.Master.__init__(self, server) self.stickyhosts = {} def run(self): try: return controller.Master.run(self) except KeyboardInterrupt: self.shutdown() def handle_request(self, msg): hid = (msg.host, msg.port) if msg.headers["cookie"]: self.stickyhosts[hid] = msg.headers["cookie"] elif hid in self.stickyhosts: msg.headers["cookie"] = self.stickyhosts[hid] msg.reply() def handle_response(self, msg): hid = (msg.request.host, msg.request.port) if msg.headers["set-cookie"]: self.stickyhosts[hid] = msg.headers["set-cookie"] msg.reply() config = proxy.ProxyConfig( cacert = os.path.expanduser("~/.mitmproxy/mitmproxy-ca.pem") ) server = proxy.ProxyServer(config, 8080) m = StickyMaster(server) m.run() <file_sep>#!/usr/bin/env python """ This example shows how to build a proxy based on mitmproxy's Flow primitives. Note that request and response messages are not automatically replied to, so we need to implement handlers to do this. 
""" import os from libmproxy import proxy, flow class MyMaster(flow.FlowMaster): def run(self): try: flow.FlowMaster.run(self) except KeyboardInterrupt: self.shutdown() def handle_request(self, r): f = flow.FlowMaster.handle_request(self, r) if f: r.reply() return f def handle_response(self, r): f = flow.FlowMaster.handle_response(self, r) if f: r.reply() print f return f config = proxy.ProxyConfig( cacert = os.path.expanduser("~/.mitmproxy/mitmproxy-ca.pem") ) state = flow.State() server = proxy.ProxyServer(config, 8080) m = MyMaster(server, state) m.run() <file_sep>import urwid import common from .. import filt, version footer = [ ("heading", 'mitmproxy v%s '%version.VERSION), ('heading_key', "q"), ":back ", ] class HelpView(urwid.ListBox): def __init__(self, master, help_context, state): self.master, self.state = master, state self.help_context = help_context or [] urwid.ListBox.__init__( self, self.helptext() ) def helptext(self): text = [] text.append(urwid.Text([("head", "This view:\n")])) text.extend(self.help_context) text.append(urwid.Text([("head", "\n\nMovement:\n")])) keys = [ ("j, k", "up, down"), ("h, l", "left, right (in some contexts)"), ("space", "page down"), ("pg up/down", "page up/down"), ("arrows", "up, down, left, right"), ] text.extend(common.format_keyvals(keys, key="key", val="text", indent=4)) text.append(urwid.Text([("head", "\n\nGlobal keys:\n")])) keys = [ ("c", "client replay"), ("H", "edit global header set patterns"), ("i", "set interception pattern"), ("M", "change global default display mode"), (None, common.highlight_key("automatic", "a") + [("text", ": automatic detection")] ), (None, common.highlight_key("hex", "e") + [("text", ": Hex")] ), (None, common.highlight_key("html", "h") + [("text", ": HTML")] ), (None, common.highlight_key("image", "i") + [("text", ": Image")] ), (None, common.highlight_key("javascript", "j") + [("text", ": JavaScript")] ), (None, common.highlight_key("json", "s") + [("text", ": JSON")] ), (None, 
common.highlight_key("urlencoded", "u") + [("text", ": URL-encoded data")] ), (None, common.highlight_key("raw", "r") + [("text", ": raw data")] ), (None, common.highlight_key("xml", "x") + [("text", ": XML")] ), (None, common.highlight_key("amf", "f") + [("text", ": AMF (requires PyAMF)")] ), ("o", "toggle options:"), (None, common.highlight_key("anticache", "a") + [("text", ": prevent cached responses")] ), (None, common.highlight_key("anticomp", "c") + [("text", ": prevent compressed responses")] ), (None, common.highlight_key("showhost", "h") + [("text", ": use Host header for URL display")] ), (None, common.highlight_key("killextra", "k") + [("text", ": kill requests not part of server replay")] ), (None, common.highlight_key("norefresh", "n") + [("text", ": disable server replay response refresh")] ), (None, common.highlight_key("upstream certs", "u") + [("text", ": sniff cert info from upstream server")] ), ("q", "quit / return to flow list"), ("Q", "quit without confirm prompt"), ("P", "set reverse proxy mode"), ("R", "edit replacement patterns"), ("s", "set/unset script"), ("S", "server replay"), ("t", "set sticky cookie expression"), ("u", "set sticky auth expression"), ] text.extend(common.format_keyvals(keys, key="key", val="text", indent=4)) text.append(urwid.Text([("head", "\n\nFilter expressions:\n")])) f = [] for i in filt.filt_unary: f.append( ("~%s"%i.code, i.help) ) for i in filt.filt_rex: f.append( ("~%s regex"%i.code, i.help) ) for i in filt.filt_int: f.append( ("~%s int"%i.code, i.help) ) f.sort() f.extend( [ ("!", "unary not"), ("&", "and"), ("|", "or"), ("(...)", "grouping"), ] ) text.extend(common.format_keyvals(f, key="key", val="text", indent=4)) text.append( urwid.Text( [ "\n", ("text", " Regexes are Python-style.\n"), ("text", " Regexes can be specified as quoted strings.\n"), ("text", " Header matching (~h, ~hq, ~hs) is against a string of the form \"name: value\".\n"), ("text", " Expressions with no operators are regex matches against 
URL.\n"), ("text", " Default binary operator is &.\n"), ("head", "\n Examples:\n"), ] ) ) examples = [ ("google\.com", "Url containing \"google.com"), ("~q ~b test", "Requests where body contains \"test\""), ("!(~q & ~t \"text/html\")", "Anything but requests with a text/html content type."), ] text.extend(common.format_keyvals(examples, key="key", val="text", indent=4)) return text def keypress(self, size, key): key = common.shortcuts(key) if key == "q": self.master.statusbar = self.state[0] self.master.body = self.state[1] self.master.header = self.state[2] self.master.make_view() return None elif key == "?": key = None return urwid.ListBox.keypress(self, size, key) <file_sep>#!/bin/sh # Quick and dangerous script for building OSX binaries. # First, have a recent checkout of the dev version of pyinstaller. Change into # the pyinstaller directory, and then run this script. DST=/tmp/osx-mitmproxy MITMPROXY=~/mitmproxy/mitmproxy PYINST_CMD="./pyinstaller.py -F --clean" rm -rf $DST mkdir -p $DST rm -rf mitmproxy rm -rf mitmdump $PYINST_CMD $MITMPROXY/mitmproxy cp mitmproxy/dist/mitmproxy $DST $PYINST_CMD $MITMPROXY/mitmdump cp mitmdump/dist/mitmdump $DST cshape $MITMPROXY/doc-src $DST/doc <file_sep> def request(ctx, f): f = ctx.duplicate_flow(f) ctx.replay_request(f) <file_sep> def start(ctx): raise ValueError <file_sep>from countershape import Page pages = [ Page("30second.html", "Client playback: a 30 second example"), Page("gamecenter.html", "Setting highscores on Apple's GameCenter"), ] <file_sep>import Image, cStringIO def response(context, flow): if flow.response.headers["content-type"] == ["image/png"]: s = cStringIO.StringIO(flow.response.content) img = Image.open(s).rotate(180) s2 = cStringIO.StringIO() img.save(s2, "png") flow.response.content = s2.getvalue() <file_sep>import libmproxy.console.help as help class DummyMaster: def make_view(self): pass class TestHelp: def test_helptext(self): h = help.HelpView(None, "foo", None) assert h.helptext() def 
test_keypress(self): h = help.HelpView(DummyMaster(), "foo", [1, 2, 3]) assert not h.keypress((0, 0), "q") assert not h.keypress((0, 0), "?") assert h.keypress((0, 0), "o") == "o" <file_sep>import argparse from libmproxy import proxy, flow, cmdline import tutils from libpathod import test from netlib import http, tcp import mock def test_proxy_error(): p = proxy.ProxyError(111, "msg") assert str(p) def test_app_registry(): ar = proxy.AppRegistry() ar.add("foo", "domain", 80) r = tutils.treq() r.host = "domain" r.port = 80 assert ar.get(r) r.port = 81 assert not ar.get(r) r = tutils.treq() r.host = "domain2" r.port = 80 assert not ar.get(r) r.headers["host"] = ["domain"] assert ar.get(r) class TestServerConnection: def setUp(self): self.d = test.Daemon() def tearDown(self): self.d.shutdown() def test_simple(self): sc = proxy.ServerConnection(proxy.ProxyConfig(), "http", self.d.IFACE, self.d.port, "host.com") sc.connect() r = tutils.treq() r.path = "/p/200:da" sc.send(r) assert http.read_response(sc.rfile, r.method, 1000) assert self.d.last_log() r.content = flow.CONTENT_MISSING tutils.raises("incomplete request", sc.send, r) sc.terminate() def test_terminate_error(self): sc = proxy.ServerConnection(proxy.ProxyConfig(), "http", self.d.IFACE, self.d.port, "host.com") sc.connect() sc.connection = mock.Mock() sc.connection.flush = mock.Mock(side_effect=tcp.NetLibDisconnect) sc.terminate() class MockParser: def __init__(self): self.err = None def error(self, e): self.err = e def __repr__(self): return "ParseError(%s)"%self.err class TestProcessProxyOptions: def p(self, *args): parser = argparse.ArgumentParser() cmdline.common_options(parser) opts = parser.parse_args(args=args) m = MockParser() return m, proxy.process_proxy_options(m, opts) def assert_err(self, err, *args): m, p = self.p(*args) assert err.lower() in m.err.lower() def assert_noerr(self, *args): m, p = self.p(*args) assert p return p def test_simple(self): assert self.p() def test_cert(self): 
self.assert_noerr("--cert", tutils.test_data.path("data/testkey.pem")) self.assert_err("does not exist", "--cert", "nonexistent") def test_confdir(self): with tutils.tmpdir() as confdir: self.assert_noerr("--confdir", confdir) @mock.patch("libmproxy.platform.resolver", None) def test_no_transparent(self): self.assert_err("transparent mode not supported", "-T") @mock.patch("libmproxy.platform.resolver") def test_transparent_reverse(self, o): self.assert_err("can't set both", "-P", "reverse", "-T") self.assert_noerr("-T") assert o.call_count == 1 self.assert_err("invalid reverse proxy", "-P", "reverse") self.assert_noerr("-P", "http://localhost") def test_certs(self): with tutils.tmpdir() as confdir: self.assert_noerr("--client-certs", confdir) self.assert_err("directory does not exist", "--client-certs", "nonexistent") def test_auth(self): p = self.assert_noerr("--nonanonymous") assert p.authenticator p = self.assert_noerr("--htpasswd", tutils.test_data.path("data/htpasswd")) assert p.authenticator self.assert_err("invalid htpasswd file", "--htpasswd", tutils.test_data.path("data/htpasswd.invalid")) p = self.assert_noerr("--singleuser", "test:test") assert p.authenticator self.assert_err("invalid single-user specification", "--singleuser", "test") class TestProxyServer: def test_err(self): parser = argparse.ArgumentParser() cmdline.common_options(parser) opts = parser.parse_args(args=[]) tutils.raises("error starting proxy server", proxy.ProxyServer, opts, 1) class TestDummyServer: def test_simple(self): d = proxy.DummyServer(None) d.start_slave() d.shutdown() <file_sep>import libmproxy.console.common as common from libmproxy import utils, flow, encoding import tutils def test_format_flow(): f = tutils.tflow_full() assert common.format_flow(f, True) assert common.format_flow(f, True, hostheader=True) assert common.format_flow(f, True, extended=True) <file_sep>__mitmproxy__ is an interactive, SSL-capable man-in-the-middle proxy for HTTP with a console interface. 
__mitmdump__ is the command-line version of mitmproxy. Think tcpdump for HTTP.

__libmproxy__ is the library that mitmproxy and mitmdump are built on.

Documentation, tutorials and distribution packages can be found on the
mitmproxy.org website: [mitmproxy.org](http://mitmproxy.org).

Features
--------

- Intercept HTTP requests and responses and modify them on the fly.
- Save complete HTTP conversations for later replay and analysis.
- Replay the client side of an HTTP conversation.
- Replay HTTP responses of a previously recorded server.
- Reverse proxy mode to forward traffic to a specified server.
- Transparent proxy mode on OSX and Linux.
- Make scripted changes to HTTP traffic using Python.
- SSL certificates for interception are generated on the fly.
- And much, much more.

Requirements
------------

* [Python](http://www.python.org) 2.7.x.
* [netlib](http://pypi.python.org/pypi/netlib), version matching mitmproxy.
* [PyOpenSSL](http://pypi.python.org/pypi/pyOpenSSL) 0.13 or newer.
* [pyasn1](http://pypi.python.org/pypi/pyasn1) 0.1.2 or newer.
* [urwid](http://excess.org/urwid/) version 1.1 or newer.
* [PIL](http://www.pythonware.com/products/pil/) version 1.1 or newer.
* [lxml](http://lxml.de/) version 2.3 or newer.
* [flask](http://flask.pocoo.org/) version 0.9 or newer.

Optional, for extended content decoding:

* [PyAMF](http://www.pyamf.org/) version 0.6.1 or newer.
* [protobuf](https://code.google.com/p/protobuf/) version 2.5.0 or newer.

__mitmproxy__ is tested and developed on OSX, Linux and OpenBSD. Windows is
not officially supported at the moment.

Hacking
-------

The following components are needed if you plan to hack on mitmproxy:

* The test suite uses the [nose](http://readthedocs.org/docs/nose/en/latest/)
  unit testing framework and requires [pathod](http://pathod.org) and
  [flask](http://flask.pocoo.org/).
* Rendering the documentation requires
  [countershape](http://github.com/cortesi/countershape).
For convenience, all dependencies save countershape, can be installed from pypi to a virtualenv with 'pip install -r requirements.txt'. Please ensure that all patches are accompanied by matching changes in the test suite. The project maintains 100% test coverage. <file_sep>import os, shutil, tempfile from contextlib import contextmanager from libmproxy import flow, utils, controller from netlib import certutils import mock def treq(conn=None): if not conn: conn = flow.ClientConnect(("address", 22)) conn.reply = controller.DummyReply() headers = flow.ODictCaseless() headers["header"] = ["qvalue"] r = flow.Request(conn, (1, 1), "host", 80, "http", "GET", "/path", headers, "content") r.reply = controller.DummyReply() return r def tresp(req=None): if not req: req = treq() headers = flow.ODictCaseless() headers["header_response"] = ["svalue"] cert = certutils.SSLCert.from_der(file(test_data.path("data/dercert"),"rb").read()) resp = flow.Response(req, (1, 1), 200, "message", headers, "content_response", cert) resp.reply = controller.DummyReply() return resp def tflow(): r = treq() return flow.Flow(r) def tflow_full(): r = treq() f = flow.Flow(r) f.response = tresp(r) return f def tflow_err(): r = treq() f = flow.Flow(r) f.error = flow.Error(r, "error") f.error.reply = controller.DummyReply() return f @contextmanager def tmpdir(*args, **kwargs): orig_workdir = os.getcwd() temp_workdir = tempfile.mkdtemp(*args, **kwargs) os.chdir(temp_workdir) yield temp_workdir os.chdir(orig_workdir) shutil.rmtree(temp_workdir) def raises(exc, obj, *args, **kwargs): """ Assert that a callable raises a specified exception. :exc An exception class or a string. If a class, assert that an exception of this type is raised. If a string, assert that the string occurs in the string representation of the exception, based on a case-insenstivie match. :obj A callable object. :args Arguments to be passsed to the callable. :kwargs Arguments to be passed to the callable. 
""" try: apply(obj, args, kwargs) except Exception, v: if isinstance(exc, basestring): if exc.lower() in str(v).lower(): return else: raise AssertionError( "Expected %s, but caught %s"%( repr(str(exc)), v ) ) else: if isinstance(v, exc): return else: raise AssertionError( "Expected %s, but caught %s %s"%( exc.__name__, v.__class__.__name__, str(v) ) ) raise AssertionError("No exception raised.") test_data = utils.Data(__name__) <file_sep>from countershape import Page pages = [ Page("testing.html", "Testing"), # Page("addingviews.html", "Writing Content Views"), ] <file_sep>#!/bin/sh # Assuming: # mitmproxy/mitmdump is running on port 8080 in straight proxy mode. # pathod is running on port 9999 BASE_HTTP="/Users/aldo/git/public/pathod/pathoc -Tt 1 -eo -I 200,400,405,502 -p 8080 localhost " #$BASE_HTTP -n 10000 "get:'http://localhost:9999':ir,@1" #$BASE_HTTP -n 100 "get:'http://localhost:9999':dr" #$BASE_HTTP -n 10000 "get:'http://localhost:9999/p/200:ir,@300.0 # Assuming: # mitmproxy/mitmdump is running on port 8080 in straight proxy mode. 
# pathod with SSL enabled is running on port 9999 BASE_HTTPS="/Users/aldo/git/public/pathod/pathoc -sc localhost:9999 -Tt 1 -eo -I 200,400,404,405,502,800 -p 8080 localhost " $BASE_HTTPS -en 10000 "get:'/p/200:b@10:ir,@1'" #$BASE_HTTPS -en 10000 "get:'/p/200:ir,@1'" #$BASE_HTTPS -n 100 "get:'/p/200:dr'" #$BASE_HTTPS -n 10000 "get:'/p/200:ir,@3000'" #$BASE_HTTPS -n 10000 "get:'/p/200:ir,\"\\n\"'" <file_sep>import urwid import common footer = [ ('heading_key', "q"), ":back ", ] class FlowDetailsView(urwid.ListBox): def __init__(self, master, flow, state): self.master, self.flow, self.state = master, flow, state urwid.ListBox.__init__( self, self.flowtext() ) def keypress(self, size, key): key = common.shortcuts(key) if key == "q": self.master.statusbar = self.state[0] self.master.body = self.state[1] self.master.header = self.state[2] self.master.make_view() return None elif key == "?": key = None return urwid.ListBox.keypress(self, size, key) def flowtext(self): text = [] title = urwid.Text("Flow details") title = urwid.Padding(title, align="left", width=("relative", 100)) title = urwid.AttrWrap(title, "heading") text.append(title) if self.flow.response: c = self.flow.response.cert if c: text.append(urwid.Text([("head", "Server Certificate:")])) parts = [ ["Type", "%s, %s bits"%c.keyinfo], ["SHA1 digest", c.digest("sha1")], ["Valid to", str(c.notafter)], ["Valid from", str(c.notbefore)], ["Serial", str(c.serial)], ] parts.append( [ "Subject", urwid.BoxAdapter( urwid.ListBox(common.format_keyvals(c.subject, key="highlight", val="text")), len(c.subject) ) ] ) parts.append( [ "Issuer", urwid.BoxAdapter( urwid.ListBox(common.format_keyvals(c.issuer, key="highlight", val="text")), len(c.issuer) ) ] ) if c.altnames: parts.append( [ "Alt names", ", ".join(c.altnames) ] ) text.extend(common.format_keyvals(parts, key="key", val="text", indent=4)) if self.flow.request.client_conn: text.append(urwid.Text([("head", "Client Connection:")])) cc = self.flow.request.client_conn 
parts = [ ["Address", "%s:%s"%tuple(cc.address)], ["Requests", "%s"%cc.requestcount], ["Closed", "%s"%cc.close], ] text.extend(common.format_keyvals(parts, key="key", val="text", indent=4)) return text <file_sep> def request(context, flow): if "application/x-www-form-urlencoded" in flow.request.headers["content-type"]: frm = flow.request.get_form_urlencoded() frm["mitmproxy"] = ["rocks"] flow.request.set_form_urlencoded(frm) <file_sep>import tutils from libmproxy.platform import pf class TestLookup: def test_simple(self): p = tutils.test_data.path("data/pf01") d = open(p,"rb").read() assert pf.lookup("192.168.1.111", 40000, d) == ("5.5.5.5", 80) assert not pf.lookup("192.168.1.112", 40000, d) assert not pf.lookup("192.168.1.111", 40001, d) <file_sep>import urwid import urwid.util from .. import utils, flow VIEW_LIST = 0 VIEW_FLOW = 1 VIEW_FLOW_REQUEST = 0 VIEW_FLOW_RESPONSE = 1 def highlight_key(s, k): l = [] parts = s.split(k, 1) if parts[0]: l.append(("text", parts[0])) l.append(("key", k)) if parts[1]: l.append(("text", parts[1])) return l KEY_MAX = 30 def format_keyvals(lst, key="key", val="text", indent=0): """ Format a list of (key, value) tuples. If key is None, it's treated specially: - We assume a sub-value, and add an extra indent. - The value is treated as a pre-formatted list of directives. """ ret = [] if lst: maxk = min(max(len(i[0]) for i in lst if i and i[0]), KEY_MAX) for i, kv in enumerate(lst): if kv is None: ret.append(urwid.Text("")) else: cols = [] # This cumbersome construction process is here for a reason: # Urwid < 1.0 barfs if given a fixed size column of size zero. 
if indent: cols.append(("fixed", indent, urwid.Text(""))) cols.extend([ ( "fixed", maxk, urwid.Text([(key, kv[0] or "")]) ), kv[1] if isinstance(kv[1], urwid.Widget) else urwid.Text([(val, kv[1])]) ]) ret.append(urwid.Columns(cols, dividechars = 2)) return ret def shortcuts(k): if k == " ": k = "page down" elif k == "j": k = "down" elif k == "k": k = "up" return k def fcol(s, attr): s = unicode(s) return ( "fixed", len(s), urwid.Text( [ (attr, s) ] ) ) if urwid.util.detected_encoding: SYMBOL_REPLAY = u"\u21ba" SYMBOL_RETURN = u"\u2190" else: SYMBOL_REPLAY = u"[r]" SYMBOL_RETURN = u"<-" def raw_format_flow(f, focus, extended, padding): f = dict(f) pile = [] req = [] if extended: req.append( fcol( utils.format_timestamp(f["req_timestamp"]), "highlight" ) ) else: req.append(fcol(">>" if focus else " ", "focus")) if f["req_is_replay"]: req.append(fcol(SYMBOL_REPLAY, "replay")) req.append(fcol(f["req_method"], "method")) preamble = sum(i[1] for i in req) + len(req) -1 if f["intercepting"] and not f["req_acked"]: uc = "intercept" elif f["resp_code"] or f["err_msg"]: uc = "text" else: uc = "title" req.append( urwid.Text([(uc, f["req_url"])]) ) pile.append(urwid.Columns(req, dividechars=1)) resp = [] resp.append( ("fixed", preamble, urwid.Text("")) ) if f["resp_code"]: codes = { 2: "code_200", 3: "code_300", 4: "code_400", 5: "code_500", } ccol = codes.get(f["resp_code"]/100, "code_other") resp.append(fcol(SYMBOL_RETURN, ccol)) if f["resp_is_replay"]: resp.append(fcol(SYMBOL_REPLAY, "replay")) resp.append(fcol(f["resp_code"], ccol)) if f["intercepting"] and f["resp_code"] and not f["resp_acked"]: rc = "intercept" else: rc = "text" if f["resp_ctype"]: resp.append(fcol(f["resp_ctype"], rc)) resp.append(fcol(f["resp_clen"], rc)) resp.append(fcol(f["resp_rate"], rc)) elif f["err_msg"]: resp.append(fcol(SYMBOL_RETURN, "error")) resp.append( urwid.Text([ ( "error", f["err_msg"] ) ]) ) pile.append(urwid.Columns(resp, dividechars=1)) return urwid.Pile(pile) class FlowCache: 
@utils.LRUCache(200) def format_flow(self, *args): return raw_format_flow(*args) flowcache = FlowCache() def format_flow(f, focus, extended=False, hostheader=False, padding=2): d = dict( intercepting = f.intercepting, req_timestamp = f.request.timestamp_start, req_is_replay = f.request.is_replay(), req_method = f.request.method, req_acked = f.request.reply.acked, req_url = f.request.get_url(hostheader=hostheader), err_msg = f.error.msg if f.error else None, resp_code = f.response.code if f.response else None, ) if f.response: if f.response.content: contentdesc = utils.pretty_size(len(f.response.content)) elif f.response.content == flow.CONTENT_MISSING: contentdesc = "[content missing]" else: contentdesc = "[no content]" delta = f.response.timestamp_end - f.response.timestamp_start size = len(f.response.content) + f.response.get_header_size() rate = utils.pretty_size(size / delta) d.update(dict( resp_code = f.response.code, resp_is_replay = f.response.is_replay(), resp_acked = f.response.reply.acked, resp_clen = contentdesc, resp_rate = "{0}/s".format(rate), )) t = f.response.headers["content-type"] if t: d["resp_ctype"] = t[0].split(";")[0] else: d["resp_ctype"] = "" return flowcache.format_flow(tuple(sorted(d.items())), focus, extended, padding) def int_version(v): SIG = 3 v = urwid.__version__.split("-")[0].split(".") x = 0 for i in range(min(SIG, len(v))): x += int(v[i]) * 10**(SIG-i) return x # We have to do this to be portable over 0.9.8 and 0.9.9 If compatibility # becomes a pain to maintain, we'll just mandate 0.9.9 or newer. 
class WWrap(urwid.WidgetWrap): if int_version(urwid.__version__) >= 990: def set_w(self, x): self._w = x def get_w(self): return self._w w = property(get_w, set_w) <file_sep>import mock from libmproxy import controller class TestMaster: def test_default_handler(self): m = controller.Master(None) msg = mock.MagicMock() m.handle(msg) assert msg.reply.call_count == 1 <file_sep>Flask==0.9 Jinja2==2.7 MarkupSafe==0.18 PIL==1.1.7 Werkzeug==0.8.3 lxml==3.2.1 netlib==0.9 nose==1.3.0 pathod==0.9 pyOpenSSL==0.13 pyasn1==0.1.7 requests==1.2.2 urwid==1.1.1 wsgiref==0.1.2 jsbeautifier==1.4.0 <file_sep>import os, sys import urwid import common, grideditor, contentview from .. import utils, flow, controller def _mkhelp(): text = [] keys = [ ("A", "accept all intercepted flows"), ("a", "accept this intercepted flow"), ("b", "save request/response body"), ("d", "delete flow"), ("D", "duplicate flow"), ("e", "edit request/response"), ("f", "load full body data"), ("m", "change body display mode for this entity"), (None, common.highlight_key("automatic", "a") + [("text", ": automatic detection")] ), (None, common.highlight_key("hex", "e") + [("text", ": Hex")] ), (None, common.highlight_key("html", "h") + [("text", ": HTML")] ), (None, common.highlight_key("image", "i") + [("text", ": Image")] ), (None, common.highlight_key("javascript", "j") + [("text", ": JavaScript")] ), (None, common.highlight_key("json", "s") + [("text", ": JSON")] ), (None, common.highlight_key("urlencoded", "u") + [("text", ": URL-encoded data")] ), (None, common.highlight_key("raw", "r") + [("text", ": raw data")] ), (None, common.highlight_key("xml", "x") + [("text", ": XML")] ), ("M", "change default body display mode"), ("p", "previous flow"), ("r", "replay request"), ("V", "revert changes to request"), ("v", "view body in external viewer"), ("w", "save all flows matching current limit"), ("W", "save this flow"), ("x", "delete body"), ("X", "view flow details"), ("z", "encode/decode a request/response"), 
("tab", "toggle request/response view"), ("space", "next flow"), ("|", "run script on this flow"), ] text.extend(common.format_keyvals(keys, key="key", val="text", indent=4)) return text help_context = _mkhelp() footer = [ ('heading_key', "?"), ":help ", ('heading_key', "q"), ":back ", ] class FlowViewHeader(common.WWrap): def __init__(self, master, f): self.master, self.flow = master, f self.w = common.format_flow(f, False, extended=True, padding=0, hostheader=self.master.showhost) def refresh_flow(self, f): if f == self.flow: self.w = common.format_flow(f, False, extended=True, padding=0, hostheader=self.master.showhost) class CallbackCache: @utils.LRUCache(200) def _callback(self, method, *args, **kwargs): return getattr(self.obj, method)(*args, **kwargs) def callback(self, obj, method, *args, **kwargs): # obj varies! self.obj = obj return self._callback(method, *args, **kwargs) cache = CallbackCache() class FlowView(common.WWrap): REQ = 0 RESP = 1 method_options = [ ("get", "g"), ("post", "p"), ("put", "u"), ("head", "h"), ("trace", "t"), ("delete", "d"), ("options", "o"), ("edit raw", "e"), ] def __init__(self, master, state, flow): self.master, self.state, self.flow = master, state, flow if self.state.view_flow_mode == common.VIEW_FLOW_RESPONSE: self.view_response() else: self.view_request() def _cached_content_view(self, viewmode, hdrItems, content, limit): return contentview.get_content_view(viewmode, hdrItems, content, limit, self.master.add_event) def content_view(self, viewmode, conn): full = self.state.get_flow_setting( self.flow, (self.state.view_flow_mode, "fullcontents"), False ) if full: limit = sys.maxint else: limit = contentview.VIEW_CUTOFF return cache.callback( self, "_cached_content_view", viewmode, tuple(tuple(i) for i in conn.headers.lst), conn.content, limit ) def conn_text(self, conn): txt = common.format_keyvals( [(h+":", v) for (h, v) in conn.headers.lst], key = "header", val = "text" ) if conn.content is not None: override = 
self.state.get_flow_setting( self.flow, (self.state.view_flow_mode, "prettyview"), ) viewmode = self.state.default_body_view if override is None else override if conn.content == flow.CONTENT_MISSING: msg, body = "", [urwid.Text([("error", "[content missing]")])] else: msg, body = self.content_view(viewmode, conn) cols = [ urwid.Text( [ ("heading", msg), ] ) ] if override is not None: cols.append( urwid.Text( [ " ", ('heading', "["), ('heading_key', "m"), ('heading', (":%s]"%viewmode.name)), ], align="right" ) ) title = urwid.AttrWrap(urwid.Columns(cols), "heading") txt.append(title) txt.extend(body) elif conn.content == flow.CONTENT_MISSING: pass return urwid.ListBox(txt) def _tab(self, content, attr): p = urwid.Text(content) p = urwid.Padding(p, align="left", width=("relative", 100)) p = urwid.AttrWrap(p, attr) return p def wrap_body(self, active, body): parts = [] if self.flow.intercepting and not self.flow.request.reply.acked: qt = "Request intercepted" else: qt = "Request" if active == common.VIEW_FLOW_REQUEST: parts.append(self._tab(qt, "heading")) else: parts.append(self._tab(qt, "heading_inactive")) if self.flow.intercepting and self.flow.response and not self.flow.response.reply.acked: st = "Response intercepted" else: st = "Response" if active == common.VIEW_FLOW_RESPONSE: parts.append(self._tab(st, "heading")) else: parts.append(self._tab(st, "heading_inactive")) h = urwid.Columns(parts) f = urwid.Frame( body, header=h ) return f def view_request(self): self.state.view_flow_mode = common.VIEW_FLOW_REQUEST body = self.conn_text(self.flow.request) self.w = self.wrap_body(common.VIEW_FLOW_REQUEST, body) self.master.statusbar.redraw() def view_response(self): self.state.view_flow_mode = common.VIEW_FLOW_RESPONSE if self.flow.response: body = self.conn_text(self.flow.response) else: body = urwid.ListBox( [ urwid.Text(""), urwid.Text( [ ("highlight", "No response. 
Press "), ("key", "e"), ("highlight", " and edit any aspect to add one."), ] ) ] ) self.w = self.wrap_body(common.VIEW_FLOW_RESPONSE, body) self.master.statusbar.redraw() def refresh_flow(self, c=None): if c == self.flow: if self.state.view_flow_mode == common.VIEW_FLOW_RESPONSE and self.flow.response: self.view_response() else: self.view_request() def set_method_raw(self, m): if m: self.flow.request.method = m self.master.refresh_flow(self.flow) def edit_method(self, m): if m == "e": self.master.prompt_edit("Method", self.flow.request.method, self.set_method_raw) else: for i in self.method_options: if i[1] == m: self.flow.request.method = i[0].upper() self.master.refresh_flow(self.flow) def save_body(self, path): if not path: return self.state.last_saveload = path if self.state.view_flow_mode == common.VIEW_FLOW_REQUEST: c = self.flow.request else: c = self.flow.response path = os.path.expanduser(path) try: f = file(path, "wb") f.write(str(c.content)) f.close() except IOError, v: self.master.statusbar.message(v.strerror) def set_url(self, url): request = self.flow.request if not request.set_url(str(url)): return "Invalid URL." 
self.master.refresh_flow(self.flow) def set_resp_code(self, code): response = self.flow.response try: response.code = int(code) except ValueError: return None import BaseHTTPServer if BaseHTTPServer.BaseHTTPRequestHandler.responses.has_key(int(code)): response.msg = BaseHTTPServer.BaseHTTPRequestHandler.responses[int(code)][0] self.master.refresh_flow(self.flow) def set_resp_msg(self, msg): response = self.flow.response response.msg = msg self.master.refresh_flow(self.flow) def set_headers(self, lst, conn): conn.headers = flow.ODictCaseless(lst) def set_query(self, lst, conn): conn.set_query(flow.ODict(lst)) def set_path_components(self, lst, conn): conn.set_path_components([i[0] for i in lst]) def set_form(self, lst, conn): conn.set_form_urlencoded(flow.ODict(lst)) def edit_form(self, conn): self.master.view_grideditor( grideditor.URLEncodedFormEditor(self.master, conn.get_form_urlencoded().lst, self.set_form, conn) ) def edit_form_confirm(self, key, conn): if key == "y": self.edit_form(conn) def edit(self, part): if self.state.view_flow_mode == common.VIEW_FLOW_REQUEST: conn = self.flow.request else: if not self.flow.response: self.flow.response = flow.Response( self.flow.request, self.flow.request.httpversion, 200, "OK", flow.ODictCaseless(), "", None ) self.flow.response.reply = controller.DummyReply() conn = self.flow.response self.flow.backup() if part == "r": c = self.master.spawn_editor(conn.content or "") conn.content = c.rstrip("\n") # what? elif part == "f": if not conn.get_form_urlencoded() and conn.content: self.master.prompt_onekey( "Existing body is not a URL-encoded form. 
Clear and edit?", [ ("yes", "y"), ("no", "n"), ], self.edit_form_confirm, conn ) else: self.edit_form(conn) elif part == "h": self.master.view_grideditor(grideditor.HeaderEditor(self.master, conn.headers.lst, self.set_headers, conn)) elif part == "p": p = conn.get_path_components() p = [[i] for i in p] self.master.view_grideditor(grideditor.PathEditor(self.master, p, self.set_path_components, conn)) elif part == "q": self.master.view_grideditor(grideditor.QueryEditor(self.master, conn.get_query().lst, self.set_query, conn)) elif part == "u" and self.state.view_flow_mode == common.VIEW_FLOW_REQUEST: self.master.prompt_edit("URL", conn.get_url(), self.set_url) elif part == "m" and self.state.view_flow_mode == common.VIEW_FLOW_REQUEST: self.master.prompt_onekey("Method", self.method_options, self.edit_method) elif part == "c" and self.state.view_flow_mode == common.VIEW_FLOW_RESPONSE: self.master.prompt_edit("Code", str(conn.code), self.set_resp_code) elif part == "m" and self.state.view_flow_mode == common.VIEW_FLOW_RESPONSE: self.master.prompt_edit("Message", conn.msg, self.set_resp_msg) self.master.refresh_flow(self.flow) def _view_nextprev_flow(self, np, flow): try: idx = self.state.view.index(flow) except IndexError: return if np == "next": new_flow, new_idx = self.state.get_next(idx) else: new_flow, new_idx = self.state.get_prev(idx) if new_flow is None: self.master.statusbar.message("No more flows!") return self.master.view_flow(new_flow) def view_next_flow(self, flow): return self._view_nextprev_flow("next", flow) def view_prev_flow(self, flow): return self._view_nextprev_flow("prev", flow) def change_this_display_mode(self, t): self.state.add_flow_setting( self.flow, (self.state.view_flow_mode, "prettyview"), contentview.get_by_shortcut(t) ) self.master.refresh_flow(self.flow) def delete_body(self, t): if t == "m": val = flow.CONTENT_MISSING else: val = None if self.state.view_flow_mode == common.VIEW_FLOW_REQUEST: self.flow.request.content = val else: 
self.flow.response.content = val self.master.refresh_flow(self.flow) def keypress(self, size, key): if key == " ": self.view_next_flow(self.flow) return key = common.shortcuts(key) if self.state.view_flow_mode == common.VIEW_FLOW_REQUEST: conn = self.flow.request else: conn = self.flow.response if key == "q": self.master.view_flowlist() key = None elif key == "tab": if self.state.view_flow_mode == common.VIEW_FLOW_REQUEST: self.view_response() else: self.view_request() elif key in ("up", "down", "page up", "page down"): # Why doesn't this just work?? self.w.keypress(size, key) elif key == "a": self.flow.accept_intercept() self.master.view_flow(self.flow) elif key == "A": self.master.accept_all() self.master.view_flow(self.flow) elif key == "b": if conn: if self.state.view_flow_mode == common.VIEW_FLOW_REQUEST: self.master.path_prompt( "Save request body: ", self.state.last_saveload, self.save_body ) else: self.master.path_prompt( "Save response body: ", self.state.last_saveload, self.save_body ) elif key == "d": if self.state.flow_count() == 1: self.master.view_flowlist() elif self.state.view.index(self.flow) == len(self.state.view)-1: self.view_prev_flow(self.flow) else: self.view_next_flow(self.flow) f = self.flow f.kill(self.master) self.state.delete_flow(f) elif key == "D": f = self.master.duplicate_flow(self.flow) self.master.view_flow(f) self.master.statusbar.message("Duplicated.") elif key == "e": if self.state.view_flow_mode == common.VIEW_FLOW_REQUEST: self.master.prompt_onekey( "Edit request", ( ("query", "q"), ("path", "p"), ("url", "u"), ("header", "h"), ("form", "f"), ("raw body", "r"), ("method", "m"), ), self.edit ) else: self.master.prompt_onekey( "Edit response", ( ("code", "c"), ("message", "m"), ("header", "h"), ("raw body", "r"), ), self.edit ) key = None elif key == "f": self.master.statusbar.message("Loading all body data...") self.state.add_flow_setting( self.flow, (self.state.view_flow_mode, "fullcontents"), True ) 
self.master.refresh_flow(self.flow) self.master.statusbar.message("") elif key == "m": p = list(contentview.view_prompts) p.insert(0, ("clear", "c")) self.master.prompt_onekey( "Display mode", p, self.change_this_display_mode ) key = None elif key == "p": self.view_prev_flow(self.flow) elif key == "r": self.flow.backup() r = self.master.replay_request(self.flow) if r: self.master.statusbar.message(r) self.master.refresh_flow(self.flow) elif key == "V": if not self.flow.modified(): self.master.statusbar.message("Flow not modified.") return self.state.revert(self.flow) self.master.refresh_flow(self.flow) self.master.statusbar.message("Reverted.") elif key == "W": self.master.path_prompt( "Save this flow: ", self.state.last_saveload, self.master.save_one_flow, self.flow ) elif key == "v": if conn and conn.content: t = conn.headers["content-type"] or [None] t = t[0] if os.environ.has_key("EDITOR") or os.environ.has_key("PAGER"): self.master.spawn_external_viewer(conn.content, t) else: self.master.statusbar.message("Error! 
Set $EDITOR or $PAGER.") elif key == "|": self.master.path_prompt( "Send flow to script: ", self.state.last_script, self.master.run_script_once, self.flow ) elif key == "x": self.master.prompt_onekey( "Delete body", ( ("completely", "c"), ("mark as missing", "m"), ), self.delete_body ) key = None elif key == "X": self.master.view_flowdetails(self.flow) elif key == "z": if conn: self.flow.backup() e = conn.headers.get_first("content-encoding", "identity") if e != "identity": if not conn.decode(): self.master.statusbar.message("Could not decode - invalid data?") else: self.master.prompt_onekey( "Select encoding: ", ( ("gzip", "z"), ("deflate", "d"), ), self.encode_callback, conn ) self.master.refresh_flow(self.flow) else: return key def encode_callback(self, key, conn): encoding_map = { "z": "gzip", "d": "deflate", } conn.encode(encoding_map[key]) self.master.refresh_flow(self.flow) <file_sep>[rum] branch = True [report] omit = *contrib*, *tnetstring*, *platform*, *console* include = *libmproxy* <file_sep>from libmproxy import encoding def test_identity(): assert "string" == encoding.decode("identity", "string") assert "string" == encoding.encode("identity", "string") assert not encoding.encode("nonexistent", "string") assert None == encoding.decode("nonexistent encoding", "string") def test_gzip(): assert "string" == encoding.decode("gzip", encoding.encode("gzip", "string")) assert None == encoding.decode("gzip", "bogus") def test_deflate(): assert "string" == encoding.decode("deflate", encoding.encode("deflate", "string")) assert "string" == encoding.decode("deflate", encoding.encode("deflate", "string")[2:-4]) assert None == encoding.decode("deflate", "bogus") <file_sep> var = 0 def here(ctx): global var var += 1 return var def errargs(): pass <file_sep>""" This module provides more sophisticated flow tracking. These match requests with their responses, and provide filtering and interception facilities. 
"""
import hashlib, Cookie, cookielib, copy, re, urlparse, os
import time, urllib
import tnetstring, filt, script, utils, encoding, proxy
from email.utils import parsedate_tz, formatdate, mktime_tz
from netlib import odict, http, certutils
import controller, version
import app

# Content type used to detect/emit URL-encoded form bodies.
HDR_FORM_URLENCODED = "application/x-www-form-urlencoded"
# Sentinel for "content existed but was not captured". Deliberately falsy.
CONTENT_MISSING = 0

ODict = odict.ODict
ODictCaseless = odict.ODictCaseless


class ReplaceHooks:
    """
        A set of filter-scoped regex replacement hooks applied to flows.
        Each hook is stored as (fpatt, rex, s, compiled_filter).
    """
    def __init__(self):
        self.lst = []

    def set(self, r):
        # Replace all hooks with the (fpatt, rex, s) triples in r.
        self.clear()
        for i in r:
            self.add(*i)

    def add(self, fpatt, rex, s):
        """
            add a replacement hook.

            fpatt: a string specifying a filter pattern.
            rex: a regular expression.
            s: the replacement string

            returns true if hook was added, false if the pattern could not be
            parsed.
        """
        cpatt = filt.parse(fpatt)
        if not cpatt:
            return False
        # Validate the regex up front so run() can't fail later.
        try:
            re.compile(rex)
        except re.error:
            return False
        self.lst.append((fpatt, rex, s, cpatt))
        return True

    def get_specs(self):
        """
            Retrieve the hook specifications.

            Returns a list of (fpatt, rex, s) tuples.
        """
        return [i[:3] for i in self.lst]

    def count(self):
        return len(self.lst)

    def run(self, f):
        # Apply each matching hook: to the response if one exists, else to
        # the request.
        for _, rex, s, cpatt in self.lst:
            if cpatt(f):
                if f.response:
                    f.response.replace(rex, s)
                else:
                    f.request.replace(rex, s)

    def clear(self):
        self.lst = []


class SetHeaders:
    """
        A set of filter-scoped header-setting hooks applied to flows.
        Each hook is stored as (fpatt, header, value, compiled_filter).
    """
    def __init__(self):
        self.lst = []

    def set(self, r):
        # Replace all hooks with the (fpatt, header, value) triples in r.
        self.clear()
        for i in r:
            self.add(*i)

    def add(self, fpatt, header, value):
        """
            Add a set header hook.

            fpatt: String specifying a filter pattern.
            header: Header name.
            value: Header value string

            Returns True if hook was added, False if the pattern could not be
            parsed.
        """
        cpatt = filt.parse(fpatt)
        if not cpatt:
            return False
        self.lst.append((fpatt, header, value, cpatt))
        return True

    def get_specs(self):
        """
            Retrieve the hook specifications.

            Returns a list of (fpatt, rex, s) tuples.
        """
        return [i[:3] for i in self.lst]

    def count(self):
        return len(self.lst)

    def clear(self):
        self.lst = []

    def run(self, f):
        # Two passes: first delete any existing headers with the hooked
        # names, then add the configured values, so multiple hooks on the
        # same header accumulate rather than clobber each other.
        for _, header, value, cpatt in self.lst:
            if cpatt(f):
                if f.response:
                    del f.response.headers[header]
                else:
                    del f.request.headers[header]
        for _, header, value, cpatt in self.lst:
            if cpatt(f):
                if f.response:
                    f.response.headers.add(header, value)
                else:
                    f.request.headers.add(header, value)


class ScriptContext:
    """
        The API object handed to user scripts; wraps the master.
    """
    def __init__(self, master):
        self._master = master

    def log(self, *args, **kwargs):
        """
            Logs an event.

            How this is handled depends on the front-end. mitmdump will display
            events if the eventlog flag ("-e") was passed. mitmproxy sends
            output to the eventlog for display ("v" keyboard shortcut).
        """
        self._master.add_event(*args, **kwargs)

    def duplicate_flow(self, f):
        """
            Returns a duplicate of the specified flow. The flow is also
            injected into the current state, and is ready for editing, replay,
            etc.
        """
        # Pause scripts while duplicating to avoid re-entrant script hooks.
        self._master.pause_scripts = True
        f = self._master.duplicate_flow(f)
        self._master.pause_scripts = False
        return f

    def replay_request(self, f):
        """
            Replay the request on the current flow. The response will be added
            to the flow object.
        """
        self._master.replay_request(f)


class decoded(object):
    """
        A context manager that decodes a request, response or error, and then
        re-encodes it with the same encoding after execution of the block.

        Example:
        with decoded(request):
            request.content = request.content.replace("foo", "bar")
    """
    def __init__(self, o):
        self.o = o
        # Only remember encodings we know how to reverse.
        ce = o.headers.get_first("content-encoding")
        if ce in encoding.ENCODINGS:
            self.ce = ce
        else:
            self.ce = None

    def __enter__(self):
        if self.ce:
            self.o.decode()

    def __exit__(self, type, value, tb):
        if self.ce:
            self.o.encode(self.ce)


class StateObject:
    # Equality is defined by serialized state; objects without _get_state
    # never compare equal.
    def __eq__(self, other):
        try:
            return self._get_state() == other._get_state()
        except AttributeError:
            return False


class HTTPMsg(StateObject):
    """
        Base class for Request/Response: shared content-encoding and size
        helpers.
    """
    def get_decoded_content(self):
        """
            Returns the decoded content based on the current Content-Encoding
            header.
Doesn't change the message iteself or its headers. """ ce = self.headers.get_first("content-encoding") if not self.content or ce not in encoding.ENCODINGS: return self.content return encoding.decode(ce, self.content) def decode(self): """ Decodes content based on the current Content-Encoding header, then removes the header. If there is no Content-Encoding header, no action is taken. Returns True if decoding succeeded, False otherwise. """ ce = self.headers.get_first("content-encoding") if not self.content or ce not in encoding.ENCODINGS: return False data = encoding.decode( ce, self.content ) if data is None: return False self.content = data del self.headers["content-encoding"] return True def encode(self, e): """ Encodes content with the encoding e, where e is "gzip", "deflate" or "identity". """ # FIXME: Error if there's an existing encoding header? self.content = encoding.encode(e, self.content) self.headers["content-encoding"] = [e] def size(self, **kwargs): """ Size in bytes of a fully rendered message, including headers and HTTP lead-in. """ hl = len(self._assemble_head(**kwargs)) if self.content: return hl + len(self.content) else: return hl def get_content_type(self): return self.headers.get_first("content-type") def get_transmitted_size(self): # FIXME: this is inprecise in case chunking is used # (we should count the chunking headers) if not self.content: return 0 return len(self.content) class Request(HTTPMsg): """ An HTTP request. Exposes the following attributes: client_conn: ClientConnect object, or None if this is a replay. headers: ODictCaseless object content: Content of the request, None, or CONTENT_MISSING if there is content associated, but not present. CONTENT_MISSING evaluates to False to make checking for the presence of content natural. 
scheme: URL scheme (http/https) host: Host portion of the URL port: Destination port path: Path portion of the URL timestamp_start: Seconds since the epoch signifying request transmission started method: HTTP method timestamp_end: Seconds since the epoch signifying request transmission ended tcp_setup_timestamp: Seconds since the epoch signifying remote TCP connection setup completion time (or None, if request didn't results TCP setup) ssl_setup_timestamp: Seconds since the epoch signifying remote SSL encryption setup completion time (or None, if request didn't results SSL setup) """ def __init__(self, client_conn, httpversion, host, port, scheme, method, path, headers, content, timestamp_start=None, timestamp_end=None, tcp_setup_timestamp=None, ssl_setup_timestamp=None): assert isinstance(headers, ODictCaseless) self.client_conn = client_conn self.httpversion = httpversion self.host, self.port, self.scheme = host, port, scheme self.method, self.path, self.headers, self.content = method, path, headers, content self.timestamp_start = timestamp_start or utils.timestamp() self.timestamp_end = max(timestamp_end or utils.timestamp(), timestamp_start) self.close = False self.tcp_setup_timestamp = tcp_setup_timestamp self.ssl_setup_timestamp = ssl_setup_timestamp # Have this request's cookies been modified by sticky cookies or auth? self.stickycookie = False self.stickyauth = False def anticache(self): """ Modifies this request to remove headers that might produce a cached response. That is, we remove ETags and If-Modified-Since headers. """ delheaders = [ "if-modified-since", "if-none-match", ] for i in delheaders: del self.headers[i] def anticomp(self): """ Modifies this request to remove headers that will compress the resource's data. """ self.headers["accept-encoding"] = ["identity"] def constrain_encoding(self): """ Limits the permissible Accept-Encoding values, based on what we can decode appropriately. 
""" if self.headers["accept-encoding"]: self.headers["accept-encoding"] = [', '.join( e for e in encoding.ENCODINGS if e in self.headers["accept-encoding"][0] )] def _set_replay(self): self.client_conn = None def is_replay(self): """ Is this request a replay? """ if self.client_conn: return False else: return True def _load_state(self, state): if state["client_conn"]: if self.client_conn: self.client_conn._load_state(state["client_conn"]) else: self.client_conn = ClientConnect._from_state(state["client_conn"]) else: self.client_conn = None self.host = state["host"] self.port = state["port"] self.scheme = state["scheme"] self.method = state["method"] self.path = state["path"] self.headers = ODictCaseless._from_state(state["headers"]) self.content = state["content"] self.timestamp_start = state["timestamp_start"] self.timestamp_end = state["timestamp_end"] self.tcp_setup_timestamp = state["tcp_setup_timestamp"] self.ssl_setup_timestamp = state["ssl_setup_timestamp"] def _get_state(self): return dict( client_conn = self.client_conn._get_state() if self.client_conn else None, httpversion = self.httpversion, host = self.host, port = self.port, scheme = self.scheme, method = self.method, path = self.path, headers = self.headers._get_state(), content = self.content, timestamp_start = self.timestamp_start, timestamp_end = self.timestamp_end, tcp_setup_timestamp = self.tcp_setup_timestamp, ssl_setup_timestamp = self.ssl_setup_timestamp ) @classmethod def _from_state(klass, state): return klass( ClientConnect._from_state(state["client_conn"]), tuple(state["httpversion"]), str(state["host"]), state["port"], str(state["scheme"]), str(state["method"]), str(state["path"]), ODictCaseless._from_state(state["headers"]), state["content"], state["timestamp_start"], state["timestamp_end"], state["tcp_setup_timestamp"], state["ssl_setup_timestamp"] ) def __hash__(self): return id(self) def copy(self): c = copy.copy(self) c.headers = self.headers.copy() return c def 
get_form_urlencoded(self): """ Retrieves the URL-encoded form data, returning an ODict object. Returns an empty ODict if there is no data or the content-type indicates non-form data. """ if self.content and self.headers.in_any("content-type", HDR_FORM_URLENCODED, True): return ODict(utils.urldecode(self.content)) return ODict([]) def set_form_urlencoded(self, odict): """ Sets the body to the URL-encoded form data, and adds the appropriate content-type header. Note that this will destory the existing body if there is one. """ # FIXME: If there's an existing content-type header indicating a # url-encoded form, leave it alone. self.headers["Content-Type"] = [HDR_FORM_URLENCODED] self.content = utils.urlencode(odict.lst) def get_path_components(self): """ Returns the path components of the URL as a list of strings. Components are unquoted. """ _, _, path, _, _, _ = urlparse.urlparse(self.get_url()) return [urllib.unquote(i) for i in path.split("/") if i] def set_path_components(self, lst): """ Takes a list of strings, and sets the path component of the URL. Components are quoted. """ lst = [urllib.quote(i, safe="") for i in lst] path = "/" + "/".join(lst) scheme, netloc, _, params, query, fragment = urlparse.urlparse(self.get_url()) self.set_url(urlparse.urlunparse([scheme, netloc, path, params, query, fragment])) def get_query(self): """ Gets the request query string. Returns an ODict object. """ _, _, _, _, query, _ = urlparse.urlparse(self.get_url()) if query: return ODict(utils.urldecode(query)) return ODict([]) def set_query(self, odict): """ Takes an ODict object, and sets the request query string. """ scheme, netloc, path, params, _, fragment = urlparse.urlparse(self.get_url()) query = utils.urlencode(odict.lst) self.set_url(urlparse.urlunparse([scheme, netloc, path, params, query, fragment])) def get_url(self, hostheader=False): """ Returns a URL string, constructed from the Request's URL compnents. 
If hostheader is True, we use the value specified in the request Host header to construct the URL. """ if hostheader: host = self.headers.get_first("host") or self.host else: host = self.host host = host.encode("idna") return utils.unparse_url(self.scheme, host, self.port, self.path).encode('ascii') def set_url(self, url): """ Parses a URL specification, and updates the Request's information accordingly. Returns False if the URL was invalid, True if the request succeeded. """ parts = http.parse_url(url) if not parts: return False self.scheme, self.host, self.port, self.path = parts return True def get_cookies(self): cookie_headers = self.headers.get("cookie") if not cookie_headers: return None cookies = [] for header in cookie_headers: pairs = [pair.partition("=") for pair in header.split(';')] cookies.extend((pair[0],(pair[2],{})) for pair in pairs) return dict(cookies) def get_header_size(self): FMT = '%s %s HTTP/%s.%s\r\n%s\r\n' assembled_header = FMT % ( self.method, self.path, self.httpversion[0], self.httpversion[1], str(self.headers) ) return len(assembled_header) def _assemble_head(self, proxy=False): FMT = '%s %s HTTP/%s.%s\r\n%s\r\n' FMT_PROXY = '%s %s://%s:%s%s HTTP/%s.%s\r\n%s\r\n' headers = self.headers.copy() utils.del_all( headers, [ 'proxy-connection', 'keep-alive', 'connection', 'transfer-encoding' ] ) if not 'host' in headers: headers["host"] = [utils.hostport(self.scheme, self.host, self.port)] content = self.content if content: headers["Content-Length"] = [str(len(content))] else: content = "" if self.close: headers["connection"] = ["close"] if not proxy: return FMT % ( self.method, self.path, self.httpversion[0], self.httpversion[1], str(headers) ) else: return FMT_PROXY % ( self.method, self.scheme, self.host, self.port, self.path, self.httpversion[0], self.httpversion[1], str(headers) ) def _assemble(self, _proxy = False): """ Assembles the request for transmission to the server. 
We make some modifications to make sure interception works properly. Returns None if the request cannot be assembled. """ if self.content == CONTENT_MISSING: return None head = self._assemble_head(_proxy) if self.content: return head + self.content else: return head def replace(self, pattern, repl, *args, **kwargs): """ Replaces a regular expression pattern with repl in both the headers and the body of the request. Encoded content will be decoded before replacement, and re-encoded afterwards. Returns the number of replacements made. """ with decoded(self): self.content, c = utils.safe_subn(pattern, repl, self.content, *args, **kwargs) self.path, pc = utils.safe_subn(pattern, repl, self.path, *args, **kwargs) c += pc c += self.headers.replace(pattern, repl, *args, **kwargs) return c class Response(HTTPMsg): """ An HTTP response. Exposes the following attributes: request: Request object. code: HTTP response code msg: HTTP response message headers: ODict object content: Content of the request, None, or CONTENT_MISSING if there is content associated, but not present. CONTENT_MISSING evaluates to False to make checking for the presence of content natural. timestamp_start: Seconds since the epoch signifying response transmission started timestamp_end: Seconds since the epoch signifying response transmission ended """ def __init__(self, request, httpversion, code, msg, headers, content, cert, timestamp_start=None, timestamp_end=None): assert isinstance(headers, ODictCaseless) self.request = request self.httpversion, self.code, self.msg = httpversion, code, msg self.headers, self.content = headers, content self.cert = cert self.timestamp_start = timestamp_start or utils.timestamp() self.timestamp_end = timestamp_end or utils.timestamp() self.replay = False def _refresh_cookie(self, c, delta): """ Takes a cookie string c and a time delta in seconds, and returns a refreshed cookie string. 
        """
        c = Cookie.SimpleCookie(str(c))
        for i in c.values():
            if "expires" in i:
                d = parsedate_tz(i["expires"])
                if d:
                    # Shift the expiry forward by the replay delta.
                    d = mktime_tz(d) + delta
                    i["expires"] = formatdate(d)
                else:
                    # This can happen when the expires tag is invalid.
                    # reddit.com sends an expires tag like this: "Thu, 31 Dec
                    # 2037 23:59:59 GMT", which is valid RFC 1123, but not
                    # strictly correct according to the cookie spec. Browsers
                    # appear to parse this tolerantly - maybe we should too.
                    # For now, we just ignore this.
                    del i["expires"]
        return c.output(header="").strip()

    def refresh(self, now=None):
        """
            This fairly complex and heuristic function refreshes a server
            response for replay.

                - It adjusts date, expires and last-modified headers.
                - It adjusts cookie expiration.
        """
        if not now:
            now = time.time()
        # Seconds elapsed since the original response was received.
        delta = now - self.timestamp_start
        refresh_headers = [
            "date",
            "expires",
            "last-modified",
        ]
        for i in refresh_headers:
            if i in self.headers:
                d = parsedate_tz(self.headers[i][0])
                if d:
                    new = mktime_tz(d) + delta
                    self.headers[i] = [formatdate(new)]
        c = []
        for i in self.headers["set-cookie"]:
            c.append(self._refresh_cookie(i, delta))
        if c:
            self.headers["set-cookie"] = c

    def _set_replay(self):
        self.replay = True

    def is_replay(self):
        """
            Is this response a replay?
""" return self.replay def _load_state(self, state): self.code = state["code"] self.msg = state["msg"] self.headers = ODictCaseless._from_state(state["headers"]) self.content = state["content"] self.timestamp_start = state["timestamp_start"] self.timestamp_end = state["timestamp_end"] self.cert = certutils.SSLCert.from_pem(state["cert"]) if state["cert"] else None def _get_state(self): return dict( httpversion = self.httpversion, code = self.code, msg = self.msg, headers = self.headers._get_state(), timestamp_start = self.timestamp_start, timestamp_end = self.timestamp_end, cert = self.cert.to_pem() if self.cert else None, content = self.content, ) @classmethod def _from_state(klass, request, state): return klass( request, state["httpversion"], state["code"], str(state["msg"]), ODictCaseless._from_state(state["headers"]), state["content"], certutils.SSLCert.from_pem(state["cert"]) if state["cert"] else None, state["timestamp_start"], state["timestamp_end"], ) def copy(self): c = copy.copy(self) c.headers = self.headers.copy() return c def _assemble_head(self): FMT = '%s\r\n%s\r\n' headers = self.headers.copy() utils.del_all( headers, ['proxy-connection', 'transfer-encoding'] ) if self.content: headers["Content-Length"] = [str(len(self.content))] proto = "HTTP/%s.%s %s %s"%(self.httpversion[0], self.httpversion[1], self.code, str(self.msg)) data = (proto, str(headers)) return FMT%data def _assemble(self): """ Assembles the response for transmission to the client. We make some modifications to make sure interception works properly. Returns None if the request cannot be assembled. """ if self.content == CONTENT_MISSING: return None head = self._assemble_head() if self.content: return head + self.content else: return head def replace(self, pattern, repl, *args, **kwargs): """ Replaces a regular expression pattern with repl in both the headers and the body of the response. Encoded content will be decoded before replacement, and re-encoded afterwards. 
Returns the number of replacements made. """ with decoded(self): self.content, c = utils.safe_subn(pattern, repl, self.content, *args, **kwargs) c += self.headers.replace(pattern, repl, *args, **kwargs) return c def get_header_size(self): FMT = '%s\r\n%s\r\n' proto = "HTTP/%s.%s %s %s"%(self.httpversion[0], self.httpversion[1], self.code, str(self.msg)) assembled_header = FMT % (proto, str(self.headers)) return len(assembled_header) def get_cookies(self): cookie_headers = self.headers.get("set-cookie") if not cookie_headers: return None cookies = [] for header in cookie_headers: pairs = [pair.partition("=") for pair in header.split(';')] cookie_name = pairs[0][0] # the key of the first key/value pairs cookie_value = pairs[0][2] # the value of the first key/value pairs cookie_parameters = {key.strip().lower():value.strip() for key,sep,value in pairs[1:]} cookies.append((cookie_name, (cookie_value, cookie_parameters))) return dict(cookies) class ClientDisconnect: """ A client disconnection event. Exposes the following attributes: client_conn: ClientConnect object. """ def __init__(self, client_conn): self.client_conn = client_conn class ClientConnect(StateObject): """ A single client connection. Each connection can result in multiple HTTP Requests. Exposes the following attributes: address: (address, port) tuple, or None if the connection is replayed. requestcount: Number of requests created by this client connection. close: Is the client connection closed? error: Error string or None. """ def __init__(self, address): """ address is an (address, port) tuple, or None if this connection has been replayed from within mitmproxy. 
""" self.address = address self.close = False self.requestcount = 0 self.error = None def __str__(self): if self.address: return "%s:%d"%(self.address[0],self.address[1]) def _load_state(self, state): self.close = True self.error = state["error"] self.requestcount = state["requestcount"] def _get_state(self): return dict( address = list(self.address), requestcount = self.requestcount, error = self.error, ) @classmethod def _from_state(klass, state): if state: k = klass(state["address"]) k._load_state(state) return k else: return None def copy(self): return copy.copy(self) class Error(StateObject): """ An Error. This is distinct from an HTTP error response (say, a code 500), which is represented by a normal Response object. This class is responsible for indicating errors that fall outside of normal HTTP communications, like interrupted connections, timeouts, protocol errors. Exposes the following attributes: request: Request object msg: Message describing the error timestamp: Seconds since the epoch """ def __init__(self, request, msg, timestamp=None): self.request, self.msg = request, msg self.timestamp = timestamp or utils.timestamp() def _load_state(self, state): self.msg = state["msg"] self.timestamp = state["timestamp"] def copy(self): c = copy.copy(self) return c def _get_state(self): return dict( msg = self.msg, timestamp = self.timestamp, ) @classmethod def _from_state(klass, request, state): return klass( request, state["msg"], state["timestamp"], ) def replace(self, pattern, repl, *args, **kwargs): """ Replaces a regular expression pattern with repl in both the headers and the body of the request. Returns the number of replacements made. FIXME: Is replace useful on an Error object?? 
""" self.msg, c = utils.safe_subn(pattern, repl, self.msg, *args, **kwargs) return c class ClientPlaybackState: def __init__(self, flows, exit): self.flows, self.exit = flows, exit self.current = None def count(self): return len(self.flows) def done(self): if len(self.flows) == 0 and not self.current: return True return False def clear(self, flow): """ A request has returned in some way - if this is the one we're servicing, go to the next flow. """ if flow is self.current: self.current = None def tick(self, master, testing=False): """ testing: Disables actual replay for testing. """ if self.flows and not self.current: n = self.flows.pop(0) n.request.reply = controller.DummyReply() n.request.client_conn = None self.current = master.handle_request(n.request) if not testing and not self.current.response: master.replay_request(self.current) # pragma: no cover elif self.current.response: master.handle_response(self.current.response) class ServerPlaybackState: def __init__(self, headers, flows, exit, nopop): """ headers: Case-insensitive list of request headers that should be included in request-response matching. """ self.headers, self.exit, self.nopop = headers, exit, nopop self.fmap = {} for i in flows: if i.response: l = self.fmap.setdefault(self._hash(i), []) l.append(i) def count(self): return sum(len(i) for i in self.fmap.values()) def _hash(self, flow): """ Calculates a loose hash of the flow request. """ r = flow.request key = [ str(r.host), str(r.port), str(r.scheme), str(r.method), str(r.path), str(r.content), ] if self.headers: hdrs = [] for i in self.headers: v = r.headers[i] # Slightly subtle: we need to convert everything to strings # to prevent a mismatch between unicode/non-unicode. v = [str(x) for x in v] hdrs.append((i, v)) key.append(repr(hdrs)) return hashlib.sha256(repr(key)).digest() def next_flow(self, request): """ Returns the next flow object, or None if no matching flow was found. 
""" l = self.fmap.get(self._hash(request)) if not l: return None if self.nopop: return l[0] else: return l.pop(0) class StickyCookieState: def __init__(self, flt): """ flt: Compiled filter. """ self.jar = {} self.flt = flt def ckey(self, m, f): """ Returns a (domain, port, path) tuple. """ return ( m["domain"] or f.request.host, f.request.port, m["path"] or "/" ) def domain_match(self, a, b): if cookielib.domain_match(a, b): return True elif cookielib.domain_match(a, b.strip(".")): return True return False def handle_response(self, f): for i in f.response.headers["set-cookie"]: # FIXME: We now know that Cookie.py screws up some cookies with # valid RFC 822/1123 datetime specifications for expiry. Sigh. c = Cookie.SimpleCookie(str(i)) m = c.values()[0] k = self.ckey(m, f) if self.domain_match(f.request.host, k[0]): self.jar[self.ckey(m, f)] = m def handle_request(self, f): l = [] if f.match(self.flt): for i in self.jar.keys(): match = [ self.domain_match(f.request.host, i[0]), f.request.port == i[1], f.request.path.startswith(i[2]) ] if all(match): l.append(self.jar[i].output(header="").strip()) if l: f.request.stickycookie = True f.request.headers["cookie"] = l class StickyAuthState: def __init__(self, flt): """ flt: Compiled filter. """ self.flt = flt self.hosts = {} def handle_request(self, f): if "authorization" in f.request.headers: self.hosts[f.request.host] = f.request.headers["authorization"] elif f.match(self.flt): if f.request.host in self.hosts: f.request.headers["authorization"] = self.hosts[f.request.host] class Flow: """ A Flow is a collection of objects representing a single HTTP transaction. The main attributes are: request: Request object response: Response object error: Error object Note that it's possible for a Flow to have both a response and an error object. This might happen, for instance, when a response was received from the server, but there was an error sending it back to the client. 
The following additional attributes are exposed: intercepting: Is this flow currently being intercepted? """ def __init__(self, request): self.request = request self.response, self.error = None, None self.intercepting = False self._backup = None def copy(self): rc = self.request.copy() f = Flow(rc) if self.response: f.response = self.response.copy() f.response.request = rc if self.error: f.error = self.error.copy() f.error.request = rc return f @classmethod def _from_state(klass, state): f = klass(None) f._load_state(state) return f def _get_state(self): d = dict( request = self.request._get_state() if self.request else None, response = self.response._get_state() if self.response else None, error = self.error._get_state() if self.error else None, version = version.IVERSION ) return d def _load_state(self, state): if self.request: self.request._load_state(state["request"]) else: self.request = Request._from_state(state["request"]) if state["response"]: if self.response: self.response._load_state(state["response"]) else: self.response = Response._from_state(self.request, state["response"]) else: self.response = None if state["error"]: if self.error: self.error._load_state(state["error"]) else: self.error = Error._from_state(self.request, state["error"]) else: self.error = None def modified(self): """ Has this Flow been modified? """ # FIXME: Save a serialization in backup, compare current with # backup to detect if flow has _really_ been modified. if self._backup: return True else: return False def backup(self, force=False): """ Save a backup of this Flow, which can be reverted to using a call to .revert(). """ if not self._backup: self._backup = self._get_state() def revert(self): """ Revert to the last backed up state. """ if self._backup: self._load_state(self._backup) self._backup = None def match(self, f): """ Match this flow against a compiled filter expression. Returns True if matched, False if not. If f is a string, it will be compiled as a filter expression. 
If the expression is invalid, ValueError is raised. """ if isinstance(f, basestring): f = filt.parse(f) if not f: raise ValueError("Invalid filter expression.") if f: return f(self) return True def kill(self, master): """ Kill this request. """ self.error = Error(self.request, "Connection killed") self.error.reply = controller.DummyReply() if self.request and not self.request.reply.acked: self.request.reply(proxy.KILL) elif self.response and not self.response.reply.acked: self.response.reply(proxy.KILL) master.handle_error(self.error) self.intercepting = False def intercept(self): """ Intercept this Flow. Processing will stop until accept_intercept is called. """ self.intercepting = True def accept_intercept(self): """ Continue with the flow - called after an intercept(). """ if self.request: if not self.request.reply.acked: self.request.reply() elif self.response and not self.response.reply.acked: self.response.reply() self.intercepting = False def replace(self, pattern, repl, *args, **kwargs): """ Replaces a regular expression pattern with repl in all parts of the flow. Encoded content will be decoded before replacement, and re-encoded afterwards. Returns the number of replacements made. """ c = self.request.replace(pattern, repl, *args, **kwargs) if self.response: c += self.response.replace(pattern, repl, *args, **kwargs) if self.error: c += self.error.replace(pattern, repl, *args, **kwargs) return c class State(object): def __init__(self): self._flow_map = {} self._flow_list = [] self.view = [] # These are compiled filt expressions: self._limit = None self.intercept = None self._limit_txt = None @property def limit_txt(self): return self._limit_txt def flow_count(self): return len(self._flow_map) def index(self, f): return self._flow_list.index(f) def active_flow_count(self): c = 0 for i in self._flow_list: if not i.response and not i.error: c += 1 return c def add_request(self, req): """ Add a request to the state. Returns the matching flow. 
""" f = Flow(req) self._flow_list.append(f) self._flow_map[req] = f assert len(self._flow_list) == len(self._flow_map) if f.match(self._limit): self.view.append(f) return f def add_response(self, resp): """ Add a response to the state. Returns the matching flow. """ f = self._flow_map.get(resp.request) if not f: return False f.response = resp if f.match(self._limit) and not f in self.view: self.view.append(f) return f def add_error(self, err): """ Add an error response to the state. Returns the matching flow, or None if there isn't one. """ f = self._flow_map.get(err.request) if not f: return None f.error = err if f.match(self._limit) and not f in self.view: self.view.append(f) return f def load_flows(self, flows): self._flow_list.extend(flows) for i in flows: self._flow_map[i.request] = i self.recalculate_view() def set_limit(self, txt): if txt: f = filt.parse(txt) if not f: return "Invalid filter expression." self._limit = f self._limit_txt = txt else: self._limit = None self._limit_txt = None self.recalculate_view() def set_intercept(self, txt): if txt: f = filt.parse(txt) if not f: return "Invalid filter expression." 
self.intercept = f self.intercept_txt = txt else: self.intercept = None self.intercept_txt = None def recalculate_view(self): if self._limit: self.view = [i for i in self._flow_list if i.match(self._limit)] else: self.view = self._flow_list[:] def delete_flow(self, f): if f.request in self._flow_map: del self._flow_map[f.request] self._flow_list.remove(f) if f in self.view: self.view.remove(f) return True def clear(self): for i in self._flow_list[:]: self.delete_flow(i) def accept_all(self): for i in self._flow_list[:]: i.accept_intercept() def revert(self, f): f.revert() def killall(self, master): for i in self._flow_list: i.kill(master) class FlowMaster(controller.Master): def __init__(self, server, state): controller.Master.__init__(self, server) self.state = state self.server_playback = None self.client_playback = None self.kill_nonreplay = False self.script = None self.pause_scripts = False self.stickycookie_state = False self.stickycookie_txt = None self.stickyauth_state = False self.stickyauth_txt = None self.anticache = False self.anticomp = False self.refresh_server_playback = False self.replacehooks = ReplaceHooks() self.setheaders = SetHeaders() self.stream = None app.mapp.config["PMASTER"] = self def start_app(self, domain, ip): self.server.apps.add( app.mapp, domain, 80 ) self.server.apps.add( app.mapp, ip, 80 ) def add_event(self, e, level="info"): """ level: info, error """ pass def get_script(self, path): """ Returns an (error, script) tuple. """ s = script.Script(path, ScriptContext(self)) try: s.load() except script.ScriptError, v: return (v.args[0], None) ret = s.run("start") if not ret[0] and ret[1]: return ("Error in script start:\n\n" + ret[1][1], None) return (None, s) def load_script(self, path): """ Loads a script. Returns an error description if something went wrong. If path is None, the current script is terminated. 
""" if path is None: self.run_script_hook("done") self.script = None else: r = self.get_script(path) if r[0]: return r[0] else: if self.script: self.run_script_hook("done") self.script = r[1] def set_stickycookie(self, txt): if txt: flt = filt.parse(txt) if not flt: return "Invalid filter expression." self.stickycookie_state = StickyCookieState(flt) self.stickycookie_txt = txt else: self.stickycookie_state = None self.stickycookie_txt = None def set_stickyauth(self, txt): if txt: flt = filt.parse(txt) if not flt: return "Invalid filter expression." self.stickyauth_state = StickyAuthState(flt) self.stickyauth_txt = txt else: self.stickyauth_state = None self.stickyauth_txt = None def start_client_playback(self, flows, exit): """ flows: List of flows. """ self.client_playback = ClientPlaybackState(flows, exit) def stop_client_playback(self): self.client_playback = None def start_server_playback(self, flows, kill, headers, exit, nopop): """ flows: List of flows. kill: Boolean, should we kill requests not part of the replay? """ self.server_playback = ServerPlaybackState(headers, flows, exit, nopop) self.kill_nonreplay = kill def stop_server_playback(self): if self.server_playback.exit: self.shutdown() self.server_playback = None def do_server_playback(self, flow): """ This method should be called by child classes in the handle_request handler. Returns True if playback has taken place, None if not. 
""" if self.server_playback: rflow = self.server_playback.next_flow(flow) if not rflow: return None response = Response._from_state(flow.request, rflow.response._get_state()) response._set_replay() flow.response = response if self.refresh_server_playback: response.refresh() flow.request.reply(response) if self.server_playback.count() == 0: self.stop_server_playback() return True return None def tick(self, q): if self.client_playback: e = [ self.client_playback.done(), self.client_playback.exit, self.state.active_flow_count() == 0 ] if all(e): self.shutdown() self.client_playback.tick(self) return controller.Master.tick(self, q) def duplicate_flow(self, f): return self.load_flow(f.copy()) def load_flow(self, f): """ Loads a flow, and returns a new flow object. """ if f.request: f.request.reply = controller.DummyReply() fr = self.handle_request(f.request) if f.response: f.response.reply = controller.DummyReply() self.handle_response(f.response) if f.error: f.error.reply = controller.DummyReply() self.handle_error(f.error) return fr def load_flows(self, fr): """ Load flows from a FlowReader object. """ for i in fr.stream(): self.load_flow(i) def process_new_request(self, f): if self.stickycookie_state: self.stickycookie_state.handle_request(f) if self.stickyauth_state: self.stickyauth_state.handle_request(f) if self.anticache: f.request.anticache() if self.anticomp: f.request.anticomp() if self.server_playback: pb = self.do_server_playback(f) if not pb: if self.kill_nonreplay: f.kill(self) else: f.request.reply() def process_new_response(self, f): if self.stickycookie_state: self.stickycookie_state.handle_response(f) def replay_request(self, f, block=False): """ Returns None if successful, or error message if not. """ if f.intercepting: return "Can't replay while intercepting..." if f.request.content == CONTENT_MISSING: return "Can't replay request with missing content..." 
if f.request: f.request._set_replay() if f.request.content: f.request.headers["Content-Length"] = [str(len(f.request.content))] f.response = None f.error = None self.process_new_request(f) rt = proxy.RequestReplayThread( self.server.config, f, self.masterq, ) rt.start() # pragma: no cover if block: rt.join() def run_script_hook(self, name, *args, **kwargs): if self.script and not self.pause_scripts: ret = self.script.run(name, *args, **kwargs) if not ret[0] and ret[1]: e = "Script error:\n" + ret[1][1] self.add_event(e, "error") def handle_clientconnect(self, cc): self.run_script_hook("clientconnect", cc) cc.reply() def handle_clientdisconnect(self, r): self.run_script_hook("clientdisconnect", r) r.reply() def handle_error(self, r): f = self.state.add_error(r) if f: self.run_script_hook("error", f) if self.client_playback: self.client_playback.clear(f) r.reply() return f def handle_request(self, r): f = self.state.add_request(r) self.replacehooks.run(f) self.setheaders.run(f) self.run_script_hook("request", f) self.process_new_request(f) return f def handle_response(self, r): f = self.state.add_response(r) if f: self.replacehooks.run(f) self.setheaders.run(f) self.run_script_hook("response", f) if self.client_playback: self.client_playback.clear(f) self.process_new_response(f) if self.stream: self.stream.add(f) else: r.reply() return f def shutdown(self): if self.script: self.load_script(None) controller.Master.shutdown(self) if self.stream: for i in self.state._flow_list: if not i.response: self.stream.add(i) self.stop_stream() def start_stream(self, fp, filt): self.stream = FilteredFlowWriter(fp, filt) def stop_stream(self): self.stream.fo.close() self.stream = None class FlowWriter: def __init__(self, fo): self.fo = fo def add(self, flow): d = flow._get_state() tnetstring.dump(d, self.fo) class FlowReadError(Exception): @property def strerror(self): return self.args[0] class FlowReader: def __init__(self, fo): self.fo = fo def stream(self): """ Yields Flow 
objects from the dump. """ off = 0 try: while 1: data = tnetstring.load(self.fo) if tuple(data["version"][:2]) != version.IVERSION[:2]: v = ".".join(str(i) for i in data["version"]) raise FlowReadError("Incompatible serialized data version: %s"%v) off = self.fo.tell() yield Flow._from_state(data) except ValueError, v: # Error is due to EOF if self.fo.tell() == off and self.fo.read() == '': return raise FlowReadError("Invalid data format.") class FilteredFlowWriter: def __init__(self, fo, filt): self.fo = fo self.filt = filt def add(self, f): if self.filt and not f.match(self.filt): return d = f._get_state() tnetstring.dump(d, self.fo) <file_sep>#!/usr/bin/env python import sys sys.path.insert(0, "../..") import socket, tempfile, ssl, subprocess addr = socket.gethostbyname(sys.argv[1]) print ssl.get_server_certificate((addr, 443)) <file_sep>#!/usr/bin/env python """ Zap encoding in requests and inject iframe after body tag in html responses. Usage: iframe_injector http://someurl/somefile.html """ from libmproxy import controller, proxy import os import sys class InjectingMaster(controller.Master): def __init__(self, server, iframe_url): controller.Master.__init__(self, server) self._iframe_url = iframe_url def run(self): try: return controller.Master.run(self) except KeyboardInterrupt: self.shutdown() def handle_request(self, msg): if 'Accept-Encoding' in msg.headers: msg.headers["Accept-Encoding"] = 'none' msg.reply() def handle_response(self, msg): if msg.content: c = msg.replace('<body>', '<body><iframe src="%s" frameborder="0" height="0" width="0"></iframe>' % self._iframe_url) if c > 0: print 'Iframe injected!' msg.reply() def main(argv): if len(argv) != 2: print "Usage: %s IFRAME_URL" % argv[0] sys.exit(1) iframe_url = argv[1] config = proxy.ProxyConfig( cacert = os.path.expanduser("~/.mitmproxy/mitmproxy-ca.pem") ) server = proxy.ProxyServer(config, 8080) print 'Starting proxy...' 
m = InjectingMaster(server, iframe_url) m.run() if __name__ == '__main__': main(sys.argv) <file_sep>import os, sys import countershape from countershape import Page, Directory, PythonModule, markup, model import countershape.template sys.path.insert(0, "..") from libmproxy import filt MITMPROXY_SRC = "~/mitmproxy/mitmproxy" if ns.options.website: ns.idxpath = "doc/index.html" this.layout = countershape.Layout("_websitelayout.html") else: ns.idxpath = "index.html" this.layout = countershape.Layout("_layout.html") ns.title = countershape.template.Template(None, "<h1>@!this.title!@</h1>") this.titlePrefix = "mitmproxy 0.9 - " this.markup = markup.Markdown(extras=["footnotes"]) ns.docMaintainer = "<NAME>" ns.docMaintainerEmail = "<EMAIL>" ns.copyright = u"\u00a9 mitmproxy project, 2013" def mpath(p): p = os.path.join(MITMPROXY_SRC, p) return os.path.expanduser(p) ns.index_contents = file(mpath("README.mkd")).read() def example(s): d = file(mpath(s)).read().rstrip() extemp = """<div class="example">%s<div class="example_legend">(%s)</div></div>""" return extemp%(countershape.template.Syntax("py")(d), s) ns.example = example filt_help = [] for i in filt.filt_unary: filt_help.append( ("~%s"%i.code, i.help) ) for i in filt.filt_rex: filt_help.append( ("~%s regex"%i.code, i.help) ) for i in filt.filt_int: filt_help.append( ("~%s int"%i.code, i.help) ) filt_help.sort() filt_help.extend( [ ("!", "unary not"), ("&", "and"), ("|", "or"), ("(...)", "grouping"), ] ) ns.filt_help = filt_help def nav(page, current, state): if current.match(page, False): pre = '<li class="active">' else: pre = "<li>" p = state.application.getPage(page) return pre + '<a href="%s">%s</a></li>'%(model.UrlTo(page), p.title) ns.nav = nav pages = [ Page("index.html", "Introduction"), Page("install.html", "Installation"), Page("mitmproxy.html", "mitmproxy"), Page("mitmdump.html", "mitmdump"), Page("howmitmproxy.html", "How mitmproxy works"), Page("ssl.html", "Overview"), Directory("certinstall"), 
Directory("scripting"), Directory("tutorials"), Page("transparent.html", "Overview"), Directory("transparent"), ] <file_sep>def request(ctx, flow): f = ctx.duplicate_flow(flow) f.request.path = "/changed" ctx.replay_request(f) <file_sep>#!/usr/bin/env python """ This example shows how to graft a WSGI app onto mitmproxy. In this instance, we're using the Bottle framework (http://bottlepy.org/) to expose a single simplest-possible page. """ import bottle import os from libmproxy import proxy, flow @bottle.route('/') def index(): return 'Hi!' class MyMaster(flow.FlowMaster): def run(self): try: flow.FlowMaster.run(self) except KeyboardInterrupt: self.shutdown() def handle_request(self, r): f = flow.FlowMaster.handle_request(self, r) if f: r.reply() return f def handle_response(self, r): f = flow.FlowMaster.handle_response(self, r) if f: r.reply() print f return f config = proxy.ProxyConfig( cacert = os.path.expanduser("~/.mitmproxy/mitmproxy-ca.pem") ) state = flow.State() server = proxy.ProxyServer(config, 8080) # Register the app using the magic domain "proxapp" on port 80. Requests to # this domain and port combination will now be routed to the WSGI app instance. server.apps.add(bottle.app(), "proxapp", 80) m = MyMaster(server, state) m.run() <file_sep>import os, traceback class ScriptError(Exception): pass class Script: """ The instantiator should do something along this vein: s = Script(path, master) s.load() s.run("start") """ def __init__(self, path, ctx): self.path, self.ctx = path, ctx self.ns = None def load(self): """ Loads a module. Raises ScriptError on failure, with argument equal to an error message that may be a formatted traceback. 
""" path = os.path.expanduser(self.path) if not os.path.exists(path): raise ScriptError("No such file: %s"%self.path) if not os.path.isfile(path): raise ScriptError("Not a file: %s"%self.path) ns = {} try: execfile(path, ns, ns) except Exception, v: raise ScriptError(traceback.format_exc(v)) self.ns = ns def run(self, name, *args, **kwargs): """ Runs a plugin method. Returns: (True, retval) on success. (False, None) on nonexistent method. (False, (exc, traceback string)) if there was an exception. """ f = self.ns.get(name) if f: try: return (True, f(self.ctx, *args, **kwargs)) except Exception, v: return (False, (v, traceback.format_exc(v))) else: return (False, None) <file_sep> How to install the __mitmproxy__ certificate authority in the IOS simulator: <ol class="tlist"> <li> First, check out the <a href="https://github.com/ADVTOOLS/ADVTrustStore">ADVTrustStore</a> tool from github.</li> <li> Now, run the following command: <pre class="terminal">./iosCertTrustManager.py -a ~/.mitmproxy/mitmproxy-ca-cert.pem</pre> </li> </ol> Note that although the IOS simulator has its own certificate store, it shares the proxy settings of the host operating system. You will therefore to have configure your OSX host's proxy settings to use the mitmproxy instance you want to test with. <file_sep>import socket, time import mock from netlib import tcp, http_auth, http from libpathod import pathoc, pathod import tutils, tservers from libmproxy import flow, proxy """ Note that the choice of response code in these tests matters more than you might think. libcurl treats a 304 response code differently from, say, a 200 response code - it will correctly terminate a 304 response with no content-length header, whereas it will block forever waiting for content for a 200 response. 
""" class CommonMixin: def test_large(self): assert len(self.pathod("200:b@50k").content) == 1024*50 def test_replay(self): assert self.pathod("304").status_code == 304 assert len(self.master.state.view) == 1 l = self.master.state.view[0] assert l.response.code == 304 l.request.path = "/p/305" rt = self.master.replay_request(l, block=True) assert l.response.code == 305 # Disconnect error l.request.path = "/p/305:d0" rt = self.master.replay_request(l, block=True) assert l.error # Port error l.request.port = 1 self.master.replay_request(l, block=True) assert l.error def test_http(self): f = self.pathod("304") assert f.status_code == 304 l = self.master.state.view[0] assert l.request.client_conn.address assert "host" in l.request.headers assert l.response.code == 304 def test_invalid_http(self): t = tcp.TCPClient("127.0.0.1", self.proxy.port) t.connect() t.wfile.write("invalid\r\n\r\n") t.wfile.flush() assert "Bad Request" in t.rfile.readline() class AppMixin: def test_app(self): ret = self.app("/") assert ret.status_code == 200 assert "mitmproxy" in ret.content class TestHTTP(tservers.HTTPProxTest, CommonMixin, AppMixin): def test_app_err(self): p = self.pathoc() ret = p.request("get:'http://errapp/'") assert ret.status_code == 500 assert "ValueError" in ret.content def test_invalid_connect(self): t = tcp.TCPClient("127.0.0.1", self.proxy.port) t.connect() t.wfile.write("CONNECT invalid\n\n") t.wfile.flush() assert "Bad Request" in t.rfile.readline() def test_upstream_ssl_error(self): p = self.pathoc() ret = p.request("get:'https://localhost:%s/'"%self.server.port) assert ret.status_code == 400 def test_connection_close(self): # Add a body, so we have a content-length header, which combined with # HTTP1.1 means the connection is kept alive. 
response = '%s/p/200:b@1'%self.server.urlbase # Lets sanity check that the connection does indeed stay open by # issuing two requests over the same connection p = self.pathoc() assert p.request("get:'%s'"%response) assert p.request("get:'%s'"%response) # Now check that the connection is closed as the client specifies p = self.pathoc() assert p.request("get:'%s':h'Connection'='close'"%response) tutils.raises("disconnect", p.request, "get:'%s'"%response) def test_reconnect(self): req = "get:'%s/p/200:b@1:da'"%self.server.urlbase p = self.pathoc() assert p.request(req) # Server has disconnected. Mitmproxy should detect this, and reconnect. assert p.request(req) assert p.request(req) # However, if the server disconnects on our first try, it's an error. req = "get:'%s/p/200:b@1:d0'"%self.server.urlbase p = self.pathoc() tutils.raises("server disconnect", p.request, req) def test_proxy_ioerror(self): # Tests a difficult-to-trigger condition, where an IOError is raised # within our read loop. with mock.patch("libmproxy.proxy.ProxyHandler.read_request") as m: m.side_effect = IOError("error!") tutils.raises("server disconnect", self.pathod, "304") def test_get_connection_switching(self): def switched(l): for i in l: if "switching" in i: return True req = "get:'%s/p/200:b@1'" p = self.pathoc() assert p.request(req%self.server.urlbase) assert p.request(req%self.server2.urlbase) assert switched(self.proxy.log) def test_get_connection_err(self): p = self.pathoc() ret = p.request("get:'http://localhost:0'") assert ret.status_code == 502 def test_blank_leading_line(self): p = self.pathoc() req = "get:'%s/p/201':i0,'\r\n'" assert p.request(req%self.server.urlbase).status_code == 201 def test_invalid_headers(self): p = self.pathoc() req = p.request("get:'http://foo':h':foo'='bar'") assert req.status_code == 400 class TestHTTPAuth(tservers.HTTPProxTest): authenticator = http_auth.BasicProxyAuth(http_auth.PassManSingleUser("test", "test"), "realm") def test_auth(self): assert 
self.pathod("202").status_code == 407 p = self.pathoc() ret = p.request(""" get 'http://localhost:%s/p/202' h'%s'='%s' """%( self.server.port, http_auth.BasicProxyAuth.AUTH_HEADER, http.assemble_http_basic_auth("basic", "test", "test") )) assert ret.status_code == 202 class TestHTTPConnectSSLError(tservers.HTTPProxTest): certfile = True def test_go(self): p = self.pathoc() req = "connect:'localhost:%s'"%self.proxy.port assert p.request(req).status_code == 200 assert p.request(req).status_code == 400 class TestHTTPS(tservers.HTTPProxTest, CommonMixin): ssl = True ssloptions = pathod.SSLOptions(request_client_cert=True) clientcerts = True def test_clientcert(self): f = self.pathod("304") assert f.status_code == 304 assert self.server.last_log()["request"]["clientcert"]["keyinfo"] def test_sni(self): f = self.pathod("304", sni="testserver.com") assert f.status_code == 304 l = self.server.last_log() assert self.server.last_log()["request"]["sni"] == "testserver.com" def test_error_post_connect(self): p = self.pathoc() assert p.request("get:/:i0,'invalid\r\n\r\n'").status_code == 400 class TestHTTPSNoUpstream(tservers.HTTPProxTest, CommonMixin): ssl = True no_upstream_cert = True def test_cert_gen_error(self): f = self.pathoc_raw() f.connect((u"foo..bar".encode("utf8"), 0)) f.request("get:/") assert "dummy cert" in "".join(self.proxy.log) class TestHTTPSCertfile(tservers.HTTPProxTest, CommonMixin): ssl = True certfile = True def test_certfile(self): assert self.pathod("304") class TestReverse(tservers.ReverseProxTest, CommonMixin): reverse = True class TestTransparent(tservers.TransparentProxTest, CommonMixin): ssl = False class TestTransparentSSL(tservers.TransparentProxTest, CommonMixin): ssl = True def test_sni(self): f = self.pathod("304", sni="testserver.com") assert f.status_code == 304 l = self.server.last_log() assert self.server.last_log()["request"]["sni"] == "testserver.com" def test_sslerr(self): p = pathoc.Pathoc("localhost", self.proxy.port) p.connect() 
assert p.request("get:/").status_code == 400 class TestProxy(tservers.HTTPProxTest): def test_http(self): f = self.pathod("304") assert f.status_code == 304 l = self.master.state.view[0] assert l.request.client_conn.address assert "host" in l.request.headers assert l.response.code == 304 def test_response_timestamps(self): # test that we notice at least 2 sec delay between timestamps # in response object f = self.pathod("304:b@1k:p50,1") assert f.status_code == 304 response = self.master.state.view[0].response assert 1 <= response.timestamp_end - response.timestamp_start <= 1.2 def test_request_timestamps(self): # test that we notice a delay between timestamps in request object connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM) connection.connect(("127.0.0.1", self.proxy.port)) # call pathod server, wait a second to complete the request connection.send("GET http://localhost:%d/p/304:b@1k HTTP/1.1\r\n"%self.server.port) connection.send("\r\n"); connection.recv(50000) connection.close() request, response = self.master.state.view[0].request, self.master.state.view[0].response assert response.code == 304 # sanity test for our low level request assert request.timestamp_end - request.timestamp_start > 0 def test_request_timestamps_not_affected_by_client_time(self): # test that don't include user wait time in request's timestamps f = self.pathod("304:b@10k") assert f.status_code == 304 f = self.pathod("304:b@10k") assert f.status_code == 304 request = self.master.state.view[0].request assert request.timestamp_end - request.timestamp_start <= 0.1 request = self.master.state.view[1].request assert request.timestamp_end - request.timestamp_start <= 0.1 def test_request_tcp_setup_timestamp_presence(self): # tests that the first request in a tcp connection has a tcp_setup_timestamp # while others do not connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM) connection.connect(("localhost", self.proxy.port)) connection.send("GET 
http://localhost:%d/p/304:b@1k HTTP/1.1\r\n"%self.server.port) connection.send("\r\n"); connection.recv(5000) connection.send("GET http://localhost:%d/p/304:b@1k HTTP/1.1\r\n"%self.server.port) connection.send("\r\n"); connection.recv(5000) connection.close() first_request = self.master.state.view[0].request second_request = self.master.state.view[1].request assert first_request.tcp_setup_timestamp assert first_request.ssl_setup_timestamp == None assert second_request.tcp_setup_timestamp == None assert second_request.ssl_setup_timestamp == None class TestProxySSL(tservers.HTTPProxTest): ssl=True def test_request_ssl_setup_timestamp_presence(self): # tests that the ssl timestamp is present when ssl is used f = self.pathod("304:b@10k") assert f.status_code == 304 first_request = self.master.state.view[0].request assert first_request.ssl_setup_timestamp class MasterFakeResponse(tservers.TestMaster): def handle_request(self, m): resp = tutils.tresp() m.reply(resp) class TestFakeResponse(tservers.HTTPProxTest): masterclass = MasterFakeResponse def test_fake(self): f = self.pathod("200") assert "header_response" in f.headers.keys() class MasterKillRequest(tservers.TestMaster): def handle_request(self, m): m.reply(proxy.KILL) class TestKillRequest(tservers.HTTPProxTest): masterclass = MasterKillRequest def test_kill(self): tutils.raises("server disconnect", self.pathod, "200") # Nothing should have hit the server assert not self.server.last_log() class MasterKillResponse(tservers.TestMaster): def handle_response(self, m): m.reply(proxy.KILL) class TestKillResponse(tservers.HTTPProxTest): masterclass = MasterKillResponse def test_kill(self): tutils.raises("server disconnect", self.pathod, "200") # The server should have seen a request assert self.server.last_log() class EResolver(tservers.TResolver): def original_addr(self, sock): return None class TestTransparentResolveError(tservers.TransparentProxTest): resolver = EResolver def test_resolve_error(self): assert 
self.pathod("304").status_code == 502 class MasterIncomplete(tservers.TestMaster): def handle_request(self, m): resp = tutils.tresp() resp.content = flow.CONTENT_MISSING m.reply(resp) class TestIncompleteResponse(tservers.HTTPProxTest): masterclass = MasterIncomplete def test_incomplete(self): assert self.pathod("200").status_code == 502 <file_sep>#!/usr/bin/env python import sys, argparse, os from libmproxy import proxy, console, cmdline, version from libmproxy.console import palettes if __name__ == '__main__': parser = argparse.ArgumentParser(usage = "%(prog)s [options]") parser.add_argument('--version', action='version', version=version.NAMEVERSION) cmdline.common_options(parser) parser.add_argument("--debug", dest="debug", default=False, action="store_true") parser.add_argument( "--palette", type=str, default="dark", action="store", dest="palette", help="Select color palette: " + ", ".join(palettes.palettes.keys()) ) group = parser.add_argument_group( "Filters", "See help in mitmproxy for filter expression syntax." ) group.add_argument( "-i", "--intercept", action="store", type = str, dest="intercept", default=None, help = "Intercept filter expression." ) options = parser.parse_args() config = proxy.process_proxy_options(parser, options) if options.no_server: server = proxy.DummyServer(config) else: try: server = proxy.ProxyServer(config, options.port, options.addr) except proxy.ProxyServerError, v: print >> sys.stderr, "mitmproxy:", v.args[0] sys.exit(1) try: opts = console.Options(**cmdline.get_common_options(options)) except cmdline.OptionException, v: parser.error(v.message) opts.intercept = options.intercept opts.debug = options.debug opts.palette = options.palette spec = "" for i in ["LANG", "LC_CTYPE", "LC_ALL"]: spec += os.environ.get(i, "").lower() if "utf" not in spec: print >> sys.stderr, "Error: mitmproxy requires a UTF console environment." 
print >> sys.stderr, "Set your LANG enviroment variable to something like en_US.UTF-8" sys.exit(1) m = console.ConsoleMaster(server, opts) try: m.run() except KeyboardInterrupt: pass <file_sep>log = [] def clientconnect(ctx, cc): ctx.log("XCLIENTCONNECT") log.append("clientconnect") def request(ctx, r): ctx.log("XREQUEST") log.append("request") def response(ctx, r): ctx.log("XRESPONSE") log.append("response") def clientdisconnect(ctx, cc): ctx.log("XCLIENTDISCONNECT") log.append("clientdisconnect") def error(ctx, cc): ctx.log("XERROR") log.append("error") <file_sep>""" This is a script stub, with definitions for all events. """ def start(ctx): """ Called once on script startup, before any other events. """ ctx.log("start") def clientconnect(ctx, client_connect): """ Called when a client initiates a connection to the proxy. Note that a connection can correspond to multiple HTTP requests """ ctx.log("clientconnect") def request(ctx, flow): """ Called when a client request has been received. """ ctx.log("request") def response(ctx, flow): """ Called when a server response has been received. """ ctx.log("response") def error(ctx, flow): """ Called when a flow error has occured, e.g. invalid server responses, or interrupted connections. This is distinct from a valid server HTTP error response, which is simply a response with an HTTP error code. """ ctx.log("error") def clientdisconnect(ctx, client_disconnect): """ Called when a client disconnects from the proxy. """ ctx.log("clientdisconnect") def done(ctx): """ Called once on script shutdown, after any other events. 
""" ctx.log("done") <file_sep>import sys, os, string, socket, time import shutil, tempfile, threading import SocketServer from OpenSSL import SSL from netlib import odict, tcp, http, wsgi, certutils, http_status, http_auth import utils, flow, version, platform, controller KILL = 0 class ProxyError(Exception): def __init__(self, code, msg, headers=None): self.code, self.msg, self.headers = code, msg, headers def __str__(self): return "ProxyError(%s, %s)"%(self.code, self.msg) class Log: def __init__(self, msg): self.msg = msg class ProxyConfig: def __init__(self, certfile = None, cacert = None, clientcerts = None, no_upstream_cert=False, body_size_limit = None, reverse_proxy=None, transparent_proxy=None, authenticator=None): self.certfile = certfile self.cacert = cacert self.clientcerts = clientcerts self.no_upstream_cert = no_upstream_cert self.body_size_limit = body_size_limit self.reverse_proxy = reverse_proxy self.transparent_proxy = transparent_proxy self.authenticator = authenticator self.certstore = certutils.CertStore() class ServerConnection(tcp.TCPClient): def __init__(self, config, scheme, host, port, sni): tcp.TCPClient.__init__(self, host, port) self.config = config self.scheme, self.sni = scheme, sni self.requestcount = 0 self.tcp_setup_timestamp = None self.ssl_setup_timestamp = None def connect(self): tcp.TCPClient.connect(self) self.tcp_setup_timestamp = time.time() if self.scheme == "https": clientcert = None if self.config.clientcerts: path = os.path.join(self.config.clientcerts, self.host.encode("idna")) + ".pem" if os.path.exists(path): clientcert = path try: self.convert_to_ssl(cert=clientcert, sni=self.sni) self.ssl_setup_timestamp = time.time() except tcp.NetLibError, v: raise ProxyError(400, str(v)) def send(self, request): self.requestcount += 1 d = request._assemble() if not d: raise ProxyError(502, "Cannot transmit an incomplete request.") self.wfile.write(d) self.wfile.flush() def terminate(self): if self.connection: try: 
self.wfile.flush() except tcp.NetLibDisconnect: # pragma: no cover pass self.connection.close() class RequestReplayThread(threading.Thread): def __init__(self, config, flow, masterq): self.config, self.flow, self.channel = config, flow, controller.Channel(masterq) threading.Thread.__init__(self) def run(self): try: r = self.flow.request server = ServerConnection(self.config, r.scheme, r.host, r.port, r.host) server.connect() server.send(r) tsstart = utils.timestamp() httpversion, code, msg, headers, content = http.read_response( server.rfile, r.method, self.config.body_size_limit ) response = flow.Response( self.flow.request, httpversion, code, msg, headers, content, server.cert, server.rfile.first_byte_timestamp ) self.channel.ask(response) except (ProxyError, http.HttpError, tcp.NetLibError), v: err = flow.Error(self.flow.request, str(v)) self.channel.ask(err) class HandleSNI: def __init__(self, handler, client_conn, host, port, cert, key): self.handler, self.client_conn, self.host, self.port = handler, client_conn, host, port self.cert, self.key = cert, key def __call__(self, connection): try: sn = connection.get_servername() if sn: self.handler.get_server_connection(self.client_conn, "https", self.host, self.port, sn) new_context = SSL.Context(SSL.TLSv1_METHOD) new_context.use_privatekey_file(self.key) new_context.use_certificate(self.cert.x509) connection.set_context(new_context) self.handler.sni = sn.decode("utf8").encode("idna") # An unhandled exception in this method will core dump PyOpenSSL, so # make dang sure it doesn't happen. 
except Exception, e: # pragma: no cover pass class ProxyHandler(tcp.BaseHandler): def __init__(self, config, connection, client_address, server, channel, server_version): self.channel, self.server_version = channel, server_version self.config = config self.proxy_connect_state = None self.sni = None self.server_conn = None tcp.BaseHandler.__init__(self, connection, client_address, server) def get_server_connection(self, cc, scheme, host, port, sni): """ When SNI is in play, this means we have an SSL-encrypted connection, which means that the entire handler is dedicated to a single server connection - no multiplexing. If this assumption ever breaks, we'll have to do something different with the SNI host variable on the handler object. """ sc = self.server_conn if not sni: sni = host if sc and (scheme, host, port, sni) != (sc.scheme, sc.host, sc.port, sc.sni): sc.terminate() self.server_conn = None self.log( cc, "switching connection", [ "%s://%s:%s (sni=%s) -> %s://%s:%s (sni=%s)"%( scheme, host, port, sni, sc.scheme, sc.host, sc.port, sc.sni ) ] ) if not self.server_conn: try: self.server_conn = ServerConnection(self.config, scheme, host, port, sni) self.server_conn.connect() except tcp.NetLibError, v: raise ProxyError(502, v) return self.server_conn def del_server_connection(self): if self.server_conn: self.server_conn.terminate() self.server_conn = None def handle(self): cc = flow.ClientConnect(self.client_address) self.log(cc, "connect") self.channel.ask(cc) while self.handle_request(cc) and not cc.close: pass cc.close = True self.del_server_connection() cd = flow.ClientDisconnect(cc) self.log( cc, "disconnect", [ "handled %s requests"%cc.requestcount] ) self.channel.tell(cd) def handle_request(self, cc): try: request, err = None, None request = self.read_request(cc) if request is None: return cc.requestcount += 1 app = self.server.apps.get(request) if app: err = app.serve(request, self.wfile) if err: self.log(cc, "Error in wsgi app.", err.split("\n")) return 
else: request_reply = self.channel.ask(request) if request_reply is None or request_reply == KILL: return elif isinstance(request_reply, flow.Response): request = False response = request_reply response_reply = self.channel.ask(response) else: request = request_reply if self.config.reverse_proxy: scheme, host, port = self.config.reverse_proxy else: scheme, host, port = request.scheme, request.host, request.port # If we've already pumped a request over this connection, # it's possible that the server has timed out. If this is # the case, we want to reconnect without sending an error # to the client. while 1: sc = self.get_server_connection(cc, scheme, host, port, self.sni) sc.send(request) if sc.requestcount == 1: # add timestamps only for first request (others are not directly affected) request.tcp_setup_timestamp = sc.tcp_setup_timestamp request.ssl_setup_timestamp = sc.ssl_setup_timestamp sc.rfile.reset_timestamps() try: tsstart = utils.timestamp() httpversion, code, msg, headers, content = http.read_response( sc.rfile, request.method, self.config.body_size_limit ) except http.HttpErrorConnClosed, v: self.del_server_connection() if sc.requestcount > 1: continue else: raise except http.HttpError, v: raise ProxyError(502, "Invalid server response.") else: break response = flow.Response( request, httpversion, code, msg, headers, content, sc.cert, sc.rfile.first_byte_timestamp ) response_reply = self.channel.ask(response) # Not replying to the server invalidates the server # connection, so we terminate. if response_reply == KILL: sc.terminate() if response_reply == KILL: return else: response = response_reply self.send_response(response) if request and http.request_connection_close(request.httpversion, request.headers): return # We could keep the client connection when the server # connection needs to go away. However, we want to mimic # behaviour as closely as possible to the client, so we # disconnect. 
if http.response_connection_close(response.httpversion, response.headers): return except (IOError, ProxyError, http.HttpError, tcp.NetLibError), e: if hasattr(e, "code"): cc.error = "%s: %s"%(e.code, e.msg) else: cc.error = str(e) if request: err = flow.Error(request, cc.error) self.channel.ask(err) self.log( cc, cc.error, ["url: %s"%request.get_url()] ) else: self.log(cc, cc.error) if isinstance(e, ProxyError): self.send_error(e.code, e.msg, e.headers) else: return True def log(self, cc, msg, subs=()): msg = [ "%s:%s: "%cc.address + msg ] for i in subs: msg.append(" -> "+i) msg = "\n".join(msg) l = Log(msg) self.channel.tell(l) def find_cert(self, cc, host, port, sni): if self.config.certfile: return certutils.SSLCert.from_pem(file(self.config.certfile, "r").read()) else: sans = [] if not self.config.no_upstream_cert: conn = self.get_server_connection(cc, "https", host, port, sni) sans = conn.cert.altnames host = conn.cert.cn.decode("utf8").encode("idna") ret = self.config.certstore.get_cert(host, sans, self.config.cacert) if not ret: raise ProxyError(502, "Unable to generate dummy cert.") return ret def get_line(self, fp): """ Get a line, possibly preceded by a blank. 
""" line = fp.readline() if line == "\r\n" or line == "\n": # Possible leftover from previous message line = fp.readline() return line def read_request_transparent(self, client_conn): orig = self.config.transparent_proxy["resolver"].original_addr(self.connection) if not orig: raise ProxyError(502, "Transparent mode failure: could not resolve original destination.") self.log(client_conn, "transparent to %s:%s"%orig) host, port = orig if port in self.config.transparent_proxy["sslports"]: scheme = "https" if not self.ssl_established: dummycert = self.find_cert(client_conn, host, port, host) sni = HandleSNI( self, client_conn, host, port, dummycert, self.config.certfile or self.config.cacert ) try: self.convert_to_ssl(dummycert, self.config.certfile or self.config.cacert, handle_sni=sni) except tcp.NetLibError, v: raise ProxyError(400, str(v)) else: scheme = "http" line = self.get_line(self.rfile) if line == "": return None r = http.parse_init_http(line) if not r: raise ProxyError(400, "Bad HTTP request line: %s"%repr(line)) method, path, httpversion = r headers = self.read_headers(authenticate=False) content = http.read_http_body_request( self.rfile, self.wfile, headers, httpversion, self.config.body_size_limit ) return flow.Request( client_conn,httpversion, host, port, scheme, method, path, headers, content, self.rfile.first_byte_timestamp, utils.timestamp() ) def read_request_proxy(self, client_conn): line = self.get_line(self.rfile) if line == "": return None if not self.proxy_connect_state: connparts = http.parse_init_connect(line) if connparts: host, port, httpversion = connparts headers = self.read_headers(authenticate=True) self.wfile.write( 'HTTP/1.1 200 Connection established\r\n' + ('Proxy-agent: %s\r\n'%self.server_version) + '\r\n' ) self.wfile.flush() dummycert = self.find_cert(client_conn, host, port, host) sni = HandleSNI( self, client_conn, host, port, dummycert, self.config.certfile or self.config.cacert ) try: self.convert_to_ssl(dummycert, 
self.config.certfile or self.config.cacert, handle_sni=sni) except tcp.NetLibError, v: raise ProxyError(400, str(v)) self.proxy_connect_state = (host, port, httpversion) line = self.rfile.readline(line) if self.proxy_connect_state: r = http.parse_init_http(line) if not r: raise ProxyError(400, "Bad HTTP request line: %s"%repr(line)) method, path, httpversion = r headers = self.read_headers(authenticate=False) host, port, _ = self.proxy_connect_state content = http.read_http_body_request( self.rfile, self.wfile, headers, httpversion, self.config.body_size_limit ) return flow.Request( client_conn, httpversion, host, port, "https", method, path, headers, content, self.rfile.first_byte_timestamp, utils.timestamp() ) else: r = http.parse_init_proxy(line) if not r: raise ProxyError(400, "Bad HTTP request line: %s"%repr(line)) method, scheme, host, port, path, httpversion = r headers = self.read_headers(authenticate=True) content = http.read_http_body_request( self.rfile, self.wfile, headers, httpversion, self.config.body_size_limit ) return flow.Request( client_conn, httpversion, host, port, scheme, method, path, headers, content, self.rfile.first_byte_timestamp, utils.timestamp() ) def read_request_reverse(self, client_conn): line = self.get_line(self.rfile) if line == "": return None scheme, host, port = self.config.reverse_proxy r = http.parse_init_http(line) if not r: raise ProxyError(400, "Bad HTTP request line: %s"%repr(line)) method, path, httpversion = r headers = self.read_headers(authenticate=False) content = http.read_http_body_request( self.rfile, self.wfile, headers, httpversion, self.config.body_size_limit ) return flow.Request( client_conn, httpversion, host, port, "http", method, path, headers, content, self.rfile.first_byte_timestamp, utils.timestamp() ) def read_request(self, client_conn): self.rfile.reset_timestamps() if self.config.transparent_proxy: return self.read_request_transparent(client_conn) elif self.config.reverse_proxy: return 
self.read_request_reverse(client_conn) else: return self.read_request_proxy(client_conn) def read_headers(self, authenticate=False): headers = http.read_headers(self.rfile) if headers is None: raise ProxyError(400, "Invalid headers") if authenticate and self.config.authenticator: if self.config.authenticator.authenticate(headers): self.config.authenticator.clean(headers) else: raise ProxyError( 407, "Proxy Authentication Required", self.config.authenticator.auth_challenge_headers() ) return headers def send_response(self, response): d = response._assemble() if not d: raise ProxyError(502, "Cannot transmit an incomplete response.") self.wfile.write(d) self.wfile.flush() def send_error(self, code, body, headers): try: response = http_status.RESPONSES.get(code, "Unknown") html_content = '<html><head>\n<title>%d %s</title>\n</head>\n<body>\n%s\n</body>\n</html>'%(code, response, body) self.wfile.write("HTTP/1.1 %s %s\r\n" % (code, response)) self.wfile.write("Server: %s\r\n"%self.server_version) self.wfile.write("Content-type: text/html\r\n") self.wfile.write("Content-Length: %d\r\n"%len(html_content)) for key, value in headers.items(): self.wfile.write("%s: %s\r\n"%(key, value)) self.wfile.write("Connection: close\r\n") self.wfile.write("\r\n") self.wfile.write(html_content) self.wfile.flush() except: pass class ProxyServerError(Exception): pass class ProxyServer(tcp.TCPServer): allow_reuse_address = True bound = True def __init__(self, config, port, address='', server_version=version.NAMEVERSION): """ Raises ProxyServerError if there's a startup problem. 
""" self.config, self.port, self.address = config, port, address self.server_version = server_version try: tcp.TCPServer.__init__(self, (address, port)) except socket.error, v: raise ProxyServerError('Error starting proxy server: ' + v.strerror) self.channel = None self.apps = AppRegistry() def start_slave(self, klass, channel): slave = klass(channel, self) slave.start() def set_channel(self, channel): self.channel = channel def handle_connection(self, request, client_address): h = ProxyHandler(self.config, request, client_address, self, self.channel, self.server_version) h.handle() h.finish() class AppRegistry: def __init__(self): self.apps = {} def add(self, app, domain, port): """ Add a WSGI app to the registry, to be served for requests to the specified domain, on the specified port. """ self.apps[(domain, port)] = wsgi.WSGIAdaptor(app, domain, port, version.NAMEVERSION) def get(self, request): """ Returns an WSGIAdaptor instance if request matches an app, or None. """ if (request.host, request.port) in self.apps: return self.apps[(request.host, request.port)] if "host" in request.headers: host = request.headers["host"][0] return self.apps.get((host, request.port), None) class DummyServer: bound = False def __init__(self, config): self.config = config def start_slave(self, *args): pass def shutdown(self): pass # Command-line utils def certificate_option_group(parser): group = parser.add_argument_group("SSL") group.add_argument( "--cert", action="store", type = str, dest="cert", default=None, help = "User-created SSL certificate file." ) group.add_argument( "--client-certs", action="store", type = str, dest = "clientcerts", default=None, help = "Client certificate directory." 
) TRANSPARENT_SSL_PORTS = [443, 8443] def process_proxy_options(parser, options): if options.cert: options.cert = os.path.expanduser(options.cert) if not os.path.exists(options.cert): return parser.error("Manually created certificate does not exist: %s"%options.cert) cacert = os.path.join(options.confdir, "mitmproxy-ca.pem") cacert = os.path.expanduser(cacert) if not os.path.exists(cacert): certutils.dummy_ca(cacert) body_size_limit = utils.parse_size(options.body_size_limit) if options.reverse_proxy and options.transparent_proxy: return parser.error("Can't set both reverse proxy and transparent proxy.") if options.transparent_proxy: if not platform.resolver: return parser.error("Transparent mode not supported on this platform.") trans = dict( resolver = platform.resolver(), sslports = TRANSPARENT_SSL_PORTS ) else: trans = None if options.reverse_proxy: rp = utils.parse_proxy_spec(options.reverse_proxy) if not rp: return parser.error("Invalid reverse proxy specification: %s"%options.reverse_proxy) else: rp = None if options.clientcerts: options.clientcerts = os.path.expanduser(options.clientcerts) if not os.path.exists(options.clientcerts) or not os.path.isdir(options.clientcerts): return parser.error("Client certificate directory does not exist or is not a directory: %s"%options.clientcerts) if (options.auth_nonanonymous or options.auth_singleuser or options.auth_htpasswd): if options.auth_singleuser: if len(options.auth_singleuser.split(':')) != 2: return parser.error("Invalid single-user specification. 
Please use the format username:password") username, password = options.auth_singleuser.split(':') password_manager = http_auth.PassManSingleUser(username, password) elif options.auth_nonanonymous: password_manager = http_auth.PassManNonAnon() elif options.auth_htpasswd: try: password_manager = http_auth.PassManHtpasswd(options.auth_htpasswd) except ValueError, v: return parser.error(v.message) authenticator = http_auth.BasicProxyAuth(password_manager, "mitmproxy") else: authenticator = http_auth.NullProxyAuth(None) return ProxyConfig( certfile = options.cert, cacert = cacert, clientcerts = options.clientcerts, body_size_limit = body_size_limit, no_upstream_cert = options.no_upstream_cert, reverse_proxy = rp, transparent_proxy = trans, authenticator = authenticator ) <file_sep>#!/usr/bin/env python import sys, signal # The unneccesary console import here is to work around a bug in pyinstaller from libmproxy import proxy, dump, cmdline, version, console import argparse if __name__ == '__main__': parser = argparse.ArgumentParser(usage = "%(prog)s [options] [filter]") parser.add_argument('--version', action='version', version=version.NAMEVERSION) cmdline.common_options(parser) parser.add_argument( "--keepserving", action="store_true", dest="keepserving", default=False, help="Continue serving after client playback or file read. We exit by default." 
) parser.add_argument('args', nargs=argparse.REMAINDER) options = parser.parse_args() if options.quiet: options.verbose = 0 proxyconfig = proxy.process_proxy_options(parser, options) if options.no_server: server = proxy.DummyServer(proxyconfig) else: try: server = proxy.ProxyServer(proxyconfig, options.port, options.addr) except proxy.ProxyServerError, v: print >> sys.stderr, "mitmdump:", v.args[0] sys.exit(1) try: dumpopts = dump.Options(**cmdline.get_common_options(options)) except cmdline.OptionException, v: parser.error(v.message) dumpopts.keepserving = options.keepserving if options.args: filt = " ".join(options.args) else: filt = None try: m = dump.DumpMaster(server, dumpopts, filt) def cleankill(*args, **kwargs): m.shutdown() signal.signal(signal.SIGTERM, cleankill) m.run() except dump.DumpError, e: print >> sys.stderr, "mitmdump:", e sys.exit(1) except KeyboardInterrupt: pass <file_sep>from countershape import Page pages = [ Page("firefox.html", "Firefox"), Page("osx.html", "OSX"), Page("windows7.html", "Windows 7"), Page("ios.html", "IOS"), Page("ios-simulator.html", "IOS Simulator"), Page("android.html", "Android"), ] <file_sep>from countershape import Page pages = [ Page("anticache.html", "Anticache"), Page("clientreplay.html", "Client-side replay"), Page("filters.html", "Filter expressions"), Page("setheaders.html", "Set Headers"), Page("serverreplay.html", "Server-side replay"), Page("sticky.html", "Sticky cookies and auth"), Page("proxyauth.html", "Proxy Authentication"), Page("replacements.html", "Replacements"), Page("reverseproxy.html", "Reverse proxy mode"), Page("upstreamcerts.html", "Upstream Certs"), ] <file_sep>import flask mapp = flask.Flask(__name__) mapp.debug = True @mapp.route("/") def index(): return flask.render_template("index.html", section="home") @mapp.route("/certs") def certs(): return flask.render_template("certs.html", section="certs")
f485a6324566f3e94a7545c34b9a140d32f54ea1
[ "HTML", "Markdown", "INI", "Python", "Text", "Shell" ]
45
Python
sebdraven/mitmproxy
6a31d3271219dffc9786f08e387ad5dc812fe86c
f017e7b02251f8f44f77f16af3f94ac2ca4a1da1
refs/heads/master
<file_sep>import React, { Component } from "react"; class LeaderboardTableRow extends Component { render() { return ( <tr> <th scope="row">{this.props.index}</th> <td>{this.props.name}</td> <td>{this.props.location}</td> <td>{this.props.gender}</td> <td>{this.props.successStreak}</td> <td>{this.props.challengeScore}</td> </tr> ); } } export default LeaderboardTableRow; <file_sep>import React, { Component } from 'react'; import ReactDOM from 'react-dom'; class Question extends Component { state = { }; render () { return ( <div className="form-group"> <h1 className="questions">{this.props.questionTitle}</h1> <select className="form-control" id="quest1"> <option value="1">{this.props.option1}</option> <option value="5">{this.props.option2}</option> <option value="10">{this.props.option3}</option> <option value="15">{this.props.option4}</option> </select> </div> ); } } export default Question; <file_sep>// subscribe/unsubscribe user // save subscription object to mongo // set up admin page // loop through db and send push to all subscribed endpoints // var app = (function() { var isSubscribed = false; var swRegistration = null; var applicationServerPublicKey = '<KEY>'; // TODO 2.1 - check for notification support if ('serviceWorker' in navigator && 'PushManager' in window) { // console.log('Service Worker and Push is supported'); navigator.serviceWorker.register('service-worker.js') .then(function(swReg) { console.log('Service Worker is registered', swReg); swRegistration = swReg; Notification.requestPermission(function(status) { console.log('Notification permission status:', status); if (status === "granted") { swRegistration.pushManager.getSubscription() .then(function(subscription) { if(subscription === null) { subscribeUser(); } }); } }); // TODO 3.3a - call the initializeUI() function // initializeUI(); // displayNotification(); }) .catch(function(error) { console.error('Service Worker Error', error); }); } else { console.warn('Push messaging is not supported'); } 
// TODO 2.2 - request permission to show notifications // Notification.requestPermission(function(status) { // console.log('Notification permission status:', status); // if (status === "granted") { // navigator.serviceWorker.getRegistration().then(function(reg) { // subscribeUser(); // }); // } // }); // initializeUI(); // admin page sends to route on server, server sends push to ?? // fires node/main.js??? function displayNotification() { // TODO 2.3 - display a Notification if (Notification.permission == 'granted') { navigator.serviceWorker.getRegistration().then(function(reg) { // TODO 2.4 - Add 'options' object to configure the notification var options = { body: 'First notification!', tag: 'id1', vibrate: [100, 50, 100], data: { dateOfArrival: Date.now(), primaryKey: 1 }, // TODO 2.5 - add actions to the notification actions: [{ action: 'explore', title: 'Go to the site', }, { action: 'close', title: 'Close the notification', }, ] // TODO 5.1 - add a tag to the notification }; reg.showNotification('Hello world!', options); }); } } function initializeUI() { swRegistration.pushManager.getSubscription() .then(function(subscription) { // console.log('sub', subscription); // if no subscription object found subscription === null isSubscribed = (subscription !== null); updateSubscriptionOnServer(subscription); if (isSubscribed) { console.log('User IS subscribed.'); } else { console.log('User is NOT subscribed.'); } }); } // TODO 4.2a - add VAPID public key function subscribeUser() { var applicationServerKey = urlB64ToUint8Array(applicationServerPublicKey); swRegistration.pushManager.subscribe({ userVisibleOnly: true, applicationServerKey: applicationServerKey }) .then(function(subscription) { console.log('User is subscribed:', subscription); updateSubscriptionOnServer(subscription); isSubscribed = true; }) .catch(function(err) { if (Notification.permission === 'denied') { console.warn('Permission for notifications was denied'); } else { console.error('Failed to 
subscribe the user: ', err); } }); } function unsubscribeUser() { // TODO 3.5 - unsubscribe from the push service swRegistration.pushManager.getSubscription() .then(function(subscription) { if (subscription) { return subscription.unsubscribe(); } }) .catch(function(error) { console.log('Error unsubscribing', error); }) .then(function() { updateSubscriptionOnServer(null); console.log('User is unsubscribed'); isSubscribed = false; }); } function updateSubscriptionOnServer(subscription) { if (subscription !== null) { let xhttp = new XMLHttpRequest(); xhttp.open('POST', '/subscriptions', true); xhttp.setRequestHeader("Content-type", "application/json"); xhttp.onreadystatechange = function() { if (this.readyState == 4 && this.status == 200) { console.log('post success'); } }; xhttp.send(JSON.stringify(subscription)); } // Here's where you would send the subscription to the application server // var subscriptionJson = document.querySelector('.js-subscription-json'); // var endpointURL = document.querySelector('.js-endpoint-url'); // var subAndEndpoint = document.querySelector('.js-sub-endpoint'); // if (subscription) { // subscriptionJson.textContent = JSON.stringify(subscription); // endpointURL.textContent = subscription.endpoint; // subAndEndpoint.style.display = 'block'; // } else { // subAndEndpoint.style.display = 'none'; // } } function urlB64ToUint8Array(base64String) { var padding = '='.repeat((4 - base64String.length % 4) % 4); var base64 = (base64String + padding) .replace(/\-/g, '+') .replace(/_/g, '/'); var rawData = window.atob(base64); var outputArray = new Uint8Array(rawData.length); for (var i = 0; i < rawData.length; ++i) { outputArray[i] = rawData.charCodeAt(i); } return outputArray; } // })(); // function registerServiceWorker() { // return navigator.serviceWorker.register('service-worker.js') // .then(function(registration) { // console.log('Service worker successfully registered.'); // return registration; // }) // .catch(function(err) { // 
console.error('Unable to register service worker.', err); // }); // } // function askPermission() { // return new Promise(function(resolve, reject) { // const permissionResult = Notification.requestPermission(function(result) { // resolve(result); // }); // if (permissionResult) { // permissionResult.then(resolve, reject); // } // }) // .then(function(permissionResult) { // if (permissionResult !== 'granted') { // throw new Error('We weren\'t granted permission.'); // } // }); // } // (function() { // 'use strict'; // if (!('serviceWorker' in navigator)) { // console.log('Service worker not supported'); // return; // } // navigator.serviceWorker.register('service-worker.js', { // }) // .then(function(registration) { // console.log('Registered at scope:', registration.scope); // }) // .catch(function(error) { // console.log('Registration failed:', error); // }); // })(); // // if (!('serviceWorker' in navigator)) { // // console.log('Service worker not supported'); // // // return; // // } // // navigator.serviceWorker.register('service-worker.js') // // .then(function() { // // console.log('Registered'); // // }) // // .catch(function(error) { // // console.log('Registration failed:', error); // // }); // // if(navigator.serviceWorker && window.PushManager) { // // registerServiceWorker(); // // askPermission(); // // }<file_sep>const jwt = require('jsonwebtoken'); const config = require('../config/config.js'); const db = require('../models'); module.exports = (req, res, next) => { const authorizationHeader = req.headers['authorization']; let token; if (authorizationHeader) { token = authorizationHeader.split(' ')[1]; } if (token) { jwt.verify(token, config.jwtSecret, (err, decoded) => { if (err) { res.status(401).json({ error: 'Failed to authenticate' }); } else { db.User.find({ name: decoded.name }) .then(res => { if (!res) { res.status(404).json({ error: 'No such user' }); } else { req.currentUser = res[0].name; next(); } }); } }); } else { 
res.status(403).json({ error: 'No token provided' }); } }<file_sep># CarbonSense Carbon sense is a full stack MERN app designed to help users reduce their carbon footprint through daily tracking of 10 predetermined categories. Users are encouraged to log their daily activities and attempt to keep their score below a certain threshold. The leaderboard allows users to compete against one another and vie for the title of most eco-friendly. If you'd like to try out CarbonSense it's currently deployed on Heroku at: [CarbonSense](https://secure-island-35963.herokuapp.com/login) Feel free to use the guest login credentials: email: <EMAIL> password: <PASSWORD> Technologies used include: * React, React-Router * Redux * Json Web Tokens * MongoDB<file_sep>import React, { Component } from 'react'; import './AdminForm.css'; class AdminForm extends Component { render() { return( <div> <div className='row pushBox'> <div className='col-8 offset-2'> <h3>Admin Form</h3> </div> </div> </div> ); } } export default AdminForm;<file_sep>const mongoose = require('mongoose'); const Schema = mongoose.Schema; const questionSchema = new mongoose.Schema({ question: String, options: [{optionText: String, optionValue: String}], active: { type: boolean, default: true } }); const User = mongoose.model('User', userSchema); module.exports = User;<file_sep>import { color } from 'd3-color'; import { interpolateRgb } from 'd3-interpolate'; import React, { Component } from 'react'; import ReactDOM from 'react-dom'; import LiquidFillGauge from 'react-liquid-gauge'; import "./DashboardGraph.css" import axios from "axios"; class ProfileGraph extends Component { state = { value: 1, gaugeTarget: 100 }; startColor = '#000000'; // cornflowerblue endColor = '#000000'; // crimson updateGaugeTarget = () => { axios.put("/gaugeTarget") .then(res => { this.setState({ gaugeTarget: res.data.gaugeTarget }); console.log("results", this.state.gaugeTarget); }) .catch(err => console.log(err)); } render() { const radius = 
205; const interpolate = interpolateRgb(this.startColor, this.endColor); const fillColor = interpolate(this.state.value / 100); const gradientStops = [ { key: '0%', stopColor: color(fillColor).darker(0.5).toString(), stopOpacity: 1, offset: '0%' }, { key: '50%', stopColor: fillColor, stopOpacity: 0.75, offset: '50%' }, { key: '80%', stopColor: color("#000000").darker(0.5).toString(), stopOpacity: 0.75, offset: '50%' }, { key: '100%', stopColor: color(fillColor).brighter(0.5).toString(), stopOpacity: 0.5, offset: '100%' } ]; return ( <div className="DashboardGraph" style={{marginBottom:150}}> <h1>Max Carbon Points: {this.state.gaugeTarget}</h1> <LiquidFillGauge style={{ margin: '0 auto' }} width={radius * 2} height={radius * 2} value={this.props.value} percent="%" textSize={1} textOffsetX={0} textOffsetY={0} textRenderer={(props) => { const value = Math.round(props.value); const radius = Math.min(props.height / 2, props.width / 2); const textPixels = (props.textSize * radius / 2); const valueStyle = { fontSize: textPixels }; const percentStyle = { fontSize: textPixels * 0.6 }; return ( <tspan> <tspan className="value" style={valueStyle}>{value}</tspan> <tspan style={percentStyle}>{props.percent}</tspan> </tspan> ); }} riseAnimation waveAnimation waveFrequency={2} waveAmplitude={1} gradient gradientStops={gradientStops} circleStyle={{ fill: fillColor }} waveStyle={{ fill: fillColor }} textStyle={{ fill: color('white').toString(), fontFamily: 'Arial' }} waveTextStyle={{ fill: color('#ff0000').toString(), fontFamily: 'Arial' }} /*onClick={() => { this.setState({ value: Math.random() * 100 }); }}*/ /> <div style={{ margin: '20px auto', width: 120 }} > </div> </div> ); } }; export default ProfileGraph;<file_sep>import React, { Component } from 'react'; import AdminForm from '../../components/AdminForm/AdminForm.jsx'; import './Admin.css'; class Admin extends Component { render() { return( <AdminForm /> ); } } export default Admin;<file_sep>import React, { Component } 
from 'react'; import "./SignUpForm.css" class SignUpForm extends Component { render() { return( <div style={{paddingTop: 150, color: "white", marginBottom: 110}}> <h1>Sign Up</h1> <form> <div className="form-group col-4 offset-4"> <label htmlFor="email">Email</label> <input type="email" className="form-control" id="email" placeholder="<EMAIL>" value={this.props.value} onChange={this.props.handleInputChange} name="name" /> </div> <div className="form-group col-4 offset-4"> <label htmlFor="password">Password</label> <input type="password" className="form-control" id="password" placeholder="********" value={this.props.value} onChange={this.props.handleInputChange} name="password"/> </div> <div className="form-group col-4 offset-4"> <label htmlFor="confirmPassword">Confirm Password</label> <input type="password" className="form-control" id="confirmPassword" placeholder="********" value={this.props.value} onChange={this.props.handleInputChange} name="confirmPassword"/> </div> </form> <button onClick={this.props.handleFormSubmit} className="btn btn-lg btn-warning btn-signUp">Sign Up</button> </div> ); } } export default SignUpForm;<file_sep>const mongoose = require('mongoose'); const Schema = mongoose.Schema; const userSchema = new mongoose.Schema({ name: { type: String, required: true }, password: { type: String, required: true }, birthday: { type: Date, default: 1 / 1 / 1980 }, gender: { type: String, default: "" }, location: { type: String, default: "" }, image: { type: String, default: "" }, gaugeTarget: { type: Number, default: 100 }, dailyScores: [{ date: { type: String }, score: { type: Number } }], successStreak: { //add to this number for each successful day (day below guageTarget) type: Number, default: 0 }, challengeScore: { type: Number, default: 0 } }); const User = mongoose.model('User', userSchema); module.exports = User;<file_sep>import React, { Component } from 'react'; import "./leaderboardTable.css"; class LeaderboardTable extends Component { render() 
{ return( <div className="container"> <table className="table table-striped"> <thead> <tr> <th scope="col">Rank</th> <th scope="col">Username</th> <th scope="col">Location</th> {/* <th scope="col">Age</th> */} <th scope="col">Gender</th> <th scope="col">Success Streak</th> <th scope="col">Challenge Score</th> </tr> </thead> <tbody> {this.props.children} </tbody> </table> </div> ); } } export default LeaderboardTable;<file_sep>import React from 'react'; const Header = () => ( <div className="headerWrapper"> <div className="col-8"> <div className="Head"> <h1>Dashboard</h1> <h4>Let's see how you've been being green!</h4> </div> </div> </div> ); export default Header; <file_sep>import React, { Component } from "react"; import Header from "../../components/DashboardHeader/DashboardHeader.jsx"; import DashboardGraph from "../../components/DashboardGraph/DashboardGraph.jsx"; import Question from "../../components/DashboardQuestion/DashboardQuestion.jsx"; import "./dashboard.css"; import axios from "axios"; class Dashboard extends Component { state = { currentScore: 0 } componentWillMount() { this.loadScore(); } loadScore = () => { axios.get("/currentScore") .then(res => { this.setState({ currentScore: res.data.score }) console.log('on load:', this.state); }) .catch(err => console.log(err)); } handleOnClick = (event) => { event.preventDefault(); const scoreArray = [ parseInt(document.getElementById('quest1').value), parseInt(document.getElementById('quest2').value), parseInt(document.getElementById('quest3').value), parseInt(document.getElementById('quest4').value), parseInt(document.getElementById('quest5').value), parseInt(document.getElementById('quest6').value), parseInt(document.getElementById('quest7').value), parseInt(document.getElementById('quest8').value), parseInt(document.getElementById('quest9').value), parseInt(document.getElementById('quest10').value) ] //sum and grab the values from the questions const reducer = (accumulator, currentValue) => accumulator + 
currentValue; let newScore = (scoreArray.reduce(reducer)); console.log(newScore); axios .post('/addpoints', {score: newScore}) .then(res => { this.setState({ currentScore: res.data.score }) console.log('on click:', this.state); }) .catch(err => console.log(err)); } render() { return( <div className="Container" style={{marginBottom:250}}> <Header/> <div className="row"> <div className="col-4 offset-1 question-col"> <Question questionTitle="Spaghetti" option1="Option1" option2="Option2" option3="Option3" option4="Option4" /> {/* <div className="form-group"> <h1 className="questions">Transportation</h1> <select className="form-control" id="quest1"> <option value="1">100% Walk/Bike/Public Transit</option> <option value="5">50% Walk/Bike/Public Transit</option> <option value="10">Carpool or drove less than 10 miles</option> <option value="15">Drove my car 10+ miles</option> </select> </div> */} <div className="form-group"> <h1 className="questions">Eating</h1> <select className="form-control" id='quest2'> <option value="1">I ate only Vegan/Vegetarian or from local organic farms</option> <option value="5">I ate some meat, and most stuff was local or organic</option> <option value="10">I ate mostly conventional or processed foods</option> <option value="15">I ate fast food</option> </select> <div className="form-group"> <h1 className="questions">Thermostat</h1> <select className="form-control" id='quest3'> <option value="4">Above 72 in Summer/Below 68 in Winter</option> <option value="10">Above 70 in Summer/Below 70 in Winter</option> <option value="6">Above 68 in Summer/Below 72 in Winter</option> <option value="12">Below 68 in Summer/Above 72 in Winter</option> </select> </div> <h1 className="questions">Grocery Shopping</h1> <div className="form-group"> <select className="form-control" id='quest4'> <option value="0">Brought My Own Bags</option> <option value="5">Used Paper Bags</option> <option value="10">Used Plastic Bags</option> </select> </div> <div 
className="form-group"> <h1 className="questions">Grocery packaging</h1> <select className="form-control" id='quest5'> <option value="0">All Fresh meat and Produce in reusable bags</option> <option value="5">50% PrePackaged</option> <option value="10">100% PrePackaged</option> <option value="15">No Groceries - I only eat out</option> </select> </div> <div className="form-group"> <h1 className="questions">Bathing</h1> <select className="form-control" id='quest6'> <option value="1">5 Mins or Less/Cold to Warm</option> <option value="5">5 - 10 Mins/Hot</option> <option value="10">10+ Mins/Hot</option> </select> </div> <div className="form-group"> <h1 className="questions">Device Charging</h1> <select className="form-control" id='quest7'> <option value="1">Chargers unplugged when not in use</option> <option value="5">Chargers left plugged into wall always</option> <option value="10">Charge items even if they are not dead</option> <option value="15">If I'm not using it, I'm charging it</option> </select> </div> <div className="form-group"> <h1 className="questions">Energy Conservation</h1> <select className="form-control" id='quest8'> <option value="2">Only lights and appliances in use are turned on</option> <option value="5">Sometimes I leave the lights on</option> <option value="10">Lights and appliances on timers</option> <option value="15">Lights and appliances are always on all over</option> </select> </div> <div className="form-group"> <h1 className="questions">Waste Management</h1> <select className="form-control" id='quest9'> <option value="0">I didn't produce any waste</option> <option value="5">I recycled or composted everything</option> <option value="10">I recycled or composted some</option> <option value="15">I threw everything in the trash</option> </select> </div> <div className="form-group"> <h1 className="questions">Entertainment</h1> <select className="form-control" id='quest10'> <option value="0">I disconnected and read a book or got outside or 
something similar</option> <option value="5">I spend several hours on Netflix or playing video games</option> <option value="10">I went to a movie and had snacks from a disposable container</option> </select> </div> <button type="button" className="btn btn-success btn3" onClick={this.handleOnClick}>SUBMIT...</button> <h3 className="btn-caption">to see what percentage of your daily carbon you have used today!</h3> </div> </div> <div> <DashboardGraph value={this.state.currentScore}/> </div> </div> </div> ); } } export default Dashboard;<file_sep>import React, { Component } from "react"; import ProfileInfo from '../../components/ProfileInfo/ProfileInfo.jsx'; import axios from 'axios'; import { connect } from 'react-redux'; class Profile extends Component { state = { user: {} }; componentDidMount() { this.loadUser(); } loadUser = () => { axios.get('/user/1') .then( resp => { this.setState({ user: resp.data[0] }) }); } render() { return( <div className="container-fluid well span6"> <div className="row-fluid"> <div className="span8"> <ProfileInfo alttext={this.state.user.name} successStreak={this.state.user.successStreak} challengeScore={this.state.user.challengeScore} /> </div> </div> </div> ); } } function mapStateToProps(state) { return { auth: state.auth }; } export default connect(mapStateToProps)(Profile);<file_sep>// (function() { // })(); register(); function register() { // window.self.addEventListener('notificationclick', event => { // console.log('On notification click: ', event.notification.tag); // event.notification.close(); // }); window.addEventListener('install', function(event) { console.log('Service worker installing...'); // window.skipWaiting(); }); window.addEventListener('activate', function(event) { console.log('Service worker activating...'); }); // I'm a new service worker window.addEventListener('fetch', function(event) { console.log('Fetching:', event.request.url); }); window.addEventListener('notificationclose', function(e) { var notification 
= e.notification; var primaryKey = notification.data.primaryKey; console.log('Closed notification: ' + primaryKey); }); window.addEventListener('notificationclick', function(e) { var notification = e.notification; var primaryKey = notification.data.primaryKey var action = e.action; if (action === 'close') { notification.close(); } }); // window.addEventListener('notificationclick', function(e) { // var notification = e.notification; // var primaryKey = notification.data.primaryKey // var action = e.action; // if (action === 'close') { // notification.close(); // } else { // e.waitUntil( // clients.matchAll().then(function(clis) { // var client = clis.find(function(c) { // return c.visibilityState === 'visible'; // }); // if (client !== undefined) { // client.navigate('samples/page' + primaryKey + '.html'); // client.focus(); // } else { // // there are no visible windows. Open one. // clients.openWindow('samples/page' + primaryKey + '.html'); // notification.close(); // } // }) // ); // } // window.registration.getNotifications().then(function(notifications) { // notifications.forEach(function(notification) { // notification.close(); // }); // }); // }); window.addEventListener('push', function(e) { var body; if (e.data) { body = e.data.text(); } else { body = 'Default body'; } var options = { body: body, icon: 'images/notification-flat.png', vibrate: [100, 50, 100], data: { dateOfArrival: Date.now(), primaryKey: 1 }, actions: [{ action: 'explore', title: 'Go to the site', icon: 'images/checkmark.png' }, { action: 'close', title: 'Close the notification', icon: 'images/xmark.png' }, ] }; e.waitUntil( window.registration.showNotification('Push Notification', options) ); }); }<file_sep>const mongoose = require("mongoose"); const db = require("../models"); mongoose.Promise = global.Promise; mongoose.connect( process.env.MONGODB_URI || "mongodb://localhost/carbonsensedb", { useMongoClient: true } ); const userSeed = [ { name: "<NAME>", password: "<PASSWORD>", birthday: 
"1/1/1999", gender: "undefined", location: "under the sea", image: "https://2.bp.blogspot.com/-CPKJ_gdJFt0/Vz2_AgSXrgI/AAAAAAAAGQU/7t8ipb6KLyQ3CBohP1ysNslycP51Mb62ACKgB/s1600/SpongeBob_5.png", gaugeTarget: 100, successStreak: 5, challengeScore: 18 }, { name: "Philly", password: "<PASSWORD>", birthday: "3/15/1980", gender: "male", location: "Raleigh, NC", image: "https://lumiere-a.akamaihd.net/v1/images/open-uri20150608-27674-1jdbpa8_be64ca1b.png?region=0%2C0%2C600%2C565", gaugeTarget: 100, successStreak: 4 , challengeScore: 28 }, { name: "Julianne", password: "<PASSWORD>", birthday: "1/18/1987", gender: "female", location: "Raleigh, NC", image: "https://scontent-atl3-1.xx.fbcdn.net/v/t31.0-8/18216422_1960543210842462_7285967195853793702_o.jpg?oh=888b5540a18613585b435788380400c6&oe=5A8DD4DD", gaugeTarget: 100, successStreak: 6, challengeScore: 15 } ]; db.User .remove({}) .then(() => db.User.collection.insertMany(userSeed)) .then(data => { console.log(data.insertedIds.length + " records inserted!"); process.exit(0); }) .catch(err => { console.error(err); process.exit(1); }); <file_sep>import React, { Component } from "react"; import LeaderboardTable from '../../components/LeaderboardTable/LeaderboardTable.jsx'; import "./leader.css"; import LeaderboardTableRow from '../../components/LeaderboardTableRow/LeaderboardTableRow.jsx'; import axios from 'axios'; class Leaderboard extends Component { state = { leaders: [] }; componentDidMount() { this.loadLeaderboard(); } loadLeaderboard = () => { axios.get("/leaderboard/successstreak") .then(res => { this.setState({ leaders: res.data }) console.log(this.state) }) .catch(err => console.log(err)); } render () { return ( <div className="Container"> <div> <h1 className="Head">Leaderboard</h1> </div> <div> <h4 className="Head2">See how you measure up against other CarbonSensers!</h4> </div> <LeaderboardTable> {this.state.leaders.map( (result, index) => { return ( <LeaderboardTableRow key={index + 1} index={index + 1} 
name={result.name} location={result.location} gender={result.gender} successStreak={result.successStreak} challengeScore={result.challengeScore} /> ); }) } </LeaderboardTable> </div> ); } } export default Leaderboard;<file_sep>const mongoose = require('mongoose'); const dailyScoreSchema = new mongoose.Schema({ date: { type: String, // required: true }, score: { type: Number, required: true, default: 0 } }); const DailyScore = mongoose.model('DailyScore', dailyScoreSchema); module.exports = DailyScore;
f82ca9e131c917ee87e4ff2cf2b3976adc539c0d
[ "JavaScript", "Markdown" ]
19
JavaScript
psd314/CarbonSense
fcb17ed63d77735916e95791f5377327a2918e27
73ceeede01ddeb99380bb6243df705eb434f847f
refs/heads/master
<file_sep># WOB - Which one to buy Show the best option for buying using value per unit meter. ## Development ### Tools - NodeJS (https://nodejs.org/) - [React App](https://create-react-app.dev/) - Bootstrap ### Install ```bash npm install ``` ### Start ```bash npm start ``` Then open [http://localhost:3000](http://localhost:3000) to view it in the browser. ### Test ```bash npm test ``` ### Build ```bash npm run build ``` <file_sep>import React from 'react' export default function ItemRow(props) { return ( <div> <span>Item {props.index}</span> <select> <option>kg</option> <option>g</option> </select> <input type="number" placeholder="value" /> </div> ) }
8a076464018fc25c151da37748871a5826c841ee
[ "Markdown", "JavaScript" ]
2
Markdown
miltonsiqueira/wob
eeddf41ee1f046c31487496fbe6663da697deaf5
8f1c236c17edf074a804819e4d00ed4fb43bd376
refs/heads/master
<repo_name>eltonsantos/eltonsantos.github.io<file_sep>/_posts/2018-02-12-sola-gratia.md --- layout: post title: Estudo dos solas:&nbsp; Sola Gratia categories: devocionais image: solagratia.jpg logo: solagratia.jpg description: Parte 2 - Sola Gratia - Efésios 2:8-10 url: https://eltonsantos.github.io/devocionais/sola-gratia/ author: <NAME> comments: true --- __"Porque pela graça sois salvos, por meio da fé; e isto não vem de vós, é dom de Deus. Não vem das obras, para que ninguém se glorie; Porque somos feitura sua, criados em Cristo Jesus para as boas obras, as quais Deus preparou para que andássemos nelas."__ Efésios 2:8-10 <p class="intro"><span class="dropcap">P</span>az de Deus meus amados irmãos, dando continuidade no estudo sobre os 5 solas da reforma protestante, hoje estudaremos sobre o Sola Gratia ou Somente a Graça.</p> Irmãos, o que significa somente a graça? No dicionário, um termo mais técnico de graça significa: dom que Deus concede aos homens e que os torna capazes de alcançar a salvação. Assistindo uma pregação do __reverendo <NAME>__, ele cita um exemplo bastante interessante sobre o que é a graça. A história conta que certa vez um professor recebe um aluno em sua classe, um adolescente rebelde, violento e cheio de problemas familiares e sociais, porém esse professor, agindo por compaixão, acolheu esse aluno e passou a querer ajudá-lo. Um dia esse professor passou em uma loja, comprou roupas, calçados e foi à casa desse menino. No entanto, esse menino, não querendo receber ninguém em sua casa, pega uma pedra e joga nesse professor. O professor cai ferido no chão e aí ele é levado para o hospital para ser tratado. Quando ele se recupera, o professor vai novamente fazer outra tentativa para entregar os presentes ao aluno. 
E quando ele consegue chegar ao local, o pai do menino abre a porta, constrangido, pois sabia o que seu filho havia feito com aquele professor, e disse: Meu filho jogou uma pedra no senhor e o feriu, ele não merece isso que o senhor trouxe. O professor responde: é verdade, seu filho não merece, mas ele precisa! **Isso é graça. Algo que não merecemos, mas precisamos**! Por nossos pecados merecemos condenação, no entanto Deus nos ama não por quem somos, mas apesar de quem somos. Conseguem entender a diferença? É um amor incondicional, que Ele nos amou quando éramos ímpios, fracos, pecadores. Ele não ama os nossos atos, as coisas que fazemos de ruim. Porém Ele nos ama. E Ele provou o Seu amor por nós enviando o Seu único filho para morrer pelos nossos pecados! E este homem, <NAME>, morreu em nosso lugar, para nos dá a vida eterna. Não somos salvos pelo o que fazemos para Deus, pelas nossas obras e atos, mas **somos salvos por aquilo que Deus fez por nós**, em Cristo Jesus. É um presente, é de graça, mas é muito caro, custou um alto preço, custou o sangue do filho de Deus. E tudo isso já foi pago por Ele na cruz. Irmãos, só deixando um adendo aqui. Não podemos confundir misericórdia com graça. A misericórdia é Deus não nos castigando como merecem os nossos pecados e a graça é Deus nos abençoando apesar de não merecermos. Misericórdia é a libertação do julgamento, enquanto graça é estender bondade aos indignos. Deu pra entender? Ninguém vai poder chegar no céu, se achando e dizendo: eu mereci, agora eu quero esse prêmio que conquistei com meu esforço, com minhas obras e ações. Não, todos nós teremos que chegar humildes ao pés do Senhor dizendo: foi pela graça que cheguei aqui. Por que Ele me amou, Por que <NAME> morreu por mim. Por que o Espírito Santo me transformou. 
Irmãos, o padrão que Deus exige para ser salvo é a perfeição, é fazer 100%, a Bíblia é clara quando em __Tiago 2:10__ diz que se guardarmos toda a Lei, mas tropeçarmos em um único ponto seremos condenados por ela inteira, então ninguém passa por esse pente fino das obras. Nenhum ser humano, por mais top de Jerusalém que ele seja, conseguiria cumprir toda a risca e ser aprovado. Então é aí que Deus oferece esse presente a quem é salvo, **não merecemos, mas precisamos**! Para encerrar meus amados irmãos, **é somente pela Graça que somos salvos** e o instrumento usado, o meio usado é a fé. E aí vem as obras, as obras não são a causa para a nossa salvação, mas as obras são resultado que somos salvos. Por que nem todos aqueles que fazem as obras são salvos, mas todos aqueles que são salvos fazem as obras. Amém, amém e amém! Que Deus abençoe a todos!<file_sep>/_posts/2017-12-11-devocional-11-12-2017.md --- layout: post title: Devocional - 11/12/2017 categories: devocionais image: reflexao.jpg logo: reflexao.jpg description: Devocional - 11/12/2017 - Jó 1:1 url: https://eltonsantos.github.io/devocionais/devocional-11-12-2017/ author: <NAME> comments: true --- __"Havia um homem na terra de Uz, cujo nome era Jó; e era este homem íntegro, reto e temente a Deus e desviava-se do mal."__ Jó 1:1 <p class="intro"><span class="dropcap">P</span>az de Deus, no devocional de hoje quero falar um pouco sobre Jó.</p> A Bíblia nos ensina que Jó era um homem íntegro, reto, temente a Deus e que se desviava do mal. Irmãos, quando Deus fala a Satanás sobre Jó ele não diz: observaste meu servo Jó que é rico, próspero e saudável? Não, Ele diz: observaste meu servo Jó, homem íntegro, reto, temente a Deus e que se desvia do mal. Ou seja, Deus não está interessado no que você tem, mas sim do que você é. 
Aqui a Bíblia apresenta quatro características de Jó, a primeira delas: **íntegro**, ser íntegro significa você ser honesto quando ninguém está vendo, manter uma conduta ética quando você não é observado. Outra característica é ser **reto**, ser reto significa caminhar em conformidade com a justiça, Lei, razão e direito. Não tender para nenhum outro lado que não seja a retidão, um exemplo de retidão é durante a declaração de imposto de renda. Não interessa se o governo fará bom ou mal uso do dinheiro recolhido, devemos fazer a diferença das demais pessoas, pois somos um povo separados por Deus. Outra característica observada em Jó é ser **temente a Deus**. Temor a Deus nao significa ter medo de Deus, mas sim significa ter obediência, reverência e respeito, é buscar andar nos caminhos do Senhor, em conformidade com a Sua palavra. A última característica e tão importante quanto às outras é o **desviar-se do mal**. Desviar do mal significa evitar permanecer em um lugar ou continuar fazendo algo que você sente que irá contra os preceitos de Deus e que irá desagradar ao Senhor. Irmãos, devemos copiar essas características de Jó, pois elas estão em conformidade com a Palavra de Deus. Devemos viver nossa vida com integridade, retidão, temor ao Senhor e nos afastar de todo o mal.<file_sep>/_posts/2017-03-07-a-porta-que-deus-abre-ninguem-fecha.md --- layout: post title: A porta que Deus abre ninguém fecha categories: devocionais image: porta.jpg logo: porta.jpg description: A porta que Deus abre ninguém fecha e a porta que Deus fecha ninguém abre - Apocalipse 3:8. url: https://eltonsantos.github.io/devocionais/a-porta-que-deus-abre-ninguem-fecha/ author: <NAME> comments: true --- >__“Eu sei as tuas obras; eis que diante de ti pus uma porta aberta, e ninguém a pode fechar; tendo pouca força, guardaste a minha palavra, e não negaste o meu nome.”__ Ap. 
3:8 <p class="intro"><span class="dropcap">E</span>stive meditando nessa passagem que diz popularmente: "A porta que Deus abre ninguém fecha e a porta que Deus fecha ninguém abre".</p> E de fato isso é verdade, quando Deus está a frente e Ele te promete algo, acredite, NADA nesse mundo será capaz de mudar isso. Entretanto quando Ele dá um ponto final, não há nada mais a ser feito, pois o ponto final Dele é absoluto. Vejam o exemplo de Jonas, que basicamente não queria fazer aquilo que Deus havia pedido, não queria aquela porta que Deus abriu... mas não teve jeito, nem mesmo a sua vontade foi capaz de mudar a do Senhor, pois Ele é nosso soberano e quando Ele determina algo, isso de realmente irá acontecer. Por isso, ore a Deus, busque ao Senhor, as portas não abrirão de imediato, Deus trabalha no silêncio, na quietude da vida, seu tempo é diferente do nosso, a forma ilimitada que Ele sabe das coisas transcede o nosso entendimento, mas tenha a certeza de que quando Ele falar, quando Ele te prometer, saiba que isso é que irá acontecer independentemente da vontade humana, mesmo que seja impossível aos homens ou naquele momento você não enxergue meios concretos, na Sua hora Ele proverá o necessário e mudará todo o cenará para ser propício a vontade do Soberano. Amém. Paz do Senhor.<file_sep>/_posts/2018-02-14-reflexao-1-sm-30-v8.md --- layout: post title: Reflexão - 1 Samuel 30:8 categories: devocionais image: reflexao.jpg logo: reflexao.jpg description: Reflexão - 1 Samuel 30:8 url: https://eltonsantos.github.io/devocionais/reflexao-1-sm-30-v8/ author: <NAME> comments: true --- **"Então consultou Davi ao Senhor, dizendo: Perseguirei eu o bando? Alcançá-lo-ei? Respondeu-lhe o SENHOR: Persegue-o, porque, de fato, o alcançarás e tudo libertarás."** 1 Sm 30:8 Paz do Deus meus queridos e amados irmãos, nesse versículo encontramos uma simples, embora verdadeira e profunda oração de Davi. 
O contexto desse versículo é que Davi estava totalmente atribulado, tendo perdido tudo para os amalequitas, família, família dos seus soldados, rebanhos, suas posses, e ainda estava sendo ameaçado de perder a vida por não ter conseguido defender os seus. Porém mesmo em meio a uma luta tensa e angustia profunda, Davi orou, clamando ao Deus todo poderoso e além disso, ele esperou, esperou a resposta do SENHOR para a sua causa. Irmãos, quero encorajá-los a não somente a orar, mas também a esperar o tempo certo de Deus para que só então, agir conforme o Senhor determinou. O tempo não é o nosso, não é o da situação, não é o da adversidade, **o tempo é do SENHOR**. Mesmo na adversidade, Davi orou e esperou o tempo certo. Saibam que a palavra **ORAÇÃO** é composta de **ORAR** com **AÇÃO**, no entanto é preciso primeiro **ORAR** e ao separar a palavra isso mostra o tempo que devemos **ESPERAR** para que após a resposta de Deus seja tomada a **AÇÃO**. E assim fez Davi, e a Bíblia deixa clara que mesmo após toda essa amargura e angustia, Davi se reanimou no seu Deus e foi cumprir o que de fato Deus havia o mandado: **persegue-o e tudo libertarás**!! Irmãos, procuremos orar e obedecer o tempo do SENHOR, para somente agir no Seu tempo e de acordo com a Sua Palavra, com fé Nele e na Sua providência. Que O Senhor abençoe a todos! Amém!!<file_sep>/_posts/2017-06-10-compreendendo-a-graca-de-deus-diante-das-dores.md --- layout: post title: Compreendendo a graça de Deus diante das dores categories: devocionais image: coroa-cristo.jpg logo: coroa-cristo.jpg description: Compreendendo a graça de Deus diante das dores - 2 Timóteo 4:6-18 url: https://eltonsantos.github.io/devocionais/compreendendo-a-graca-de-deus-diante-das-dores/ author: <NAME> comments: true --- "Porque eu já estou sendo oferecido por aspersão de sacrifício, e o tempo da minha partida está próximo. Combati o bom combate, acabei a carreira, guardei a fé. 
Desde agora, a coroa da justiça me está guardada, a qual o Senhor, justo juiz, me dará naquele dia; e não somente a mim, mas também a todos os que amarem a sua vinda. Procura vir ter comigo depressa, Porque Demas me desamparou, amando o presente século, e foi para Tessalônica, Crescente para Galácia, Tito para Dalmácia. Só Lucas está comigo. Toma Marcos, e traze-o contigo, porque me é muito útil para o ministério. Também enviei Tíquico a Éfeso. Quando vieres, traze a capa que deixei em Trôade, em casa de Carpo, e os livros, principalmente os pergaminhos. Alexandre, o latoeiro, causoume muitos males; o Senhor lhe pague segundo as suas obras. Tu, guarda-te também dele, porque resistiu muito às nossas palavras. Ninguém me assistiu na minha primeira defesa, antes todos me desampararam. Que isto lhes não seja imputado. Mas o Senhor assistiu-me e fortaleceu-me, para que por mim fosse cumprida a pregação, e todos os gentios a ouvissem; e fiquei livre da boca do leão. E o Senhor me livrará de toda a má obra, e guardar-me-á para o seu reino celestial; a quem seja glória para todo o sempre. Amém." 2 Timóteo 4:6-18 Essa Palavra foi pregada no culto da minha igreja e o Senhor falou forte ao meu coração acerca dela para que eu viesse a compartilhar o que entendi. ## Compreendendo a graça de Deus diante das dores <p class="intro"><span class="dropcap">A</span>prendemos com Paulo como a graça de Deus nos ajuda nos momentos de aflição e angustia. 
Devemos entender que <b class="red">a graça de Deus não nos livra do sofrimento</b>, pois Paulo foi perseguido em Tessalonica, açoitado em Macedonia, acusado e espancado em Jerusalém, sofre um naufragio em Mileto, picado por uma cobra venenosa na ilha de Malta, apedrejado quase até a morte em Listra, expulso de Antioquia, passou fome e sede em Corínto, foi preso e decapitado por Nero em Roma, e em meio a tantas tribulações que Paulo sofreu, essa graca o fortalece e nos fortalece para suporta-las e que fez Paulo dizer em Gálatas 6:17: "Eu trago no meu corpo as marcas de Jesus".</p> Irmãos amados, quero dizer pra vocês que o nosso sofrimento nao é sinal de que estamos longe da vontade de Deus. As nossas angustias nos fazem estar mais perto do nosso Senhor Jesus e esse tempo presente de sofrimento e de dor não pode ser comparado com aquilo que há de vim quando chegar a hora. Irmãos contem com a graça de Deus! Mas o que essa essa graça de Deus? Como compreender essa graça que para muitos pode parecer loucura? Paulo nos deu exemplos práticos **Paulo sofreu de solidão** O apóstolo dos gentis não foi assistido por ninguém. Gente precisa de Deus, mas também gente precisa de gente, somos sociais, ninguém pode conviver sozinho por toda a vida, precisamos nos relacionar socialmente. Na hora que mais precisou ficou sozinho. **Paulo sofreu de ingratidão** Todos o abandonaram. Paulo deu sua vida, mas não teve ninguém que tivera se esforçado para da em troca. Quando fizeres algo a alguém não espere nada em troca, não espere receber de volta o amor ao qual tem dado. **Paulo sofreu perseguição** Como pode ser lido no verso 14, Paulo sofreu muitos males de seus inimigos, mas Paulo sabia que a vingança viria do Senhor. **Paulo sofreu a dor da resistência e oposição** Não levante uma agulha, não o mate, não brigue, não discuta, não pragueje, não abra a boca para se levantar contra teus inimigos, por que deles quem cuida é o próprio Deus. 
Você não precisa defender Teu Deus, é Ele quem nos defende. Irmãos, a graça de Deus nos capacita a obter valores mais altos para a própria vida. Paulo comparou sua vida a um sacrifício ao Senhor, vivendo pra Ele, oferecendo tudo a Ele e é com esse sacrifício que Deus vai usar a tua crise como exemplo para ensinar as demais pessoas e você dirá: Foi necessário eu passar por tudo isso para entender os mistérios do Senhor. Glória a Deus! Outro ponto é quando Paulo fala no verso 6 que partida aqui tem o significado de morte, mas o que a morte significa para Paulo? A morte para Paulo significa deixar a carga, alívio, soltar os laços, libertação, afrouxar a tenda, levantar acampamento e ir morar na casa do Pai. E a partir do verso 7, Paulo começa a discursar a respeito do seu fim. Um versículo bastante famoso e maravilhoso. Normalmente é muito falado quando as pessoas se aposentam, meu pai é um dos que tão louco pra falar, rsrs. <b class="red">"Combati o bom combate"</b> significa olhar para a vida com dificuldade, mas ter bom ânimo para vencer o mundo e lutar contra tudo conforme Jesus disse em João 16:33. <b class="red">"Completei a carreira"</b> significa aguentar a até o fim, aguentar até a hora em que Cristo o chamar, significa não se distrair com coisas fúteis, manter os olhos sempre no alvo e saber que vai valer a pena meu irmão, vai valer a pena mesmo tudo o que tu tens passado. <b class="red">"Guardei a fé"</b> aqui significa ser um soldado fiel ao Senhor até o fim, não olhar pra trás, não olhar para as circunstâncias, não ser como a mulher de Ló que olhou para trás e virou uma estátua de sal. E no versículo seguinte, Paulo tinha a certeza da coroa o qual o Senhor, o justo juiz, lhe dará naquele dia! Paulo tinha a certeza da vinda de nosso Senhor, <NAME>. A graça de Deus nos capacita a receber ajuda dos céus na hora certa, como pode ser lido no verso 17, onde Paulo diz: _"O Senhor me assistiu e me fortaleceu"_. 
Por que quando somos abandonados por todos aqueles que disseram que nunca iriam nos abandonar, quando somos largados e deixados sozinho no sofrimento, o Senhor está lá nos assistindo e recolhendo cada uma de nossas lágrimas, oh aleluia glória a Deus! Meus irmãos, o Senhor não nos livrará das provas, mas nos dará poder para passar por todas elas, creiam nisso em nome de Jesus! Você não vai ficar pelo caminho, esse deserto nao durará para sempre, o teu ministério vai se completar, os planos de Deus vão se cumprir em sua vida por mais impossível que pareça ser se somente você crer Nele com todo o seu coração! <b class="red">Os teus problemas não são nada diante da grandeza do Deus todo poderoso!</b> E para concluirmos, em vez de murmurar, balançar a cabeça, praguejar seus inimigos, Paulo louvou e adorou ao Senhor e disse: <b class="red">"A Ele, glória pelos séculos dos séculos. Amém!!"</b><file_sep>/_posts/2017-04-14-instalando-e-configurando-o-java-no-windows-10.md --- layout: post title: Instalando e configurando o Java no Windows 10 categories: artigos image: java.jpg logo: java.jpg description: Instalando e configurando o Java no Windows 10. url: https://eltonsantos.github.io/artigos/instalando-e-configurando-o-java-no-windows-10/ author: <NAME> comments: true --- <p class="intro"><span class="dropcap">F</span>ala galera, blz? Hoje vou falar de uma coisa simples e básica, mas que no começo pode deixar muitos iniciantes com dúvidas: como instalar e configurar o Java em ambiente Windows. Para esse tutorial estarei usando o Windows 10 e Java em sua última versão (version 8 Update 121).</p> Fala galera, blz? Hoje vou falar de uma coisa simples e básica, mas que no começo pode deixar muitos iniciantes com dúvidas: como instalar e configurar o Java em ambiente Windows. Para esse tutorial estarei usando o Windows 10 e Java em sua última versão (version 8 Update 121). Antes de começar quero tirar uma dúvida sobre a diferença entre JRE e JDK. 
**JRE (Java Runtime Environment)** É utilizado para executar aplicações na plataforma Java. Composto pelas bibliotecas (APIs) e pela máquina virtual (JVM). O Java Runtime Environment não contém qualquer ferramenta para o desenvolvimento de aplicações, ou seja, você não vai conseguir compilar código fonte Java apenas com ele. **JDK (Java Development Kit)** É o pacote que contém toda a infraestrutura necessária para o desenvolvimento de aplicações Java. Ao ser instalado, o JRE é instalado automaticamente. As ferramentas incluem: compilador (javac.exe), depurador e outros utilitários. Pronto, agora que sabemos disso, podemos seguir o passo a passo com a instalação e configuração do Java. ## Passo 1 **Download do Java JDK** Acesse esse [Link direto](http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html?ssSourceSiteId=otnpt). Aceite os termos de licença, escolha a versão *jdk-8u121-windows-x64.exe*, para sistemas de 64 bits, caso seu Windows seja de 32 bits escolha *jdk-8u121-windows-i586.exe*. Faça o download normalmente. ## Passo 2 **Instalando o Java** Para instalar o JDK no Windows, basta executar o programa de instalação e clicar no botão Next em todas as telas apresentadas. O processo é simples e rápido. ## Passo 3 **Configurar variáveis de ambiente** Essa é a parte inicial mais complicada do processo, mas vocês verão que não é nenhum bicho de sete cabeças. Para começar pressione _Windows + S_ para abrir a busca do Windows e digite "variáveis de ambiente". Clique em Enter. Vá na opção *Variáveis de ambiente*. Aqui é preciso dizer para o computado qual o caminho de instalação do Java. Aqui, no label Variáveis do sistema clique no botão Novo. No pop up que aparecer, no campo Nome da variável digite: ```JAVA_HOME``` No valor da variável digite o caminho em que o Java foi instalado em sua máquina. 
Exemplo: ```C:\Program Files\Java\jdk1.8.0_121``` Com isso o Java já será reconhecido pelo seu sistema, porém ainda não poderá ser compilado,pois ainda precisamos realizar um segundo passo que é editar o Path, que é o caminho para executar os comandos através do prompt. Clique em Path, em Variáveis do sistema, e clique no botão Editar. No final do campo Valor da variável, digite: ```;%JAVA_HOME%\bin``` Clique em OK e pronto. ## Passo 4 **Testando tudo** Abra o cmd (prompt de comando do Windows) através do campo de busca do lado esquerdo ou pressionando em _Windows + S_ e digite cmg e aperte Enter. Digite: ``` java -version``` E você vera a versão do Java seguido de outras informações. Após isso digite: ```javac -version``` Caso tudo todas as variáveis de ambiente tenham sido configuradas corretamente será mostrada a versão do compilador na tela. Caso mostre que esse comando não foi reconhecido, refaça o tutorial com mais detalhe e verifique o que foi feito. É isso pessoal. Espero que tenham gostado, um tutorial simples, porém bastante necessário. Forte abraço! <file_sep>/_posts/2017-12-15-jesus-e-a-esperanca-quando-nao-ha-esperanca.md --- layout: post title: Jesus é a esperança quando não há esperança categories: devocionais image: esperanca.jpg logo: esperanca.jpg description: Jesus é a esperança quando não há esperança - Marcos 5:21-24 e Marcos 5:35-43 url: https://eltonsantos.github.io/devocionais/jesus-e-a-esperanca-quando-nao-ha-esperanca/ author: <NAME> comments: true --- __"E, passando Jesus outra vez num barco para o outro lado, ajuntou-se a ele uma grande multidão; e ele estava junto do mar. E eis que chegou um dos principais da sinagoga, por nome Jairo, e, vendo-o, prostrou-se aos seus pés, E rogava-lhe muito, dizendo: Minha filha está à morte; rogo-te que venhas e lhe imponhas as mãos, para que sare, e viva. 
E foi com ele, e seguia-o uma grande multidão, que o apertava."__ Marcos 5:21-24 __"Estando ele ainda falando, chegaram alguns do principal da sinagoga, a quem disseram: A tua filha está morta; para que enfadas mais o Mestre? E Jesus, tendo ouvido estas palavras, disse ao principal da sinagoga: Não temas, crê somente. E não permitiu que alguém o seguisse, a não ser Pedro, Tiago, e João, irmão de Tiago. E, tendo chegado à casa do principal da sinagoga, viu o alvoroço, e os que choravam muito e pranteavam.E, entrando, disse-lhes: Por que vos alvoroçais e chorais? A menina não está morta, mas dorme. E riam-se dele; porém ele, tendo-os feito sair, tomou consigo o pai e a mãe da menina, e os que com ele estavam, e entrou onde a menina estava deitada. E, tomando a mão da menina, disse-lhe: Talita cumi; que, traduzido, é: Menina, a ti te digo, levanta-te. E logo a menina se levantou, e andava, pois já tinha doze anos; e assombraram-se com grande espanto. E mandou-lhes expressamente que ninguém o soubesse; e disse que lhe dessem de comer."__ Marcos 5:35-43 <p class="intro"><span class="dropcap">P</span>az de Deus meus amados irmãos, boa noite. Nessa noite Deus me permitiu trazer a vocês uma palavra não de exortação como na semana passada, mas dessa vez de encorajamento acerca da esperança e fé: Jesus é a esperança quando não há esperança, é o tema que meditaremos hoje.</p> O contexto histórico retrata Jesus ajudando muitas pessoas, a Bíblia relata Jesus socorrendo seus discípulos numa tempestade, em seguida, Jesus liberta um endemoniado. Depois, Jesus cura uma mulher que sofria a 12 anos com fluxo de sangue e durante esse período entra Jairo, um dos principais da sinagoga, bastante conhecido e respeitado que estava com sua filhinha muito doente. Certamente, Jairo, deveria ter recursos suficientes para ter buscado ajuda nos melhores especialistas da época e mesmo tendo procurado os melhores de nada adiantou, pois sua filhinha estava sempre piorando. 
Então chega um momento que Jairo decide: eu vou até Jesus. Irmãos, por mais grave que seja a minha e a sua enfermidade, Jesus é a nossa esperança. Irmãos, meditei nesse texto não somente para mostrar que Jesus fez milagre de ressuscitar a filhinha de Jairo, mas para provar que Jesus é o mesmo ontem, hoje e sempre. O mesmo Jesus que ressuscita pessoas é o Jesus que está aqui hoje conosco e que Ele atende o clamor do aflito, não importando a gravidade da nossa situação, pois Jesus pode mudar nossa situação. O texto trás alguns pontos centrais que chamam a atenção, o primeiro deles é que Jairo vai até Jesus apresentando sua causa. Irmãos eu quero encorajá-los a levar também as suas causas aos pés de Jesus, independente da sua luta, necessidade, problema, coloque-as aos pés de Jesus. Irmãos, os nossos problemas podem nos levar rapidamente aos pés de Jesus. Quando você notar que todos os recursos não estão ajudando, você vai se humilhar aos pés de Jesus e romper algumas barreiras, assim como fez Jairo, que era bastante conhecido, mas deixou seu orgulho de lado para suplicar. Não deixem de se humilhar a Jesus por causa do orgulho, não se prive das maiores bençãos da vida por causa do orgulho, da soberba e do nariz empinado. E aí eu faço uma pergunta: O que te impede de ir até Jesus? É a sua família? É o seu trabalho? São os seus amigos? Quantas pessoas deixam de ir até Jesus por causa de críticas. Enquanto houver qualquer preocupação com opiniões das pessoas você se privará das bençãos de Deus. Nós devemos estarmos repletos de fé ao nos prostrarmos aos seus pés. Jairo se humilhou a Jesus, ele não falou de igual pra igual por ser chefe da sinagoga e muito importante, não, ele se humilhou, se reduziu a nada, ele abriu mão de tudo o que era, ele não exigiu nada, mas pediu com humildade. Irmãos, abram mão de tudo, do status, da fama, das posses para se humilhar na presença de Cristo e saibam que o lugar mais alto em que podemos chegar na vida é aos pés de Jesus. 
Quando nós estamos aos seus pés, estamos no cume, chegamos no ponto máximo onde podemos chegar. Vejam que a Bíblia também diz que Jairo insistiu, insistentemente suplicou. Notem que não foi um simples pedido, foi uma insistência, perseverança, determinação. Irmãos, por que que Deus as vezes não atende na hora, mas sim espera que você insista? Já pensaram sobre isso? Deus poderia muito bem resolver tudo na hora, em questão de segundos, mas por que o Senhor nos deixa esperando as vezes tanto tempo? Por que Ele quer tratar a sua causa diretamente com você, Ele quer saber o quão consciente você está da sua necessidade ou quão determinado você está a buscar seus desejos ou quão desesperadamente você quer uma ação e intervenção Dele. Irmãos assim como Jairo insistiu, perseverou e não desistiu dela, eu encorajo vocês a também não desistir de suas causas. Todos nós temos alguma causa urgente e não devemos estarmos dispostos a abandoná-la. Devemos insistir, clamar, orar. Outro ponto que notamos é que Jairo clamou com fé, ele não disse: Jesus vamos ali que se o Senhor colocar as mãos nela, talvez ela dê uma melhorada, não, ele disse e ela melhorará! Não devemos ter espaços pra dúvidas, devemos crer que Jesus pode e faz a ação se assim for da vontade Dele. Outro ponto importante é que Jesus foi com ele, levando esperança. Irmãos quando Jesus vai conosco temos a certeza que Ele se importa com a gente, Ele conhece sua vida, seu passado, Ele conhece o seu lado que ninguém conhece, Ele sabe o quão grave é seu problema. Irmãos acreditem: se formos atrás de Jesus, e nosso coração estiver quebrantado, de forma alguma Ele nos apartará, Ele nos acolherá! Nesse ponto Jesus da três palavras para Jairo: a primeira palavra é a **fé** (verso 36). 
Quando Jesus está indo em direção a filha de Jairo chega uma mulher clamando por Ele que O toca e O faz parar para atendê-la, acredito que Jairo deve ter ficado bastante agoniado, pois sua filhinha estava muito doente e cada minuto parado é praticamente uma eternidade. Até que também nesse período chega uma pessoa e diz: Jairo não precisa mais incomodar o Mestre, sua filhinha morreu... irmãos, imaginem como deve ter sido a reação de Jairo ao ouvir isso. É nesse momento que entra a fé: não temas, crer somente. Qual a nossa esperança? Enquanto há vida, há esperança. Não é assim que diz o ditado? Irmãos o glorioso nesse texto é que nem a morte pode dar um fim a nossa esperança. Eu não sei qual o tamanho do seu problema, não sei qual a gravidade da sua situação, talvez alguém já tenha dito pra você que não dá mais, que acabou, que não tem mais saída, que não tem mais jeito, mais solução. Os homens já deram o diagnóstico? Crer somente. Os homens já deram a sentença? Crer somente. Os homens já assinaram aqueles papéis? Crer somente. Essa é a palavra de fé para o nosso coração. A segunda palavra, é a palavra de **esperança** (verso 39). Notem que Jesus não dá um diagnóstico físico acerca da menininha, nós que somos crentes entendemos que aquele que morre no Senhor, está na verdade com seu corpo adormecido. A Bíblia costuma usar essa metáfora, para ilustrar que quem dorme, não dorme pra sempre, uma hora vai acordar. E o corpo que dorme, um dia ouvirá a voz do Senhor. O crente, ainda que morra, viverá, pois a morte não tem a última palavra. A terceira palavra é a palavra do **poder** (verso 41). Jesus ordena e a menina se levanta. Irmãos, Jairo está diante daquele que tem poder sobre a morte. Ele é o rei dos reis e a morte não tem poder para desafia-Lo. Irmãos, quando Jesus vai conosco, os imprevistos da vida não frustram os planos do Senhor. Jesus não chega atrasado em sua vida, Ele chega na hora certa. Mesmo que você chegue a um ponto que diz: agora é impossível. 
Esse impossível dos homens, é possível pra Jesus. O último ponto que chama a atenção é: quando estamos com Jesus as nossas tragédias ainda tem solução, não precisamos temer as más notícias. A crise insolúvel pros homens ainda tem solução, aquele beco sem saída ainda tem saída. Jairo descobriu que sua filha tinha morrido, pra ele ali o mundo desaba, a informação parece ter arrancado o último rastro de esperança do coração de Jairo, mas saibam, amados irmãos, que na Bíblia a fé não procede do milagre, e sim o milagre procede da fé. Em outras palavras, é a fé que dá origem ao milagre. Não temas, crer somente. Quando os nossos recursos chegam ao fim, os recursos de Jesus estão sempre disponíveis. Quando nosso poder acaba, os poderes de Jesus estão pleno para nos atender completamente. Não precisamos temer as más notícias quando estamos andando com Aquele que venceu até a morte. Por fim irmãos, lembrem-se: **a morte não tem a última palavra**. Quando a menina se levantou, todos ficaram admirados, inclusive aqueles que estavam rindo. A morte é o último inimigo a ser vencido, mas Jesus já a venceu. Jesus matou a morte com a sua morte e agora Ele é a ressurreição e a vida. E todo aquele que Nele crer tem a vida eterna. Irmãos, para Ele, não há causa perdida, não há crise insolúvel, não há problemas que não possam ser resolvidos, e finalmente o choro da morte é transformado na alegria da vida, quando Ele está conosco. Louvado seja o nome do Senhor. Que Deus abençoe a todos. Amém!<file_sep>/_posts/2017-03-20-diferenca-entre-os-e-oscip.md --- layout: post title: Diferença entre OS e OSCIP categories: artigos image: direito-administrativo.jpg logo: direito-administrativo.jpg description: Direito administrativo - Diferença entre OS e OSCIP. url: https://eltonsantos.github.io/artigos/diferenca-entre-os-e-oscip/ author: <NAME> comments: true --- Primeiramente, precisamos entender o que é **OS** e **OSCIP**, para depois saber a diferença entre ambas. 
OS e OSCIP, resumidamente são **entidades paraestatais**, aquelas pessoas jurídicas que atuam ao lado e em colaboração com o Estado, sem com ele se confundirem. Trata-se de **pessoas privadas**, vale dizer, instituídas por particulares, **sem fins lucrativos**, que exercem função típica, embora não exclusiva, do Estado, se sujeitando ao controle direto ou indireto do Poder Público. Segundo Maria <NAME>: >__"Entidades paraestatais são pessoas privadas que colaboram com o Estado desempenhando atividade não lucrativa e às quais o Poder Público dispensa especial proteção, colocando a serviço delas manifestações do seu poder de império, como o tributário, por exemplo."__ As entidades paraestatais não fazem parte da Administração Indireta; elas integram o **terceiro setor**. ### Organizações Sociais (OS) Organização social é a qualificação jurídica dada a pessoa jurídica de direito privado, sem fins lucrativos, instituída por iniciativa de particulares, e que recebe a delegação do Poder Público, mediante **contrato de gestão**, para desempenhar serviço público de **natureza social**. As organizações sociais não são uma categoria de pessoa jurídica. Trata-se, apenas, de uma qualificação atribuída pelo Poder Público a determinadas entidades privadas. ### Organização da sociedade civil de interesse público (Oscip) Organização da sociedade civil de interesse público é a qualificação jurídica dada a pessoas jurídicas de direito privado, sem fins lucrativos, instituídas por iniciativa de particulares, para desempenhar **serviços sociais** não exclusivos do Estado com incentivo e fiscalização pelo Poder Público, mediante vínculo jurídico instituído por meio de **termo de parceria**. >__A qualificação como organização social é ato discricionário. 
Já a qualificação como Oscip é ato vinculado.__ BEM resumidamente é isso, agora vou fazer um esqueminha aqui tratando a diferença entre OS e OSCIP OS | OSCIP --------- | ------ Foram idealizadas para substituir órgãos e entidades da Administração Pública, que seriam extintos e teriam suas atividades "absorvidas" pela OS | Não foram idealizadas para substituir órgãos e entidades da Administração Pública Formalizam parceria com o Poder Público mediante **contrato de gestão** | Formalizam parceria com o Poder Público mediante **termo de parceria**. Qualificação é **ato discricionário** | Qualificação é **ato vinculado** Qualificação depende de aprovação pelo Ministro de Estado ou titular de órgão supervisor ou regulador da área de atividade correspondente ao objeto social da OS. | Qualificação concedida pelo Ministério da Justiça. A lei exige que a OS possua um **Conselho de Administração**, do qual participem representantes do Poder Público; não exige que a OS tenha Conselho Fiscal. | A lei exige que a Oscip tenha um **Conselho Fiscal**; não exige que a Oscip tenha um Conselho de Administração. Não há exigência de que existam representantes do Poder Público em algum órgão da entidade É hipótese de licitação dispensável a contratação de OS pelo Poder Público, para o desempenho de atividades contempladas no contrato de gestão. | Não existe hipótese de licitação dispensável para a contratação de Oscip pelo Poder Público. A desqualificação como OS pode ser feita pelo Poder Executivo, em processo administrativo, assegurado o contraditório e a ampla defesa. | A desqualificação como Oscip pode ser feita a pedido da própria entidade, por iniciativa de qualquer cidadão ou do Ministério Público, em processo administrativo ou judicial, assegurado o contraditório e a ampla defesa. Por fim, ressalte-se que uma entidade **não pode ser qualificada como OS e OSCIP ao mesmo tempo**. 
Espero que tenham gostado, até a próxima!<file_sep>/_posts/2017-08-20-quais-sao-os-seus-gigantes.md --- layout: post title: Quais são os seus gigantes? categories: devocionais image: gigante.jpg logo: gigante.jpg description: Quais são os seus gigantes? - 1 Samuel 17:1-50 url: https://eltonsantos.github.io/devocionais/quais-sao-os-seus-gigantes/ author: <NAME> comments: true --- <p class="intro"><span class="dropcap">P</span>az do Senhor a todos, meus amados irmãos! Essa pregação eu ouvi na minha igreja em um culto de jovens e o Senhor falou tremendamente comigo através dessa mensagem e resolvi compartilhar o que entendi aqui.</p> O texto narra algo inexplicável aos olhos humanos, um grande episódio do antigo testamento em que Deus mostra sua soberania através da batalha de um "menino" com um gigante. (Digo menino entre aspas por que para que possamos enfrentar nossos gigantes devemos ser muito homens e maduros para tal feito). E essa pregação tem como tema e objetivo analisar o caráter que fizeram do menino Davi um vencedor de gigantes. Quais eram as suas características? Antes disso, gostaria de contextualizar aquele momento, o momento que antecedeu a batalha entre Davi e Golias. Levantar alguns pontos importantes. Vamos lá. O povo pede um rei a Samuel, rejeitam a administração de Deus e querem ser como as demais nações, querem ter um rei físico, que pudessem vê-lo. Então Deus manda Samuel ungir Saúl rei. Durante o turbulento reinado de Saul teve o episódio de Jônatas, que feriu a guarnição dos Filisteus (1 Sm 13:3) e com isso fez despertar mais ainda a ira dos filisteus sobre o povo de Israel. Após esse evento Saúl desobedece a Deus por medo dos Filisteus (1 Sm 13:13) e do que pudesse acontecer com ele. E por conta disso Deus levanta outro rei (1 Sm 13:14) então surge o início da história de Davi. E é esse o cenário atual, um povo espantado, um rei precipitado e com medo e um inimigo muito mais forte e irado bem próximo. Agora, quem eram os filisteus? 
Quem era esse povo que assombrava o povo de Israel? **Quem eram os Filisteus?** Tiveram origem em Gn 10:14 <b class="red">(Noé -> Cam -> Mizraim -> Casluim -> **Filisteus**)</b>, eles concorriam pela terra de Canaã. Era o espinho na carne e inimigos ferrenhos históricos dos Israelitas. Em Êxodo 13:17, Deus não levou o povo pelo caminho mais perto por causa dos Filisteus, para que o povo não se arrependesse vendo a guerra e voltasse para o Egito. Eles possuíam organização militar, construção naval avançada, serviço militar obrigatório, organização política, monopólio do ferro. Tudo bastante avançado para a época. De fato, era um povo que nasceu para guerrear. Além disso, cultuavam a outros deuses: Dagon (peixe, plantio e agricultura), Belzebu (moscas), Astarote (deusa da guerra). Eles roubaram a arca (1 Sm 5:1). Um outro detalhe é que Dalila era uma filisteia, já da pra entender com isso que esse povo não eram boa peça mesmo! E aí Golias veio desse povo, um gigante de aproximadamente 2,90m e que poderia decidir sozinho uma batalha. Irmãos, o mundo e seus problemas personificam os filisteus, Golias é a adversidade maior, nosso gigante, que nos faz estremecer, paralisar... durante 40 dias Golias se apresentou atormentando os Israelitas, fazendo-os tremer de medo... Quais são os seus gigantes? O que te persegue desde criança? É um trauma, pais descrentes, desemprego, aparência física, fofoca, estudos, emoções, separação, alcoolismos, vícios, pornografia, sua história em si? Quanto tempo esse gigante vai ficar te atormentando? Os Filisteus desafiaram por séculos o povo de Deus, no entanto irmãos há um momento em que Deus da um basta! E nesse momento Deus levantou Davi, um menino, mas o que faz desse menino ser diferente dos outros? As características que Davi apresentou ainda como jovem chancelaram o fato dele ser um homem segundo o coração de Deus e isso permite extrair informações valiosas para nossas vidas. 
Vou apresentar para vocês **8 características** que encontrei sobre Davi: ### CARACTERÍSTICAS DE DAVI **1 - Ocupado** Já no seu chamado (1 Sm 16), Samuel vai escolher o rei na casa de Jessé e Davi estava trabalhando. Irmãos, Deus não costuma levantar pessoas desocupadas, Deus procura capacitar os ocupados: Um trabalhador ocupado com seu trabalho, um estudante ocupado com seus estudos, um desempregado ocupado buscando um emprego. Um obreiro, pastor, ocupado com a obra e com a congregação. No atual cenário do nosso país, está tudo afundando, pessoas querem dinheiro fácil, ganhar sem trabalhar, não se esforçar para ter as coisas, querem somente sombra e água fresca sem mover um músculo se quer. Se você não gosta de trabalhar, tenho uma má noticia pra você, foi Deus que fez o trabalho! Deus deu um ofício a Adão, logo o trabalho veio antes da queda do homem, claro que o trabalho era bem mais prazeroso e sem o suor como agora! Outras passagens que Deus fala acerca do trabalho estão em Salmos 128:1-2; Pv 6:1-6; Jo 5:17 (Durante a cura do paralítico). Outro exemplo que sempre gosto de citar é de Paulo, que trabalhava fazendo tendas (Atos 18:1-3; 1 Ts 2:9-10). Se o Senhor mandasse escolher você hoje, o que estaria fazendo? Aos mais jovens, matando aula? Enganando seus pais? Procrastinando com seu emprego? Levando seu chefe no papo? Ou estaria como Davi, ocupado, trabalhando? Você deve está ocupado para ser separado para uma condição que te leve a vencer seus gigantes! O evangelho das gerações passadas tinham uma autoridade espiritual e social elevada, pois estavam baseados no trabalho e não em uma forma de sobrevivência ao qual temos hoje. **2 - Responsabilidade (17:20)** Em 1 Sm 17:17-18, Jessé chama Davi para ir deixar alimentos para seus irmãos. Davi ouviu e obedeceu seu pai. As vezes seu gigante cresceu por que você não ouviu seu pai, sua mãe, seu líder, seu pastor... 
Deus não usa qualquer pessoa, você precisa mostrar as qualidades que lhe credenciam para tal função e para todas elas você precisa ser responsável e ter responsabilidade com aquilo a que Deus o incumbir. **3 - Atento (17:23)** Davi sabia e ouvia os insultos dos filisteus e viu a aflição do seu povo. Esteja atento aos detalhes que acontecem, esteja atento e sensível àquilo que o Senhor falará com você. **4 - Prudente (17:26)** Davi era prudente, quando ele falou: <b class="gray">"Quem é, pois, este incircunciso filisteu, para afrontar os exércitos do Deus vivo?"</b>. Ele sabia o que estava falando, ele sabia que suas palavras seriam como uma afronta, um desafio, um chamado para uma iminente guerra, mas ele já havia pensado nas consequências e entregado a causa a Deus. Irmãos, quantas vezes tomamos as decisões erradas por que não nos certificamos das circunstâncias. A pressa nos leva a supervalorizar os nossos problemas. Procurem sempre parar e pensar, mas principalmente orar antes de tomar qualquer decisão, sejam em palavras ou em atitudes. **5 - Corajoso (17:26)** Característica óbvia pra quem precisa encarar um monstro, era natural que Davi tivesse isso, mas o fato é que Davi não se deixou levar pelas circunstâncias adversas. Um gigante, forte, assustador, humanamente impossível de ser derrotado por um menino, todo cheio de armadura e bem preparado. Davi poderia ter ficado calado, recusado, se escondido, mas ele foi corajoso, pois sabia que estava com Cristo e essa luta seria conduzida por Ele e não pela força de Davi. **6 - Pensa positivo (17:32-33)** Davi pensa positivo enquanto Saul tenta desqualificá-lo, dizendo que ele não é capaz. Davi se justifica e recorre à sua experiência cotidiana e mostra que ela o capacitou para enfrentar o mal no dia que chegar. Pois Davi lá no campo estava adquirindo experiências com várias outras coisas e com isso sempre busca pensar positivo. 
Irmãos, um detalhe aqui, tenham cuidado com as pessoas que vocês ouvirão conselhos, algumas são usadas por Deus, porém outras vem apenas para tentar diminuir e estragar seus planos e sonhos. Muito cuidado com as pessoas nas quais vocês contarão seus objetivos e sonhos, pois nem todas essas pessoas estão aptas a ouvi-los. **7 - Aprender a lidar com os problemas (17:34)** Não desperdice momento com lágrimas e lástimas, chorando pelo seu gigante, faça como Davi: vá e treine. Lembrem-se das experiências anteriores de Davi, a Bíblia fala de Leão e Urso, mas certamente houveram outras coisas. Pequenas oportunidades te preparam para oportunidades maiores; pequenas experiências vão sendo acumuladas para te preparar a resolver grandes problemas. **8 - Aprenda com o que está em volta (17:40)** Davi não escolheu nada do que não havia costume, ele não escolheu armas de guerras, escolheu objetos que já tinha costume: cajado, seixo, funda. Davi não precisou inventar nada. Aprendeu com o que estava em sua volta, muitas vezes temos tudo para derrubar nosso gigante e não conseguimos enxergar. E é com essas características, meus irmãos, que Davi responde ao filisteu com uma convicção tão grande, de quem tem uma enorme intimidade com o Senhor, de quem se apossa das bençãos prometidas e diz: <b class="gray">"Tu vens a mim com espada, e com lança, e com escudo; porém eu venho a ti em nome do Senhor dos Exércitos, o Deus dos exércitos de Israel, a quem tens afrontado. 1 Samuel 17:45"</b> Nesse momento, antes mesmo de Davi derrubar Golias, ele já tem vencido a guerra! As suas armas não eram palpáveis, não foi a funda que venceu a guerra, ela foi apenas o instrumento usado para desferir um dano físico, no entanto a sua arma principal foram seus joelhos no chão, foi confiar nas promessas do Senhor, foi crer no Deus de Israel, no Deus do impossível, no Deus que não deixa seu povo se envergonhar e nem servir de chacota sendo humilhado por incrédulos. Acreditem fielmente nesse Deus! 
E como diz no verso 47, do Senhor é a guerra, parafraseando, não importa o instrumento a ser usado, quer seja espada, quer seja lança, o Senhor entregará nas nossas mãos o instrumento necessário para vencê-la. Davi vai ao encontro de Golias, toma uma pedra do alforje e com uma funda, acerta na testa e o gigante cai... simples assim, não pela sua força, mas pela sua convicção no Deus todo poderoso! Então meus irmãos, quais são seus gigantes? É um problema no trabalho? É um problema familiar? É um problema na igreja? É um problema de saúde? É um divórcio? Algum problema na escola? Um concurso público? Seja qual for o seu gigante eu quero encorajá-los a serem como Davi e enfrentarem seus gigantes, com fé, com convicção, segurando firme e forte naquele que te dá a força necessária para superar qualquer obstáculo! Aquele que é grande, que é Santo, que é poderoso e que nos quer mais que vencedores, esse sim merece toda a nossa honra, louvor e adoração. Amém!<file_sep>/README.md # My Personal Blog *NOTE: Blog in Portuguese* My personal blog created with GitHub Pages and Jekyll. ## Dependencies - Ruby 2.3.3 version - Jekyll 3.4.0 version - Jekyll-feed 0.8.0 version - Jekyll-paginate 1.1.0 version - Jekyll-seo-tag 2.1.0 version - Jemoji 0.8.0 version ## Installation On terminal: 1. `git clone <EMAIL>:eltonsantos/eltonsantos.github.io`. 2. `cd eltonsantos.github.io`. 3. `bundle install`. 4. run `jekyll s`. ## Usage In your browser access `localhost:4000`. ## Contributing Bug reports and pull requests are welcome on GitHub at https://github.com/eltonsantos/eltonsantos.github.io. 
## License The code is available as open source under the terms of the [MIT License](http://opensource.org/licenses/MIT).<file_sep>/_posts/2017-12-22-devocional-22-12-2017.md --- layout: post title: Devocional - 22/12/2017 categories: devocionais image: reflexao.jpg logo: reflexao.jpg description: Devocional - 22/12/2017 - Êxodo 8:25, Êxodo 8:28, Êxodo 10:1-11 e Êxodo 10:24-26 url: https://eltonsantos.github.io/devocionais/devocional-22-12-2017/ author: <NAME> comments: true --- <p class="intro"><span class="dropcap">P</span>az de Deus amados irmãos e irmãs, hoje quero fazer uma reflexão acerca do livro de Êxodo, sobre alguns versículos e vamos caminhar por ele um pouquinho. O tema do devocional de hoje é: "Não deixe <b>NADA</b> nas mãos do inimigo e tome de volta aquilo que o inimigo levou em nome de Jesus!".</p> Tomemos nota sobre algumas figuras representativas aqui: o faraó representa o inimigo, Israel representa o povo, o Egito representa o mundo, a escravidão representa o pecado e a libertação representa a nossa redenção em Cristo Jesus. Quando Israel estava pra sair do Egito, faraó fez quatro proposta pra Moisés para reter o povo na escravidão: **1ª Proposta - Êxodo 8:25** Adore a Deus no Egito mesmo, significa: quer ser crente? Que seja, não tem problema não, você não precisa mudar de vida. Se você fazia coisas erradas, pode continuar fazendo, se tinham relações sexuais antes do casamento podem continuar a ter. Se antes você via pornografia de madrugada, pode continuar vendo. Vá pra igreja, mas não precisa mudar de vida. Isso é proposta do diabo meus irmãos, e Moisés disse: nós vamos embora, não vamos levantar altar, aqui não é nosso lugar. Então faraó partiu para a segunda proposta; **2ª Proposta - Êxodo 8:28** Vá embora do Egito, mas fique perto, que significa: fique na igreja, mas curta o melhor da igreja e do mundo. Seja um crente meio a meio, mas aí Moisés voltou a afirmar: nós vamos embora! Irmãos, não dá para amar o mundo e a Deus ao mesmo tempo! 
Agora me digam, quantas pessoas que se dizem crente estão agarrando essa proposta heim? Tantos quantos nos dias atuais sucumbiram a essa proposta de faraó! **3ª Proposta - Êxodo 10:1-11** Aí o faraó partiu pra terceira proposta: Querem ir embora? Ta certo, mas os filhos de vocês ficam! Vocês servem a Deus e os filhos de vocês servem ao Egito. Divida a família. Irmãos, lugar de jovem curtir a vida NÃO é no mundo, diferente do que muitos pensam por aí. Lugar de jovem curtir a vida é no altar de Deus, é no altar de Deus que tem a plenitude da alegria. **4ª Proposta - Êxodo 10:24-26** Aí o faraó partiu para a última proposta: Pode ir embora, leve todo mundo, mas os rebanhos de vocês ficam. O que isso quer dizer é: você pode servir a Deus, mas o seu dinheiro serve ao mundo. E a resposta de Moisés é magnifica: nenhuma unha ficará no Egito. Ou seja, tudo o que somos e o que temos veio de Deus, é de Deus e está a serviço de Deus. Não deixe NADA nas mãos do inimigo, não desista de Sua família, não desista daquilo que Ele prometeu. **Lembre-se: O que Deus promete pra você é chegada certa e não caminhada fácil, é a vitoria e não a ausência de luta. Não deixe NADA nas mãos do inimigo e tome de volta aquilo que ele levou em nome de Jesus!** Que Deus abençoe a todos!<file_sep>/_posts/2017-06-18-tudo-e-vaidade.md --- layout: post title: Tudo é vaidade categories: devocionais image: vaidade.jpg logo: vaidade.jpg description: Tudo é vaidade - Eclesiastes 1:1-18 url: https://eltonsantos.github.io/devocionais/tudo-e-vaidade/ author: <NAME> comments: true --- "Vaidade de vaidades, diz o pregador, vaidade de vaidades! Tudo é vaidade." (Eclesiastes 1:2) <p class="intro"><span class="dropcap">P</span>az do Senhor a todos, hoje vou falar sobre o livro de Eclesiastes, este livro foi escrito por Salomão, filho de Davi, ao qual, no livro, se denomina o pregador. 
Salomão era conhecido por duas coisas, pela sua sabedoria, o homem mais sábio que pisou na terra e pela sua riqueza, conforme o que dissera Deus em 2 Crônicas 1:11 e ele escreveu três livros, Cantares durante sua juventude, Provérbios na sua fase adulta e Eclesiastes durante sua maturidade, ou terceira idade, ou velhice, como queiram chamar.</p> O que Salomão quis dizer com tantas repetições da palavra vaidade? Ele quis dar ênfase, destaque, queria chamar a atenção para algo tão importante. Na língua portuguesa quando queremos dar ênfase a algo importante colocamos em letra maiúscula, entre parêntese, entre aspas, em negrito; no hebraico quando se quer dar ênfase, enfatizar algo importante a palavra se repete, conforme pode ser lido nesse versículo e também em outro exemplo como Isaías 6:3, que diz: <b class="red">"Santo, Santo, Santo é o Senhor dos exércitos; toda a terra está cheia de Sua glória"</b>. Onde Isaías declara o atributo mais importante de Deus, e quis dizer que Deus é completamente Santo, por isso há essa repetição de palavras. Salomão era um homem sábio, significa que era um homem sereno, controlado, reflexivo, e isso não vem do homem, a sabedoria é dom de Deus e o seu princípio é o temor ao Senhor (_Provérbios 9:10_). E quando se falou de vaidade tantas vezes ele quis dizer que a vida é uma ilusão, o dinheiro é uma ilusão, os inúmeros romances são ilusões, a beleza é uma ilusão, tudo é ilusão, tudo é vaidade. Salomão teve tudo, teve mulheres de todos os tipos, dinheiro, tudo do bom e do melhor, recebia toneladas e toneladas de ouro por ano, carros, cavalos, cavaleiros, foi o maior rei depois de seu pai, Davi, e embora tenha tido tudo isso, ao final de sua vida ele reconheceu que tudo é vaidade, não serve pra nada, nada se aproveitaria. O que o mundo ensina sobre a felicidade, sobre o que é a felicidade pode ser muito bem confrontado com o exemplo de Salomão. 
O mundo ensina que o "ter" é o que nos faz feliz, que o prazer é o que nos faz feliz, que a felicidade se obtém através dos prestígios, das posses que podemos adquirir. É como se você fosse encontrar paz, satisfação, plenitude, razão de viver e felicidade em conquistas materiais obtidas durante sua vida. Ora, se essa filosofia que é tão disseminada pelo mundo fosse correta, não seria Salomão o homem mais feliz do mundo? Não seria Salomão o homem mais pleno e satisfeito que já tivera pisado antes na terra? Em 1 Reis a partir do capítulo 4 narra a história de Salomão como rei e a partir do 10 vai mostrando sua rotina, como que eram suas posses, quem era esse homem em relação a suas posses. O homem com bastante propriedade no assunto no que se diz respeito as riquezas, a posses, a bens, a mulheres, a prazeres, escreveu que tudo é vaidade. Podemos observar alguns aspectos das posses de Salomão: **1) Fortuna (1 Reis 10:14)** Salomão recebia por ano aproximadamente 25 toneladas de ouro entre presentes, escavações, doações... De acordo com o site [Dolar hoje](http://dolarhoje.com/ouro/), uma grama de ouro custa R$ 134,00, que daria uma fortuna de R$ 3.350.000.000,00 anualmente, se fosse convertido para nossa época. Somente de ouro, fora o que recebia de tributos e impostos de outros reinos e seus respectivos negócios. Certamente, conforme o câmbio atual, sua fortuna passaria dos 5 trilhões de reais. E não parava por aí, toda sua decoração, palácios, cavalos, guarda pessoal eram banhados, revestidos de ouro. Tamanha riqueza era algo tão exuberante e absurdo que nem mesmo o homem mais rico hoje ao menos se compararia com o que Salomão obtivera durante toda sua vida. Sua riqueza era tão imensurável e abundante que chegou-se até a uma desvalorização da prata (_1 Reis 10:21_), tornando-a insignificante, bem como o cedro e outras coisas valiosas na época (_1 Reis 10:27_). Não é por acaso que Deus diz que era o rei mais rico que já tivera pisado nesse mundo (_1 Reis 10:23_). 
Salomão jamais conseguiria gastar tudo o que tinha, pois possuía mais riquezas do que poderia gastar em toda sua vida. **2) Prestígio (1 Reis 10:24)** Salomão tinha muitos prestígios, ditava o que era certo e o que era errado, era considerado termômetro social, religioso, econômico, financeiro e político da terra. Era requisitado por todas as outras autoridades existentes, conselheiro dos grandes príncipes e reis da terra e todos iam até ele em busca de ouvir seus conselhos. Excedeu em sabedoria a todos os que tiveram pisado no mundo. Seu prestígio era graças a sua sabedoria fora do comum e sua linhagem real, filho de Davi, maior rei do Antigo Testamento, e não somente isso, era de uma linhagem messiânica, da sua linhagem descenderia o Messias, o qual Salomão fazia parte. O que Salomão falava, estava falado, suas decisões eram como uma linha conceitual que dividia o certo do errado, heresias de lei. **3) Mulheres (1 Reis 11:1-3)** Como se não bastasse, além de Salomão ser o maior expoente financeiro, profissional e intelectual da época e de todos os tempos, Salomão tinha uma vida amorosa bastante ativa, tantas mulheres que precisaria de quase três anos para se deitar com a mesma mulher novamente, caso ele quisesse ter uma mulher diferente por dia. Todo o clima sexual, sentimental, romântico Salomão experimentou, seria impossível enjoar tendo pra ele essa quantidade de mulheres de diferentes raças, cor, credo, porte físico e estatura. Esse era Salomão, tendo alcançado o auge de tudo, em todas as áreas, comendo, bebendo e desfrutando de tudo quanto havia de bom na terra durante sua vida, desde sua mocidade até chegar em Eclesiastes e escrever que tudo é vaidade, que tudo o que teve não passava de vaidade. Salomão prova que o que o mundo diz sobre o conceito de felicidade está completamente equivocado. Em nenhum lugar que passou, em nenhuma posse que teve, em nenhuma riqueza que obteve, em nenhuma mulher a qual ele deitou, ele encontrou felicidade. 
Tudo é vão, tudo é ilusão, tudo é pífio, tudo é vaidade. Em 1 João 2:15-17, retrata bem o que Salomão viveu e quis dizer, pois <b class="red">aquele que ama o mundo e as coisas que há no mundo, não alcançará a felicidade</b>, pois tudo passa, essa felicidade passa, as coisas do mundo passam e só os que fazem a vontade de Deus alcançam a verdadeira felicidade. Os nossos olhos, os nossos ouvidos, a nossa carne não se cansam de buscar uma falsa felicidade, uma novidade, uma nova tecnologia, uma nova profissão, uma nova comida, uma nova mulher, uma nova aventura, somos insaciáveis até chegar a hora de irmos pra tumba, jogar tudo ao vento e tornar tudo pó. Quem se lembra constantemente daqueles que passaram? Aquilo que era novidade há dez, vinte anos, hoje não se ouve mais falar. Geração morre, geração nasce e aquilo que aconteceu entrou pra história, caindo no esquecimento, restando apenas pouca coisa pra ser lembrada. Salomão desfrutou de todas as futilidades que a vida o proporcionou. Um ciclo vicioso de futilidades entre comer, beber, se relacionar, engordar, ficar doente, emagrecer e morrer, tudo sem algum ou nenhum propósito. Vou trabalhar, e trabalhar e juntar dinheiro e isso e isso e isso o que? Vou comprar isso e vou aqui e vou ali e vou isso e vou aquilo e vai aonde? Digo isso por mim também, pois certa feita uma pessoa bastante querida minha me perguntou: sabia que isso que você está estudando não te levará em lugar nenhum? Pois eu sei que você irá passar, realizará seu sonho, mas e depois? Sempre vai querer mais e mais e não vai se saciar e nunca alcançará a felicidade. Eu, respondendo, disse: depois de conseguir meu sonho, serei feliz e quererei outro sonho e vou tentar alcançá-lo também. Respondi mal, parece até que parafraseei o que a ex-presidenta Dilma falou sobre quando alcançar a meta deveríamos dobrar a meta (rsrsrs). Vaidade! Tudo é vaidade! 
Jamais nos saciaremos com o "ter" e essa corrida pelo ter sempre cobra um alto preço, nos cobra o custo de toda nossa vida. Riqueza, poder, influência, prestígio, mulheres, tudo o que os seus olhos desejavam ele não os negou (_Eclesiastes 2:10_) e tudo isso em abundância pra no fim escrever que tudo é vaidade. Trabalhar igual maluco, estudar igual um doido, fazer uma corrida desenfreada pelo ter não vai te levar a lugar nenhum além de te deixar acabado, cansado, doente, com dor de cabeça, cheio de sequelas no corpo e no fim, jaz um pobre homem, velho, cansado e sem poder desfrutar o que pegou durante sua vida. Se tudo não passa de vaidade, de ilusão, de algo pífio e supérfluo, o que fazer então? Ficar em um mosteiro, em casa ou na igreja e não fazer mais nada ou outra coisa? Eclesiastes 12:13 tem a resposta: _"De tudo o que se tem ouvido, o fim é: Temer a Deus, e guardar Seus mandamentos; porque isto é o dever de todo o homem"_. Essa é a suma, a razão, o propósito da nossa existência: <b class="red">temer ao Senhor e obedecer as Suas Leis</b>. A única forma de encontrar a verdadeira, a real felicidade é viver para Deus, viver para a Glória do Senhor, viver pro crescimento e avanço do Reino de Deus e que possamos assim buscar constante crescimento espiritual. Meus irmãos, Salomão está nos ensinando que <b class="red">devemos viver pra Deus, obedecer Suas Leis e não nos deixarmos levar pelas coisas do mundo</b>, pois as coisas do mundo são superficiais, são passageiras, são ilusões, são vaidades de vaidade, porém aqueles que vivem para a Glória do Senhor, estes sim, encontram a verdadeira felicidade. Vivam da terra e desfrutem dela e do suor do trabalho que Deus deu a cada um, não há problema nisso, no entanto vivam pensando na eternidade, vivam com os olhos para a eternidade, andem em temor durante essa curta passagem que temos na terra, preguem o evangelho, obedeçam as escrituras e façam qualquer outra coisa para a glória de Deus! 
Para encerrar, como diz em 1 Coríntios 10:31, _"Portanto, quer comais quer bebais, ou façais, qualquer outra coisa, fazei tudo para glória de Deus"_. Seja qual for seu ministério, seja qual for seu chamado, seja qual for seu ofício, faça tudo para a honra e glória de Deus! Se for advogado, seja advogado para a glória de Deus, se for lixeiro, seja lixeiro para a glória de Deus e <b class="red">O exalte, onde quer que esteja</b>, fazendo o que estiver fazendo, pois a Ele toda a glória, o louvor e adoração pelos séculos dos séculos, amém!<file_sep>/_posts/2017-09-01-voce-e-um-protelador.md --- layout: post title: Você é um protelador? categories: devocionais image: protelar.jpg logo: protelar.jpg description: Você é um protelador? - Hebreus 5:12 url: https://eltonsantos.github.io/devocionais/voce-e-um-protelador/ author: <NAME> comments: true --- __"Porque, devendo já ser mestres pelo tempo, ainda necessitais de que se vos torne a ensinar quais sejam os primeiros rudimentos das palavras de Deus; e vos haveis feito tais que necessitais de leite, e não de sólido mantimento."__ **Hebreus 5:12** <p class="intro"><span class="dropcap">P</span>az do Senhor esteja convosco meus amados irmãos! Essa palavra foi ministrada na igreja que congrego e Deus falou tanto comigo acerca dessa palavra que na mesma semana eu postei meu testemunho aqui no blog, rsrs. Tudo por causa que eu não quero ser um protelador!</p> Você sabe o que é um protelador? De acordo com o dicionário, <b class="red">protelador é quem ou o que protela, adia, retarda ou no linguajar mais informal, empurra com a barriga, deixa pra depois</b>. Protelar é você ir adiando o que precisa ser realizado. Como diz no texto bíblico, pelo tempo já era pra ser mestre, mas ainda necessita de leite, ou seja, ainda quer ser tratado como um novo convertido, em vez de já se comportar como um crente de verdade e ter passado por uma verdadeira mudança! 
Tudo por que prefere ir empurrando com a barriga, deixando as coisas paradas em vez de já está pronto pra fazer a obra. Tendemos a ficar numa zona de conforto, a deixar pra depois, a esperar que a situação regrida a algo pior, a demorar a crescer espiritualmente, porém a verdade é que sempre encontramos desculpas para não mudar. De fato, uma das coisas mais difíceis e complicadas é mudar. As vezes preferimos nos acostumar a viver com nossos defeitos do que mudar, preferimos remediar do que prevenir outros erros, justamente por causa de que é muito duro buscar uma mudança. Irmãos, se alguém quer mudar, quer mudar a forma como vive, deve primeiro mudar a maneira como pensa. Tudo começa como pensamos, precisamos passar por uma intensa e gradativa mudança, precisamos mudar o foco. Mudanças duradouras dependem da mudança dos nossos pensamentos. Então irmãos, quais objetivos que devemos focar e os passos que devemos seguir para não desistir de mudar? Listei aqui quatro focos, objetivos ou passos que devemos seguir: **1º Foque no poder de Deus** Devemos focar em Deus e não na nossa própria força de vontade. Irmãos, a nossa força de vontade não é o bastante para termos uma mudança duradoura. Exemplo: quantas vezes você já tentou parar de mentir? De adulterar? De fornicar? De emagrecer? Ou tentou ir a academia pra ficar mais forte ou sem os pneuzinhos? Quantas foram as tentativas em vão? quantas vezes acabamos por nos frustrar? Até conseguimos por um certo tempo, porém depois perdemos a força, perdemos aquele gás inicial e enfraquecemos. <b class="red">Força de vontade não é o suficiente para mudar o curso de nossas vidas</b>. Quem está acostumado a fazer o mal e o que é errado nunca vai mudar e deixar de fazer por si mesmo. Porém a boa notícia é: __Filipenses 4:13, "Tudo posso naquele que me fortalece"__. Precisamos do Senhor! Precisamos parar de olhar para homens e precisamos focar em Deus e no seu infinito poder! Olhar pra Cristo e para a Cruz! 
**2º Foque nas coisas boas** Precisamos focar nas coisas boas e não nas dificuldades. Não estou falando de ser positivista, mas estou falando de focar naquilo que vai ocupar seu pensamento. Exemplo: Se você quer ser concursado, seu pensamento deve está na alegria de saber o resultado, deve está no ato da sua posse, de como será sua carreira como servidor e de como foi compensador ter chegado lá. Esse deve ser o foco e não em suas reprovações de percurso ou no tempo "perdido" que teve que esperar para chegar. Devemos ter cuidado com nosso foco, se estamos entupidos de pornografia, lascívia, sexo na nossa mente, é isso que vai dominá-la. <b class="red">Aquilo que ganha nossa atenção, certamente nos ganha</b>. Devemos encher nossa mente e nosso tempo com a Palavra de Deus, assim onde tem Luz, as trevas batem em retirada. Irmãos, somos transformados pela renovação da nossa mente e somos libertados pela verdade de Deus! **3º Foque no progresso** Devemos focar no progresso e não na perfeição, justamente para não desanimarmos, pois a caminhada é longa. O Senhor não está atrás de pessoas perfeitas, mas sim de pessoas verdadeiras. Mudanças são feitas por vez, por estágios e isso leva um bom tempo. Comece pedindo a Deus que mostre onde Ele quer começar primeiro. Sejamos bem específicos nesse pedido. O que você tem? É um vício? É um temperamento? É um sentimento? Busque uma vitória por dia. Se passar em frente a um bar e já não sentir tanta vontade como antes dê glória ao Senhor! Isso já é um progresso, devemos caminhar assim, de vitória em vitória, de glória em glória, de fé em fé e creia meu irmão! <b class="red">Aquele que começou a boa obra vai completá-la</b>. Deus não desiste de você e a nossa vitória vem dia após dia. Muitos desanimam por não verem mudanças imediatas, mas meu irmão quero te dizer: 30 anos no Egito e querer consertar tudo em 3 meses? Assim não dá né?! <b class="red">Mudança é uma decisão de entrar em um processo diário com Deus</b>. 
Foque no progresso de cada feito e dê glórias ao Senhor. **4º Foque em pessoas que te ajudem** Foque em pessoas que torçam por você, torçam pelo seu sucesso e não naquelas que te impedem. As más companhias corrompem os bons costumes, prefira se aproximar de boas companhias, nunca vamos melhorar se não mudarmos nossos relacionamentos interpessoais. Exemplo simples: Quem tem problema com álcool nunca vai mudar se continuar a ser influenciado pelos amigos que bebem. Diga quem tu andas, que digo quem tu és. Irmãos, precisamos de Deus e também precisamos de outras pessoas que estão na mesma busca ou torcida. Não é por acaso que os alcoólicos anônimos funcionam, apesar de nunca ter frequentado, conheço muitos exemplos que largaram o vício quando passaram a frequentar, ali um precisa do outro, estão unidos em um mesmo propósito, parar de beber. Esqueça essa de querer conseguir sozinho, <b class="red">você precisa de Deus e de pessoas que te queiram bem!</b> Se aproxime de quem tem Deus, de quem te diz a Verdade! Para concluirmos amados, peça a Deus que lhe mostre por onde Ele vai começar e não desista, não adie, não seja um protelador, foque no Senhor e no Deus que Ele é, poderoso para realizar qualquer coisa, foque nas boas coisas, foque no seu avanço diário e foque nas pessoas que te ajudam, em nome de Jesus, amém!<file_sep>/js/script.js $(window).scroll(function() { ($(this).scrollTop() > 160) ? $('nav').addClass("sticky") : $('nav').removeClass("sticky"); });<file_sep>/_posts/2017-11-12-o-privilegio-do-cristao.md --- layout: post title: Reflexão - O privilégio do cristão! 
categories: devocionais image: reflexao.jpg logo: reflexao.jpg description: Reflexão - O privilégio do Cristão - Salmo 122:1 url: https://eltonsantos.github.io/devocionais/reflexao-o-privilegio-do-cristao/ author: <NAME> comments: true --- __"Hoje me alegrei pois disseram, vamos a casa do Senhor"__ **Salmo 122:1** <p class="intro"><span class="dropcap">C</span>omo é bom servir ao nosso Deus, não há privilégio maior, não há nada que possa nos satisfazer mais do que servir Jesus Cristo. Não há sentimentos que eu mais tenha prazer em ter do que aqueles que o Espírito Santo me faz sentir. Essa é a maior honra que o homem pode ter: Ser cristão e servir a Jesus Cristo. Partilhar do glorioso evangelho.</p> Quero que vocês tenham o mesmo sentimento, que a igreja seja tudo, que Cristo seja tudo, que o evangelho seja tudo, que a fé na palavra seja tudo. Falo com propriedade no assunto, falo com base naquilo que digo, com conhecimento de causa, de um homem que não era nada e perdeu tudo por não ter desde o princípio feito de Cristo tudo pra mim. Hoje o meu prazer é está na casa do Pai, como diz aquele louvor: "Eu prefiro está no meio da congregação e no meio dessa comunhão, servindo ao meu Deus e ao meu irmão". Irmãos não buscais felicidade ou qualquer outro passatempo em outro lugar, de fato nada acharão, buscais prazer naquilo que realmente vos tratá a felicidade plena, não algo temporário. Buscais prazer nas coisas de Deus, naquilo que Ele fez e faz para nós constantemente. Glorificado seja o nome do Senhor, amém!<file_sep>/_posts/2017-12-29-ser-homem-de-deus.md --- layout: post title: Ser homem de Deus! categories: devocionais image: homemdedeus.jpg logo: homemdedeus.jpg description: Ser homem de Deus - 1 Timóteo 6:11-12 url: https://eltonsantos.github.io/devocionais/ser-homem-de-deus/ author: <NAME> comments: true --- __"Mas tu, ó homem de Deus, foge destas coisas, e segue a justiça, a piedade, a fé, o amor, a perseverança e a mansidão. 
Combata o bom combate da fé, toma posse da vida eterna, para a qual também foste chamado, tendo já feito boa confissão diante de muitas testemunhas."__ 1 Timóteo 6:11-12 <p class="intro"><span class="dropcap">P</span>az de Deus meus amados irmãos. Faz um tempo que Deus me incomoda para meditar sobre essa Palavra e hoje vou compartilhar com os irmãos o que eu aprendi sobre ela. Antes de tudo quero deixar claro que muitas vezes quando a Bíblia fala de homem, ela se refere a homem e a mulher, somente quando é algo mais específico que ela faz distinção entre ambos os sexos e de acordo com o próprio contexto podemos discernir o sexo a qual ela se refere. Então, a Palavra aqui valerá tanto para homens, quanto para mulheres, embora em alguns casos específicos eu direcionarei a Palavra ao cabeça, ao homem. Muito bem, ser homem de Deus, o que é ser homem de Deus? Talvez seja o maior título, o título mais nobre que um homem pode conseguir aqui na terra, ser chamado de homem de Deus.</p> Ser homem de Deus resumidamente é fugir disso, seguir aquilo; deixar de fazer essas coisas, fazer essas outras. Pra ser um homem de Deus é preciso fugir de tudo aquilo que tenta nos desviar da verdade. O primeiro passo que o texto fala para ser homem de Deus é fugir, resistir a esses ataques, ataques do falso evangelho e da mentira. Outro passo para ser homem de Deus é combater o bom combate da fé, ou seja, é preciso lutar por aquilo que Deus já te deu, a fé. É uma luta diária, contínua, exaustiva, mas acima de tudo recompensadora, pelos resultados, pelo aprendizado, pelos objetivos do bom combate da fé. No texto também fala em "tomar posse", aqui não confunda com o "tomar posse" que as igrejas neo pentecostais falam sobre tomar posse, que é benção, dinheiro no bolso, prosperidade, não, não é isso que a Bíblia diz. Aqui tomar posse está no sentido de se apegar, cuidar com carinho, agarrar. 
É pegar algo que você já tem se apegando a ela, agarrando-a, não a perdendo-a de vista, pois é a eternidade que é mais importante que nossa vida terrena. Todos os verbos citados no texto são fortes: Persegue as virtudes, combata a luta da fé e apegue-se a vida eterna. Com essas coisas nós seremos homens de Deus. Irmãos, eu poderia terminar aqui a mensagem que já daria pra entender o significado de ser homem de Deus, porém quero ir um pouco mais além e tratar de outro ponto sobre ser homem de Deus. O ponto está em 1 Reis 2 verso 2, depois podem conferir: Davi ta velho, estava no leito de morte e ele vai dar vários conselhos a Salomão, que vai herdar simplesmente tudo aquilo que Davi construiu, vai herdar a maior nação, maior império, vai herdar toda Israel. Salomão tinha ali entre seus 16 e 18 anos e irmãos imaginem quantos conselhos você receberia de Davi, o Rei de Israel. E o primeiro conselho que Davi da a Salomão é: meu filho seja homem! Porque? Por que ele disse isso? Por que você vai ser pai, por que você vai ser rei, por que você vai ter um ofício, por que você vai ser um profeta e todas essas coisas requerem que você tenha maturidade, requerem que você tenha virilidade, requerem que você seja homem! Meus irmãos, ele poderia ter falado qualquer coisa, poderia ter dito: Salomão ora, jejua, leia a Bíblia, mas não, o primeiro conselho que ele deu a Salomão foi: seja homem! Irmãos, Salomão foi pai, foi marido, foi monarca, foi chefe de estado, foi sacerdote, foi escritor e pra ocupar todas essas posições primeiro é preciso ser homem! Sua mulher não precisa de um menino, precisa de um homem, Israel não precisa de um menino, precisa de um homem! Seja homem, foi isso que Davi disse! Agora irmãos vou falar sobre três áreas onde nós devemos ser homens de Deus. ## Ser homem de Deus no trabalho A primeira delas é ser homem de Deus no trabalho. 
Um homem de Deus deve ser aplicado em tudo o que faz, deve ser íntegro dando um bom testemunho, deve sempre procurar ser o melhor, ter o melhor desempenho no seu trabalho ou fazer de tudo pra ter, não deve ser preguiçoso, não deve procrastinar no trabalho e nem procurar desculpas para não trabalhar. Deve ser reto no trabalho mesmo quando ninguém estiver por perto olhando ou fiscalizando. Ser homem de Deus no trabalho é se submeter ao patrão, mesmo que seu patrão seja ímpio, deve obedecer as suas instruções, exceto quando elas vão contra os princípios da moral, ética e da legalidade ou quando afrontam aos princípios sacrossantos de Deus. Não caindo nessas exceções, o cristão, sendo homem de Deus deve não somente obedece-lo, mas procurar dar seu melhor e cumprir com maestria o que lhe foi incumbido. Lembre-se que seu chefe é uma autoridade levantada por Deus e você tem por obrigação se submeter a ele como a própria Bíblia ordena. E sempre procurar ser o melhor funcionário, não para buscar primeiramente uma promoção ou um aumento salarial, pois isso vem como consequência, mas sim para honrar a Deus no ofício que Ele deu para você. ## Ser homem de Deus no casamento A próxima área que devemos ser homens de Deus e provavelmente a mais difícil devido ao grau de importância e complexidade é no casamento. Pra ser um homem de Deus no casamento, pra ser um marido bíblico, um marido excepcional, um marido segundo a Bíblia recomenda, o homem deve ser quatro coisas: **1 - Sacerdote** Sacerdote no antigo testamento era aquele que vai diante do povo até Deus. Ele é o intercessor e o responsável pela vida espiritual da família, que coloca os fardos da família nas costas e vai a Deus suplicando por perdão, misericórdia e graça. É o marido quem apresenta e representa a sua família diante de Deus. Ele deve ser um homem espiritual e ter comunhão com Deus. E se você não tem condições de assumir a responsabilidade espiritual de uma mulher, você não tem condições de casar. 
**2 - Profeta** O profeta é aquele que vai diante de Deus para o povo. Ou seja, no antigo testamento, o profeta ia até Deus em jejum, oração, recebia a mensagem e comunicava para o povo. É dever do marido, sendo homem de Deus no casamento, se debruçar nas Escrituras Sagradas, buscando uma palavra, buscando a vontade de Deus, buscando direção para aquela causa, estudando, meditando e depois quando Deus o responder, ele comunicará a família. **3 - Provedor** Quando Adão pecou Deus não disse: "do suor da tua mulher comerás", é do suor do rosto do homem comerás! A responsabilidade de prover a casa e família é a do homem. O homem tem que suprir financeiramente a sua família naquilo que é necessário: alimento, moradia, educação, vestuário, a parte médica. É o homem que tem que ter competência para prover os recursos necessários para uma vida digna. Isso não significa que a mulher não possa trabalhar, não. Mas a responsabilidade maior é a do homem e é isso que o mundo não entende e inverte os papéis! Se você não tem condições de prover os recursos materiais para uma mulher, você não pode casar. **4 - Protetor** É aquele que se coloca entre a sua família e aquilo que ameaça a sua família, quer seja físico, quer seja espiritual. Não é somente proteger em oração e jejum não, pois vai ter hora que você irá ser abordado. Irmãos, de que adianta ganhar o mundo e perder a tua casa? Ouçam isso: nenhum sucesso compensa a queda da família, nenhum. Quer ser homem de Deus no seu casamento? Honre a sua esposa! Ela é o maior tesouro que Deus te deu aqui na terra e é seu maior ministério. Você tem que amar a sua mulher a ponto de dar a vida por ela, a Bíblia não manda você dar a vida por nenhuma outra pessoa, mas ela manda você da a vida pela sua mulher. Se você tiver num barco e este estiver afundando e nele ta você, sua esposa e seu filho, e não tiver outro jeito além de ter que salvar apenas um, você tem que salvar sua mulher. 
Ai tudo bem, você pode nunca passar por essa situação ou ter motivos de ter que dar a vida pela sua mulher, pode nunca ter acontecido algo para você demonstrar que da de fato a vida pela sua esposa, mas aí eu pergunto: você é um marido que age como alguém disposto a dar a vida pela sua mulher a qualquer momento? A sua mulher precisa testificar que o marido dela a ama ao ponto de dar a própria vida por ela! Portanto deixará o HOMEM seu pai e sua mãe e unir-se-á a sua mulher. E hoje o que mais tem é homem frouxo, que só sabe fazer filho, mas não consegue assumir um lar, o que mais tem é homem que não é homem, homem. Homem que quando aperta, foge, não encara os fatos, não resolve, não mata no peito. Homens covardes, que tem medo de qualquer discussão. Irmãos, um homem, homem resolve assim: "A gente dorme 4h da manhã, mas essa contenda vai morrer aqui e agora". Isso é ser homem de Deus no casamento! E eu testifico isso como sendo um tapa na minha própria cara que se você não for HOMEM, HOMEM, você não deve se casar. ## Ser homem de Deus na vida E a última área que eu quero falar é Ser homem de Deus na vida, ser homem de Deus na vida é você está constantemente em comunhão com Ele, é está constantemente buscando ser a imagem e semelhança Dele, é fazer o Seu reino avançar. E lembre-se que: O cenário que está ai, que nós estamos vendo atualmente é impossível ser mudado por mãos humanas, é impossível ser mudado por dinheiro, por intelecto ou gritaria. Não vai mudar desse jeito irmãos. O reino das trevas está em ascensão, as igrejas corrompidas, o evangelho deturpado. Somente um homem de Deus e submisso a Ele, entregue pro cordeiro e pras Suas vontades, pode mudar o mundo. Se quiser fazer a diferença e ser um homem de Deus, você tem que pagar o preço. Deus não pode ser uma parte da sua vida, Deus tem que ser toda ela, Deus tem que ser a totalidade. Pague o preço, faz o que ninguém quer fazer, sobe aonde ninguém subiu, caminha aonde nenhum homem caminhou. Faz! 
Ser homem de Deus é isso e o Senhor está pronto para aquele que O quiser e Ele vai levantar o tal! Você vai ter que renunciar tudo que vem do mundo, vai te custar uma renuncia, adeus prazer, adeus sonhos materiais, ADEUS! Deus não pode dividir a sua vida ministerial com seu prazer pessoal. E quanto maior o nível que você chegar, quanto maior o nível que você desejar, maior será a perseguição que virá contra você, maior o numero de demônios que vão combater sua vida. Não será fácil! E Jesus deixou claro isso, pois Ele não garantiu jornada tranquila, mas sim, chegada certa para aqueles que O seguirem. E quem O seguir, e quem Deus chamar, quem Deus chamar para o Seu ministério, não pode mais voltar atrás, não tem mais volta, você se torna Dele, você está selado no céu, você é obreiro, você é lavoura e cooperador da obra Dele. Irmãos, nós vamos dizer como Paulo: __"Sinto prazer nas fraquezas, nos açoites, nas fomes, nos naufrágios, para que o poder de Cristo se aperfeiçoe em mim. Por que quando eu estou fraco, então é que estou forte. Quando sou perseguido, oprimido, rejeitado, açoitado, quando não tenho nada no mundo eu tenho tudo no céu, quando o mundo me desampara, Deus me recolhe com a Sua graça!"__ Um homem de Deus não pode viver sem perseguição, açoites, sem ter sua vida com Cristo consagrada. É tudo pra Ele, é tudo pra Deus! É isso que é ser homem de Deus na vida. Tudo para Sua honra, Tudo para Sua glória, Seu louvor e Sua adoração! E por fim, irmãos, que Deus desperte essa vontade de ser homem de Deus em cada um de nós, pois somente Ele tem o poder de nos tocar. Que nós possamos entregar nossa vida a Cristo incondicionalmente e guardar a Sua Palavra e o espírito Santo venha a nos chamar e verdadeiramente sejamos conhecidos por Deus e pelos homens como: homens de Deus! 
Que Deus abençoe a todos meus amados irmãos, agradeço mais uma vez a rica oportunidade, é tudo para honra e glória do Senhor, amém, amém e amém!<file_sep>/_posts/2017-07-29-a-verdadeira-batalha-espiritual.md --- layout: post title: A verdadeira batalha espiritual categories: devocionais image: armadura1.jpg logo: armadura1.jpg description: A verdadeira batalha espiritual - Efésios 6:11-17 url: https://eltonsantos.github.io/devocionais/a-verdadeira-batalha-espiritual author: <NAME> comments: true --- __"Revesti-vos de toda a armadura de Deus, para que possais estar firmes contra as astutas ciladas do diabo. Porque não temos que lutar contra a carne e o sangue, mas, sim, contra os principados, contra as potestades, contra os príncipes das trevas deste século, contra as hostes espirituais da maldade, nos lugares celestiais. Portanto, tomai toda a armadura de Deus, para que possais resistir no dia mau e, havendo feito tudo, ficar firmes. Estai, pois, firmes, tendo cingidos os vossos lombos com a verdade, e vestida a couraça da justiça; E calçados os pés na preparação do evangelho da paz; Tomando sobretudo o escudo da fé, com o qual podereis apagar todos os dardos inflamados do maligno. Tomai também o capacete da salvação, e a espada do Espírito, que é a palavra de Deus;"__ *- Efésios 6:11-17* <p class="intro"><span class="dropcap">P</span>az de Deus a todos, meus amados irmãos, hoje o Senhor pôs em meu coração essa passagem sobre o que é <b class="red">a verdadeira batalha espiritual</b>, aquele famigerado "dia mau", aquele dia em que parece que todas as armas do inimigo estão apontadas para você e mais importante do que saber o que é e identificá-lo é saber como vencê-lo. Então, como vencer o dia mau, como lidar com esse problema? Paulo nos dá a receita, mas antes, vamos identificá-lo.</p> Primeiro, basicamente o que é esse dia? 
Esse dia não é um dia qualquer onde coisas ruins sem gravidade acontecem com você, não é um dia em que você sofre um simples abalo, sai ileso e diz nossa que dia! Não, não, não. Esse é um dia bem mais complicado e complexo, é um dia onde você descobre ou acontecem coisas absurdas ao seu respeito, talvez uma perda do emprego sem você está prevendo, totalmente abrupta, talvez um acidente grave, talvez uma descoberta de uma doença, talvez uma cena chocante como encontrar seu marido ou esposa entrando em um motel e cometendo pecado, talvez uma morte de um ente querido, e que por conta dessas coisas esse dia possa acarretar em depressão, perda da lucidez, insanidade e te levar ao fundo do poço. Esse é o dia mau. Paulo diz que o dia mau vem, isso é um fato e nós sentimos quando estamos passando por ele e parece que nunca tem fim pelo impacto que ele faz em nossas vidas, porém Paulo diz que esse dia pode ser resistido, vencido, superado e a resposta está aqui: **"Portanto, tomai toda a armadura de Deus"** Se você não estiver totalmente revestido dessa armadura, não há como prevalecer. Paulo ilustra o dia mau como um combate, como uma luta. Na época de Paulo, o império Romano estava no seu auge, dominavam, eram máquinas de guerra. Naquela época, existiam os centuriões, homens do alto escalação do exército romano, extremamente fortes e preparados que usavam armadura quase que intranspassável, com seus escudos gigantes e espadas afiadas. O exército romano era praticamente imbatível para época, enquanto os oponentes vinham com paus e pedras, o exército romano faziam estratégias militares que destruíam facilmente vários homens. 
E por conta disso, Paulo fez essa analogia em dizer por que o império romano saia vitorioso dos seus combates: por causa que eles tinham o <b class="red">capacete da salvação</b>, a <b class="red">couraça da justiça</b>, o <b class="red">cinturão da verdade</b>, nos pés calçavam as <b class="red">sandálias do evangelho da paz</b>, no braço esquerdo o <b class="red">escudo da fé</b> e no braço direito a <b class="red">espada do Espírito</b> e uma vez que um soldado romano se reveste com toda essa armadura ele se torna invencível e não há como ser derrotado. É essa analogia que Paulo diz, que nós temos combates, mas não é contra carne, nem sangue, nem contra fulano, nem cicrano, mas sim contra as forças do mal, contra as astutas cilada do diabo, contra as hostes de satanás. **Tomem a armadura de Deus, para vencer essa guerra!** Meus irmãos, não pensem que essa armadura é física, não pensem que essa armadura é palpável. Não pensem que venceremos o diabo com a força dos nossos punhos, com dizeres do tipo: "ta amarrado", isso não existe, não faz sentido, a Bíblia não relata nenhum trecho onde um apostolo amarra um demônio, isso não é armadura, isso não vai funcionar, isso é mais pra uma alegoria de circo. Entendam, estamos falando de uma armadura espiritual, onde o campo de batalha também é espiritual, é na nossa mente e no nosso coração. A batalha não é física, onde o diabo vai arranhar sua parede, sua janela, até por que o diabo não teria pretensão alguma de fazer um risco em sua parede, é ridículo pensar assim. O campo de batalha é pra você desisti, desviar, apostatar de sua fé, pra você perde a sua fé e a sua crença no seu Deus. Paulo fala em lutas, fazendo uma analogia de corpo a corpo, onde é o diabo tentando contra nossa vida, pessoalmente, sondando nossas atitudes, nossos atos, nossas palavras, sentimentos e pensamentos que ele quer comprometer, é aí onde é o seu ataque. Quer vencer o dia mau? Novamente, **revistam-se da armadura de Deus**, meus irmãos! 
Em resumo, o que é essa armadura? Quando Paulo fala de coisas físicas, concretas sobre essa armadura, na verdade não é isso que está em questão, o que ele realmente quer dizer são as qualificações que vem em seguida. E essa armadura é composta por: #### Cinturão da verdade (Efésios 6:14a) O cinturão da verdade garantia defesa do quadril e os protegiam dos golpes baixos. O maior golpe baixo do inimigo é a mentira. A verdade e a mentira são coisas opostas; A verdade é a Palavra de Deus, por isso essa é a Sua primeira defesa. #### Couraça da justiça (Efésios 6:14b) Essa couraça é feita para proteger todo o abdômen, tórax e seus órgãos vitais, sendo o coração o mais importante, justamente por ser o órgão que o inimigo mais ataca. E uma vez ferido pelo inimigo, a sua função original deixa de funcionar. Por isso sejamos justos, aplicando a Palavra de Deus em nosso viver para formar defesas que guardam o nosso coração contra os dardos inflamados de Satanás. #### Sandálias do evangelho da paz (Efésios 6:15) Ter os pés calçados com as sandálias significa estar disposto a anunciar o evangelho por todo lugar, mesmo sendo o território do inimigo, por isso os nossos pés precisam estar protegidos e firmados no evangelho para que possamos através Dele levar a paz por onde quer que passemos, resgatando vidas e libertando cativos aprisionados. #### Escudo da fé (Efésios 6:16) Os escudos enormes dos soldados romanos cobriam todas as brechas e evitavam qualquer ataque dos adversários, de tal forma é o escudo da fé, pois uma vez segurando ele, com fé, cobrimos todas as brechas que o maligno usa para plantar dúvidas sobre a Palavra de Deus. E por meio da oração e da fé, fecharemos todas as brechas para evitarmos qualquer ataque contra nós. #### Capacete da salvação (Efésios 6:17a) A cabeça é o membro que comanda o nosso corpo. Se essa apresentar fraqueza, tudo estará perdido. O capacete guardará a nossa mente de ser atingida por outro evangelho, que não é o de Cristo. 
Com o capacete da salvação, somos guardados de todos os ventos de doutrina que Satanás usa para nos tirarmos da presença de Deus. #### Espada do Espírito (Efésios 6:17b) Tomar a palavra de Deus que é a nossa espada. A espada é a arma de ataque contra o inimigo de Deus. Através dela, vencemos todas as tentações que o maligno coloca diante de nós. A Palavra de Deus é o que nos alimenta, nos fortalece e nos encoraja a prosseguir dia após dia. Revestir-se dessa armadura completa é revestir-se dos atributos morais de Cristo Jesus. Toda a armadura se resume a: <b class="red">Santidade!</b> Quer vencer satanás? **Tenha santidade, essa sim é a arma mais eficaz,** toda a armadura se resume a santidade, pureza, obediência a Deus. O diabo não vai obedecer a um devasso que acha que fazendo tudo errado, que fica pegando óleo jogando em todo canto, falando "ta amarrado", ta isso e aquilo outro e uma vez que ele não está revestido com a verdadeira armadura, que é ter pureza de coração, ter honestidade com os dinheiros, ter amor a sua esposa, ter uma vida de oração, de jejum, de submissão a Deus, essa pessoa vai ser esbofeteada pelo inimigo! Uma doutrina neo pentecostal que vive uma vida toda errada, sem orar, sem jejuar, sem ser santo, fazendo coisas feias e erradas, mentindo, levantando falso e vem dizer: ta amarrado, ta queimado, rodopia, pula, planta bananeira, da cambalhota, bate a cabeça na parede, sei lá o que, isso não tem força nenhuma contra o inimigo se não tiver santidade! Então amados irmãos, oremos para que possamos buscar essa santidade constantemente e dessa forma nos revestiremos dessa armadura de Deus e para que assim possamos triunfar sobre as setas malignas. Que Deus abençoe a todos e que essa palavra tenha tocado em seus corações para a honra, glória, louvor e adoração ao nosso Deus. 
Amém!<file_sep>/_posts/2017-04-22-instalando-tomcat-windows10.md --- layout: post title: Instalando e configurando o Tomcat no Windows 10 categories: artigos image: tomcat.png logo: tomcat.png description: Instalando e configurando o Tomcat no Windows 10. url: https://eltonsantos.github.io/artigos/instalando-tomcat-windows10/ author: <NAME> comments: true --- <p class="intro"><span class="dropcap">H</span>oje vou ensinar para vocês mais um tutorial simples, porém bastante necessário que é a instalação do Tomcat no Windows 10.</p> Antes de tudo, certifique-se que já tenha instalado e configurado o Java em sua máquina. Caso não tenha, clique [aqui](https://eltonsantos.github.io/artigos/instalando-e-configurando-o-java-no-windows-10/). **O que é Tomcat?** Em resumo, Tomcat é um _servidor de aplicações web_ desenvolvido pela _Apache Software Foundation_, sendo mais especificamente um _container de servlet_, onde container pode ser entendido como um repositório para componentes de negócio. Agora que já sabemos o motivo que precisamos dele, sigam o passo a passo para deixar tudo 100% na máquina. ## Passo 1 Acesse a página oficial de download do [Tomcat](http://tomcat.apache.org/download-90.cgi) e baixe o instalador .exe * 32-bit/64-bit Windows Service Installer ## Passo 2 Após o download, vá clicando em Next normalmente. Na tela de "Tomcat basic configuration", na opção de "Tomcat Administrator Login" é opcional colocar um login e senha enquanto as demais opções não precisam ser modificadas. ## Passo 3 Na próxima tela certifique-se que o Tomcat esteja apontando corretamente para o **JRE** instalado em sua máquina. ## Passo 4 Não é necessário mudar o caminho que o Tomcat sugeriu, no meu caso foi: ```C:\Program Files\Apache Software Foundation\Tomcat 9.0```, porém há um padrão adotado de renomear e instalar nesse caminho, por exemplo: ```C:\Tomcat```, mas não há necessidade, apenas para facilitar. E continue com a instalação.
## Passo 5 Após concluir o processo, escolha rodar o apache, nesse momento um ícone próximo ao canto inferior direito aparecerá e o verde significa que o serviço está ativo e rodando. Com o serviço rodando basta digitar em seu navegador ```http://localhost:8080``` e pronto, o Tomcat já está devidamente instalado em sua máquina. Simples não? ### Nota **Outras configurações** Caso queira parar o processo do Tomcat ou escolher o tipo de inicialização clique com o botão direito no ícone que surgiu e clique na opção **Configure...**. Aqui você pode fazer diversas coisas além das citadas, como mudar o caminho padrão do Tomcat (não recomendo). ### Nota 2 **Instalação manual** Vale lembrar também que a instalação do .exe já configura automaticamente as variáveis de ambiente. Caso queira instalar manualmente (baixando o .zip) é necessário colocar o Tomcat em C:\ (para facilitar), configurar a variável de ambiente **CATALINA_BASE** e apontar para o local do Tomcat da mesma forma como foi feita na instalação do Java. E também criar uma outra variável chamada **CLASSPATH** e apontar o caminho da lib servlet, por exemplo: ```C:\tomcat\common\lib\servlet-api.jar;``` E após tudo isso abrir prompt de comando, ir até o diretório ```C:\tomcat\bin``` e digitar ```startup.bat``` para iniciar o Tomcat e acessar pelo ```http://localhost:8080```. Bem mais trabalhoso não é mesmo? É isso pessoal, a não ser que tenha um motivo maior, escolha a instalação automática quando se está em ambiente Windows. Um grande abraço!<file_sep>/_posts/2017-03-12-deus-esta-no-controle.md --- layout: post title: Deus está no controle categories: devocionais image: controle.jpg logo: controle.jpg description: Deus está no controle - Provérbios 3:5-6. url: https://eltonsantos.github.io/devocionais/deus-esta-no-controle/ author: <NAME> comments: true --- >__Confia no Senhor de todo o teu coração, e não te estribes no teu próprio entendimento. 
Reconhece-O em todos os teus caminhos, e Ele endireitará as tuas veredas.__ Pv. 3:5-6 <p class="intro"><span class="dropcap">Q</span>uantas vezes queremos fazer as coisas por conta própria? Quantas vezes queremos fazer do nosso jeito? Quantas vezes queremos fazer as coisas no nosso tempo? E conto uma novidade, quantas vezes fazendo dessa maneira nos prejudicamos ainda mais? Dificultamos ainda mais a situação e quebramos a cara por diversas vezes?</p> Sinceramente, eu não sei se acontece com todo mundo, pois conheço gente - pessoas não crentes - que parecem viver um mar de rosas sempre (ou pelo menos disfarçam muito bem). Bom, uma coisa que Bíblia diz para os que creem: que nessa vida você vai encontrar dificuldades, vai sofrer, vai chorar, mas a Bíblia também nos dá um grande conforto, pois ela mesmo diz que Jesus venceu o mundo e por isso não devemos nos desanimar. E é justamente por isso que devemos ficar bem em meio a tribulação, pois Ele está no controle. Ele sabe o que precisamos e a hora que precisamos. Nós apenas devemos dá nosso primeiro passo e confiar Nele com todo nosso entendimento e fé. Contando um breve e resumido testemunho recente do que aconteceu comigo quando entrei no meu emprego atual. Eu havia feito um teste para a empresa pública em que estou agora e não fui aprovado, fiquei em segundo lugar. E praticamente um mês depois, eu já tinha esquecido tudo, estava descansando no Senhor e orando. Estava despreocupado com situação financeira, pois eu sabia que cedo ou tarde o Senhor me colocaria no lugar em que Ele havia reservado pra mim. E numa tarde como outra qualquer recebo uma ligação de lá dizendo, resumidamente: Junte seus documentos o mais breve possível que você já começará a trabalhar semana que vem! Eu fiquei assustado, espantado e soube na hora que foi Deus que me deu isso. E curiosamente eu perguntei: O que aconteceu? E a pessoa do outro lado respondeu: O primeiro lugar desistiu! Pronto! 
Não há como dizer que Deus não estava no controle da situação. Por mim, eu teria ficado em segundo lugar! Por mim, eu não teria entrado nessa empresa. No entanto, Deus moveu "seus pauzinhos" e me colocou onde Ele quis que eu estivesse. Eu resumi bem essa história para não deixar o texto longo demais, porém eu não posso deixar de dizer novamente: Deus está no controle! E para testificar que realmente foi obra do Senhor, quando eu cheguei lá no primeiro dia meu supervisor perguntou: "Você sabe por que está aqui?". Eu respondi: "Porque o Senhor quis, Ele me pôs aqui". E o supervisor respondeu: "Eu sabia que isso era fruto de joelho no chão, não poderia ser outra coisa"! Irmãos amados, não nos preocupemos tanto com o que há de vir, Deus está no controle, basta confiarmos Nele. As tempestades virão sim, mas não pereceremos nelas, pois Deus possui o controle de todas as coisas e Ele sabe o tempo certo de nos levar ao deserto para nos ensinar o Seu amor e nos tirar de lá e nos colocar onde Ele quer. <span class="gray">_"Você me leva ao deserto pra falar de amor, me deixa passar pelo vale pra mostrar que está comigo. Me põe no meio da tempestade e pinta um arco iris pra me mostrar que tua fidelidade não acabou."_</span> <small class="gray"> __Trecho da música Você Me Leva Ao Deserto, do Ministério Zoe__ </small> Amém!<file_sep>/_posts/2018-01-31-devocional-31-01-2018.md --- layout: post title: Devocional - 31/01/2018 categories: devocionais image: reflexao.jpg logo: reflexao.jpg description: Devocional - 31/01/2018 url: https://eltonsantos.github.io/devocionais/devocional-31-01-2018/ author: <NAME> comments: true --- __"E Jó tomou um caco para se raspar com ele; e estava assentado no meio da cinza. Então sua mulher lhe disse: Ainda reténs a tua sinceridade? Amaldiçoa a Deus, e morre. Porém ele lhe disse: Como fala qualquer doida, falas tu; receberemos o bem de Deus, e não receberíamos o mal?
Em tudo isto não pecou Jó com os seus lábios."__ Jó 2:8-10 <p class="intro"><span class="dropcap">P</span>az de Deus a todos. Seguindo a ordem cronológica da Bíblia que estou lendo atualmente, neste devocional venho meditar hoje sobre o livro de Jó. Jó foi um homem que teve sua vida abalada em todas as áreas, teve suas finanças abaladas, sofreu com a morte de todos seus filhos, foi acometido com problemas de saúde e mesmo tendo sido atingido em todas essas áreas a Palavra de Deus diz que Jó em nenhum momento pecou contra o Senhor.</p> Irmãos, quando você estiver passando por sofrimento ou se você já estiver passando por tribulações lembre-se que Deus permite que o sol nasça para os justos e injustos, para os bons e para os maus. O fato de ser fiel ao Senhor não significa que estejamos imunes ao sofrimento, tampouco significa que estamos em pecado, pois nem toda tribulação que passamos na vida é devida as consequências de nossos atos. Muitas vezes Deus permite que o inimigo nos ataque em diversas áreas, mas devemos nos manter firmes, porque mesmo não parecendo tudo está sobre o controle de Deus. Lembre-se que nossa vida é passageira e a nossa esperança em Cristo não pode se limitar a somente ser feliz nessa vida. Deus tem planos eternos e que vão além da nossa compreensão, podendo ser compreendidos somente com o sofrimento. E é por isso que no capítulo 42, Jó disse que antes conhecia Deus apenas de ouvir falar, mas que agora os seus olhos puderam ver a Deus, ou seja, Jó teve que passar por tudo isso pra saber de fato quem é o Deus a quem ele servia. Por fim, como diz no capítulo 5 de Romanos, devemos ser fortes e nos alegrar nos momentos de tribulações, pois as tribulações produzem perseverança e a perseverança produz experiência e a experiência produz esperança e a esperança não causa confusão, porquanto o amor de Deus está derramado em nossos corações pelo Espírito Santo que nos foi dado. 
Que o Senhor abençoe a todos, fiquem na paz de Deus.<file_sep>/_posts/2018-02-26-solus-christus.md --- layout: post title: Estudo dos solas:&nbsp; Solus Christus categories: devocionais image: soluschristus2.jpg logo: soluschristus2.jpg description: Parte 4 - Solus Christus - Atos 4:12 url: https://eltonsantos.github.io/devocionais/solus-christus/ author: <NAME> comments: true --- __"E em nenhum outro há salvação, porque também debaixo do céu nenhum outro nome há, dado entre os homens, pelo qual devamos ser salvos."__ Atos 4:12 <p class="intro"><span class="dropcap">P</span>az de Deus meus amados irmãos, hoje vamos estudar o penúltimo sola da reforma protestante, o solus Christus ou Somente Cristo. Como havia dito no estudo passado, quando se fala de fé, a questão não é acreditar, mas sim em quem acreditar? Devemos crer em Cristo Jesus, como diz em João 6:47: "Na verdade, na verdade vos digo que aquele que crê em mim tem a vida eterna". E é nisso em que se baseia esse sola, em crer em Jesus Cristo como nosso Salvador.</p> A reforma protestante do século 16 veio pra trazer de volta um retorno a leitura das Escrituras Sagradas, um retorno a Sã doutrina, um retorno a doutrina dos apóstolos e também um retorno ao cristianismo primitivo. Tudo isso por que naquela época havia muitos mediadores, as pessoas recorriam aos santos, as imagens, a Maria, como fazem ainda hoje. As pessoas terceirizavam a fé e as suas petições, as pessoas pediam ao seu santo favorito para que ele intercedesse por elas e levassem suas petições a Deus, fazendo um trabalho que na verdade é do Espírito Santo. No entanto, quando vamos para as Escrituras, a Bíblia nos informa com veemência, com clareza e em diversas passagens que há um só Deus, um só mediador entre Deus e os homens, que é Cristo. 
Não há possibilidade de ser qualquer outra pessoa, seja ela quem for, é um lugar único, indisputável e Ele confirma isso em João 14:6: __"Eu sou O caminho, A verdade e A vida, ninguém vem ao Pai, senão por mim"__. Ele não diz: eu sou UM caminho, não, não, Ele deixa claro para qualquer entendedor que Ele é O ÚNICO caminho, não há como ir para Deus sem seguir Jesus Cristo. Ele não é uma das muitas verdades religiosas, Ele é A verdade e Ele não é uma vida entre várias, Ele é A vida. Irmãos, se queremos entrar pela porta estreita da salvação, nós devemos entrar com Cristo Jesus, nenhuma igreja pode nos salvar, nenhuma religião pode nos salvar, nossas obras, nossas bondades com o próximo, nada disso pode nos salvar se não for aquele que é poderoso para nos dar a salvação, Jesus Cristo. Devemos tomar cuidado para não nos corromper com as coisas do mundo, estamos vivendo em um mundo de pluralismo não somente de crer em vários deuses, mas também de caminhos pra irmos pro céu, como diz o ditado popular: "Há muitos caminhos pra Deus", não, esse ditado em nada tem a ver com o que diz a Bíblia, pois Jesus é o único caminho. Estamos vivendo tempos de relativismo, onde cada um faz como quer, da maneira que quer, onde cada um diz que o importante é ser feliz, que o importante é o amor, que o importante é viver, que o importante é ter sinceridade e dessa forma vão ignorando a verdade absoluta que está contida nas Escrituras. Entretanto, não importa o que dizem, se quisermos verdadeiramente buscar a salvação, digo verdadeiramente, pois não basta simplesmente dizer que sim e continuar fazendo tudo o contrário e os nossos atos não condizerem com atitudes de pessoas transformadas, se quisermos a salvação, essa salvação está unicamente em Jesus Cristo.
Somente Cristo, veio ao nosso mundo, se fez carne, habitou entre nós, andou por aqui como um peregrino e forasteiro habitando em terras desconhecidas, fazendo o bem, não pecando em nenhum momento, libertando os oprimidos e curando os enfermos com o Seu poder, somente Cristo foi à cruz, não como um mártir, mas para pagar os nossos pecados, para nos dá uma chance. Somente Cristo ressuscitou para a nossa justificação e tudo isso para que nós pudéssemos sermos sarados e perdoados e salvos. Somente Cristo, meus amados irmãos, fez tudo isso, nenhum outro homem, de qualquer outra religião, por mais histórico, conhecido ou famoso que seja, fez o que Ele fez por nós. Eles podem ter morrido, e morreram de fato, podem até terem sofrido antes de morrer, mas não pagaram, tão pouco perdoaram os nossos pecados, também não morreram por nós e muito menos ressuscitaram, me desculpe Francisco, me desculpe João, me desculpe Maria, <NAME>, Confúcio, mas somente Ele fez! Glorificado seja o nome do Senhor <NAME>. Irmãos, não vamos brincar com coisa séria, a Bíblia diz que ao homem está ordenado morrer uma só vez e após isso vem o juízo. Todos nós prestaremos conta de tudo isso que estamos fazendo na terra. Certa vez um homem disse assim para um pregador bastante conhecido chamado <NAME>: "Só Deus pode me julgar", então <NAME> virou pra ele e disse: "E você não tem medo disso?". Essa frase retrata bem que muitas vezes falamos que tememos a Deus, mas é somente da boca pra fora, porém a gente continua fazendo o que é errado aos olhos do Senhor, como inconsequentes e desatentos, como se sempre houvesse um amanhã para pedir perdão e nos arrepender. E aí se esse amanhã não mais chegar? Vamos ter que prestar contas com Deus de mãos vazias, pois realmente não temos nada a oferece-Lo, pois pela nossas obras, pela nossa religiosidade, nós não seremos salvos, a única condição que seremos salvos é crendo em Jesus, é somente Cristo que nos dá a salvação. 
Lembrem-se do carcereiro de Atos 16, que em momento de grande aflição perguntou a Paulo e Silas: "o que farei para ter a salvação?". Eles disseram: __"Crer no Senhor Jesus e serás salvo, tu e tua casa"__. É somente Cristo que pode nos salvar. Que o Senhor nosso Deus nos abençoe, nos guarde, nos proteja. Em nome de Jesus, amém!<file_sep>/_posts/2017-05-28-como-interpretar-a-biblia.md --- layout: post title: Como interpretar a Bíblia? categories: devocionais image: interpretar.jpg logo: interpretar.jpg description: Como interpretar a Bíblia? url: https://eltonsantos.github.io/devocionais/como-interpretar-a-biblia/ author: <NAME> comments: true --- ## Como interpretar a Bíblia? <p class="intro"><span class="dropcap">A</span>ntes de começar quero dizer que a <b class="red">Bíblia não é um livro comum</b> e a cada leitura Deus pode falar com cada um de nós de diferente formas, até mesmo em um único versículo. Tudo vai de acordo com a Sua soberania e vontade. No entanto, saber interpretar a Bíblia da maneira correta nem de longe tira a sua credibilidade, pois nos pontos fundamentais <b class="red">a Bíblia não se diverge, não se contradiz, é inerrante, infalível e inspirada</b>.</p> Existem algumas regras básicas para a sua melhor interpretação, vou citar 6 delas: ### Orar e trabalhar No latim existe uma expressão muito famosa que diz <b class="gray">**"Orare et Labutare"**</b>, que significa orar e trabalhar, pois é justamente isso a primeira regra básica para uma boa interpretação da Bíblia. **Por que orar?** Por que a Bíblia é divina e precisamos de discernimento espiritual dado pelo Espírito Santo para poder interpretá-la. **Por que trabalhar?** O trabalhar aqui significa estudar e deve ser feito por que a Bíblia possue uma gramática culta, ela é escrita numa linguagem humana e tem que ser estudada como se fosse estudar para um concurso. 
Ela tem que ser lida uma vez, outra e outra até ter entendimento pleno do capítulo lido, tem que ser grifada, analisada e comentada. Não adianta só orar, pedir pra Deus mostrar, revelar, dessa forma você fica uma pessoa mística, um monge. Porém, também não adianta só trabalhar, estudar, pois dessa maneira você vai virar um crítico, um teólogo seco, frio e que tudo relativiza. Tenha cuidado com essas duas extremidades, um crente precisa ter um equilíbrio dessas duas coisas. ### Saiba o tema/enredo da Bíblia A história central da Bíblia é a redenção. É o que Deus faz para o homem através da morte, ressurreição, ascenção e exaltação de Cristo Jesus. Sabendo desse tema central dá pra ter uma interpretação mais próxima do que está sendo lido de Genêsis 1 até Apocalipse 22. ### A Bíblia é a sua melhor intérprete Textos mais fáceis interpretam textos mais difíceis. Não há contradição na Bíblia, mas sim ausência ou carência de entendimento pleno do que está sendo exposto. Um bom exemplo, é o que a Bíblia relata em 1 Samuel 28, sobre o espiritismo, quando Saul foi consultar uma bruxa e "viu" Samuel. Esse texto é de difícil interpretação e pode dá margem a dizer que espiritismo é algo permitido ou que a Bíblia é a favor. Por causa disso, é muito mais fácil ler textos simples e diretos, como em Hebreus 9:27, que diretamente a Bíblia não dá margem alguma para o espiritismo e a reencarnação, como pode ser visto também em Deuteronômio 18:10-12. ### O antigo testamento é interpretado à luz do novo testamento Tenha o conceito do novo testamento em seu coração e assim a leitura do antigo será mais fácil. Exemplo: guardar dias, sacrifício... As leis cerimoniais do Antigo Testamento apontam às leis morais do novo testamento. 
### O texto quer dizer o que o autor quer dizer O sentido natural tem que ser preferido ao sentido figurado, em outras palavras aquilo que se está lendo é de fato aquilo que aconteceu ou está sendo relatado, levando em conta o plano de fundo de todo o contexto histórico daquele texto. Sempre que ler dê preferência ao sentido literal/material a exceção é o sentido figurado. Um exemplo simples: Quando Jesus disse: Eu sou a porta(...). De fato, Jesus não é uma porta (sentido figurado), nesse sentido Ele quis dizer que é um método (material). ### O texto deve ser interpretado à luz do seu contexto Como diz uma expressão popular: <b class="gray">"Texto fora de contexto gera pretexto para uma heresia"</b>. O que é um contexto? Basicamente é pegar versículos anteriores e posteriores ao texto que está sendo lido e com isso definir o que o texto está dizendo. **Nunca traduza um texto sem seu contexto**. Exemplo: "Porque Deus amou o mundo de tal maneira que deu o seu Filho unigênito, para que todo aquele que nele crê não pereça, mas tenha a vida eterna." João 3:16 Deve ser interpretado com todo o seu contexto. Quem falou? Jesus. Pra quem falou? Nicodemus. Quem era a pessoa a quem estava sendo falado isso? Um príncipe dos fariseus. Qual época foi falado? O que Jesus quis dizer com isso... E assim você vai chegar a interpretação correta... Após tudo isso a probabilidade de chegar a interpretação correta do texto é muito grande. E é dessa maneira que devemos todos nós interpretar diariamente a Palavra de Deus. 
Amém!<file_sep>/js/script2.js
(function(){
	// Scroll-progress indicator: keeps the width of `.progress-indicator`
	// in sync with how far the window has been scrolled through the page.
	var $w = $(window);
	var $prog = $('.progress-indicator');
	var wh, h, sHeight; // viewport height, body height, scrollable distance

	// Cache the measurements that every scroll event depends on.
	function setSizes(){
		wh = $w.height();
		h = $('body').height();
		sHeight = h - wh; // NOTE(review): 0 when the page fits the viewport — scrollTop()/sHeight would then be NaN; confirm pages are always taller than the window
	}

	setSizes();

	$w.on('scroll', function(){
		// Fraction scrolled, clamped to [0, 1].
		var perc = Math.max(0, Math.min(1, $w.scrollTop()/sHeight));
		updateProgress(perc);
	}).on('resize', function(){
		setSizes();
		$w.trigger('scroll'); // re-sync the bar with the new sizes
	});

	// Paint the scrolled fraction as a CSS percentage width.
	function updateProgress(perc){
		$prog.css({width : perc*100 + '%'});
	}
}());<file_sep>/_posts/2018-01-05-devocional-05-01-2018.md
---
layout: post
title: Devocional - 05/01/2018
categories: devocionais
image: reflexao.jpg
logo: reflexao.jpg
description: Devocional - 05/01/2018 - Lm 3:19-23
url: https://eltonsantos.github.io/devocionais/devocional-05-01-2018/
author: <NAME>
comments: true
---

__"Lembro-me da minha aflição e do meu delírio, da minha profunda amargura e do meu enorme pesar. Lembro-me bem disso tudo, e a minha alma desfalece dentro de mim. Todavia, quero trazer à memória aquilo que pode me dá esperança: As misericórdias do SENHOR são a causa de não sermos consumidos, pois as suas misericórdias são inesgotáveis e se renovam a cada manhã; grande é a tua fidelidade!"__ Lm 3:19-23

<p class="intro"><span class="dropcap">P</span>az de Deus meus amados irmãos, esse texto fala muito ao meu coração e quero meditar nele nessa manhã. Lembramos de quase tudo que passamos de bom e de ruim, claro, muitas vezes pelo tempo esquecemos dos detalhes, mas muitas coisas ficam em nossa mente e servem como lição, como exemplos bons ou ruins. Todavia, precisamos ocupar nossa mente com as coisas boas, com as boas lembranças.</p>

Irmãos, se vocês estão lutando por alguma causa procurem trazer à memória aquilo que te dá esperança, vai lembrando daquilo que Deus já fez, vai lembrando daqueles momentos em que Deus já te ajudou.
E se tá difícil e você não consegue lembrar de nada bom, a Bíblia te ajuda, lembrem-se dos milagres que Deus já tem feito, lembrem-se que Ele abriu o mar, lembrem-se que Ele já fez o sol parar, lembrem-se que Ele já ressuscitou mortos e venceu a morte! Portanto, não ocupemos nossa mente com as coisas ruins, **tragam a memória aquilo que pode dá esperança** para quando lembrarmos dos momentos de dor imediatamente lembrarmos também que o Senhor é misericordioso e é poderoso para entrar junto na nossa causa e conforme Sua vontade resolver a nossa peleja. Que Deus abençoe a todos!<file_sep>/_posts/2018-02-05-sola-scriptura.md --- layout: post title: Estudo dos solas:&nbsp; Sola Scriptura categories: devocionais image: solascriptura.jpg logo: solascriptura.jpg description: Parte 1 - Sola Scriptura - 2 Timóteo 3:14-17 url: https://eltonsantos.github.io/devocionais/sola-scriptura/ author: <NAME> comments: true --- __"Tu, porém, permanece naquilo que aprendeste, e de que foste inteirado, sabendo de quem o tens aprendido, e que desde a tua meninice sabes as sagradas Escrituras, que podem fazer-te sábio para a salvação, pela fé que há em Cristo Jesus. Toda a Escritura é divinamente inspirada, e proveitosa para ensinar, para redargüir, para corrigir, para instruir em justiça; Para que o homem de Deus seja perfeito, e perfeitamente instruído para toda a boa obra."__ 2 Timóteo 3:14-17 <p class="intro"><span class="dropcap">P</span>az de Deus, meus amados irmãos, hoje vamos iniciar um pequeno estudo sobre os 5 solas da reforma protestante e hoje vou falar sobre o Sola Scriptura ou Somente as Escrituras.</p> Apenas um breve resumo para recapitular o que foi a reforma protestante. 
A igreja católica vinha cada vez mais se distanciando das Escrituras, cometendo heresias atrás de heresias, vendendo indulgências, perdoando pecados de acordo com o valor pago por eles dentre outras coisas e ai, em 1517, na Alemanha, um homem chamado Martinho Lutero não aguentando tanta heresia fixou na porta do castelo de Wittenberg as famosas 95 teses, essas teses eram uma crítica a tudo o que ia contra os ensinamentos de Deus e que destoava das Escrituras. Esse evento, que era pra ser somente em âmbito religioso, tomou proporções tão imensas que mudou todo o cenário da época, tanto econômico, quanto político e social e que deu início a tão famosa reforma protestante. A Bíblia é o livro mais publicado no mundo, foi concebido nos céus, nascido na terra, inspirado por Deus e escrito por homens piedosos. E este livro é a Palavra de Deus, é infalível, inerrante e suficiente. A Palavra de Deus é a nossa única regra de fé e prática, nos traz a mensagem da salvação, nos aponta pra Jesus e para o caminho traçado por Deus para nossa redenção. Para vocês terem ideia, a nossa Constituição Federal é baseada em muitos preceitos bíblicos, mesmo sendo um país laico. É notório que nosso país seja de predominância cristã e a maneira que nossa Constituição foi escrita confirma isso. Em 1517 quando a reforma aconteceu na Alemanha, o primeiro pilar foi baseado em somente as Escrituras, nada de tradição humana ou posição de qualquer pensador ou opinião filosófica sobrepõe a verdade das Escrituras. Uma vez eu li em uma revista uma matéria que me chamou a atenção, o tema era "Deus sob ataque", a revista criticava Deus e a Bíblia, tentando inferiorizar a Bíblia e procurando tirar seu status de Sagrada, buscando provar que o que tem na Bíblia não é verdade. Porém as críticas sempre caem no descrédito, meus irmãos, a Palavra de Deus permanece vitoriosa porque não é palavra de homem, mas sim Palavra de Deus, pois Ele mesmo disse: céus e terras passarão, mas minha Palavra NUNCA passará.
Este livro possui mais de 40 escritores, em circunstâncias diferentes, em épocas diferentes e não há contradição alguma, há uma harmonia plena em seu conteúdo. E esta é mais uma evidência de que por trás de todos os textos está a mente divina, qualquer obra humana escrita precisa de uma revisão, atualização, de um volume 2, 3, 4 para não se perder no tempo, eu trabalho com tecnologia da informação, os livros que eu estudava em 2012, por exemplo, já ficaram para trás, estão obsoletos, mas **a Palavra de Deus é infalível, inerrante e reproduz exatamente a mente divina**. Este livro possui inúmeras profecias, que se cumpriram e hão de se cumprir, literalmente. **O Deus que estava lá naquela época, está na nossa época também e Ele não muda, Ele é eterno**. E por isso que o cumprimento das profecias é outra prova infalível que esse livro é a Palavra de Deus. Um livro que transforma nações, cidades, que muda homens, que restaura famílias e o poder de Deus age por meio das Escrituras. Leia a Bíblia, estude-A, debruce-se nas Escrituras, mas não a estude apenas como se estuda um livro comum, pois **a Bíblia não é um livro comum**, ela é a Palavra de Deus, portanto, peça orientação ao Senhor, peça que Ele te guie durante a leitura e estudo, peça que Ele se revele e peça discernimento para que Ele fale não aquilo que você quer ouvir, mas sim aquilo que Ele quer falar. E também não a estude como um livro mágico, onde você bota o dedo e ai vem uma palavra mística. A Bíblia também é um livro que precisa ser corretamente estudado e interpretado na dependência do Espirito Santo de Deus para que possamos entender a mensagem central desse livro e a mensagem central é <NAME>, o nosso Salvador. **Somente as Escrituras**, um dos solas da reforma protestante, examine esse livro, estude, medite na Palavra de Deus dia e noite, ele está a nossa disposição, no nosso idioma e quando lemos a Bíblia, é Deus que vem falar conosco. Quer viver a vida em santidade? 
Quer ter uma vida de obediência e fidelidade a Deus? Quer ter uma vida vitoriosa? Examine a Palavra de Deus, pois ela que direciona e norteia os nossos passos, conduzindo-nos a uma vida plena e cheia do Espírito Santo. Que Deus abençoe a todos.<file_sep>/_posts/2017-07-31-amanha-pode-ser-tarde-demais.md --- layout: post title: Amanhã pode ser tarde demais categories: devocionais image: amanha.jpg logo: amanha.jpg description: Amanhã pode ser tarde demais - Lucas 16:19-31 url: https://eltonsantos.github.io/devocionais/amanha-pode-ser-tarde-demais author: <NAME> comments: true --- "Ora, havia um homem rico, e vestia-se de púrpura e de linho finíssimo, e vivia todos os dias regalada e esplendidamente. Havia também um certo mendigo, chamado Lázaro, que jazia cheio de chagas à porta daquele; E desejava alimentar-se com as migalhas que caíam da mesa do rico; e os próprios cães vinham lamber-lhe as chagas. E aconteceu que o mendigo morreu, e foi levado pelos anjos para o seio de Abraão; e morreu também o rico, e foi sepultado. E no inferno, ergueu os olhos, estando em tormentos, e viu ao longe Abraão, e Lázaro no seu seio. E, clamando, disse: Pai Abraão, tem misericórdia de mim, e manda a Lázaro, que molhe na água a ponta do seu dedo e me refresque a língua, porque estou atormentado nesta chama. Disse, porém, Abraão: Filho, lembra-te de que recebeste os teus bens em tua vida, e Lázaro somente males; e agora este é consolado e tu atormentado. E, além disso, está posto um grande abismo entre nós e vós, de sorte que os que quisessem passar daqui para vós não poderiam, nem tampouco os de lá passar para cá. E disse ele: Rogo-te, pois, ó pai, que o mandes à casa de meu pai, Pois tenho cinco irmãos; para que lhes dê testemunho, a fim de que não venham também para este lugar de tormento. Disse-lhe Abraão: Têm Moisés e os profetas; ouçam-nos. E disse ele: Não, pai Abraão; mas, se algum dentre os mortos fosse ter com eles, arrepender-se-iam. 
Porém, Abraão lhe disse: Se não ouvem a Moisés e aos profetas, tampouco acreditarão, ainda que algum dos mortos ressuscite." *Lucas 16:19-31* <p class="intro"><span class="dropcap">P</span>az de Deus a todos, meus amados irmãos, a reflexão que Deus colocou hoje em meu coração é sobre o fato de amanhã ser tarde demais para servir a Jesus Cristo. Conforme essa passagem mostra, o rico teve tudo na vida, tinha coisas boas nessa terra, enquanto o pobre era desprezado e não possuía nada. O rico teve inúmeras oportunidades para se arrepender, pedir perdão, se humilhar aos pés da cruz, amar seu próximo e servir a Deus, porém ele sempre foi deixando para amanhã por não ter tempo, para amanhã por está ocupado demais, para amanhã por está feliz demais, para amanhã por está cansado demais, para amanhã e amanhã até não ter mais esse amanhã... e aí irmãos, a Bíblia diz que só é permitido ao homem morrer uma única vez, depois disso vem o juízo e nessa hora não tem mais volta, não tem mais possibilidade de se arrepender.</p> Amados irmãos, um time que perde hoje, pode ganhar amanhã; um general que perde uma batalha hoje, pode ganhar outra batalha e até mesmo a guerra amanhã; uma pessoa que não passa em concurso hoje, pode ser aprovado em outro amanhã... Nas coisas comuns da vida, nós provavelmente temos outras chances, no entanto em relação a vida e a morte, não há outra chance... Em **Hebreus 3:7-8** diz assim: "Se ouvirdes hoje a voz do Espírito Santo, não endureçais os vossos corações". Irmãos, hoje é um dia oportuno, não há esperança depois da morte, pois não existe reencarnação e também não existe purgatório. Após a morte, temos que dar conta da nossa vida a Deus, com tudo que fizemos e deixamos de fazer. Nós tendemos a imaginar que temos tanto tempo pela frente, uma longa estrada, pois somos jovens, saudáveis na medida do possível, temos recursos e não encontramos tempo de cuidar de algo tão importante: a nossa alma. 
Sempre imaginamos que podemos cuidar disso quando estivermos mais velhos. Porém, como a morte nunca avisa a hora que vai chegar, a pergunta que fica é: Se morremos hoje, estaríamos preparado para encontrarmos com Deus? Estaríamos certo, seguros, perdoados e saberíamos onde a gente iria? Falando por mim, eu não tenho certeza pra onde eu iria, falo com convicção. Eu ainda preciso buscar muito mais a Deus do que tenho feito. Ainda me falta um longo caminho. E por conta disso, não devemos deixar pra pensar nisso em última hora, pois pode ser que nem tenhamos mais chances no futuro. Quantos perdem a vida em acidentes repentinos que não da tempo de acertar os caminhos e quantos perdem a vida em mortes súbitas que não da tempo de consertar o que foi feito antes, pensaram isso? Então, realmente, amanhã pode ser tarde demais. Se está precisando perdoar alguém, perdoe! Se fez mal a alguém, peça perdão, arrependa-se, humilhe-se, conserte a tua vida enquanto é tempo. É por isso que Deus nos exorta inúmeras vezes que hoje é o dia oportuno, enquanto temos tempo para nos arrepender, enquanto temos tempo para fazer diferente, enquanto temos tempo para acertar a vida com nossa família e com nosso próximo, pois nós não administramos o amanhã. É necessário aproveitar logo essa oportunidade e por a vida e o coração naquele que não somente pode perdoar nossos pecados, mas também pode nos dá a salvação. Por fim amados, deixar pra depois é uma loucura e irresponsabilidade, não deixe de se voltar logo para Deus, não importando quão grave são as transgressões que tenhamos cometido contra Ele, contra o próximo, contra família. Deus é rico em perdoar e tem prazer na misericórdia. Voltemos pra Deus enquanto é tempo, por favor. Em nome de Jesus, vamos nos arrepender de tudo o que fizemos e seguir firmes nos caminhos do Senhor. Que Deus abençoe a todos. 
Amém!<file_sep>/_posts/2018-02-19-sola-fide.md
---
layout: post
title: Estudo dos solas:&nbsp; Sola Fide
categories: devocionais
image: solafide.jpg
logo: solafide.jpg
description: Parte 3 - Sola Fide - Romanos 5:1
url: https://eltonsantos.github.io/devocionais/sola-fide/
author: <NAME>
comments: true
---

__"Tendo sido, pois, justificados pela fé, temos paz com Deus, por nosso Senhor Jesus Cristo"__ Romanos 5:1

<p class="intro"><span class="dropcap">P</span>az de Deus a todos, meus amados irmãos, dando continuidade ao nosso estudo dos solas da reforma protestante, hoje falaremos sobre o Sola Fide ou Somente a Fé.</p>

Quero começar o estudo com a perguntinha básica: **O que é fé?** De acordo com o dicionário, o conceito formal de fé é uma palavra que significa "confiança", "crença", "credibilidade". É um sentimento de total crença em algo ou alguém, ainda que não haja nenhum tipo de evidência que comprove a veracidade da proposição em causa. Agora indo para as Escrituras, quando é usada a palavra fé, e geralmente ela está mostrando uma entrega e confiança em Deus, acreditando no que Ele diz e no que Ele fez. Também é preciso entender que fé não é sinônimo de "pensamentos positivos bonitinhos", de que "o futuro é lindo", de que é "tudo de bom", que "Deus vai salvar todo mundo", como dizem por aí, não, não, fé é crer em Jesus, **fé é confiança no Deus todo poderoso**, em Sua obra, independente de qualquer coisa e o mais importante, pra ser salvo. Um outro termo sinônimo ao termo "Somente a fé" que também é muito falado é o da doutrina da justificação pela Fé. Aqui é muito estudado o como uma pessoa pode ser salva, com que meios, de que forma, fazendo o que? Antes de tudo, a salvação não é pelas obras, ponto.
Não é pela pessoa fazer caridade, ajudar o necessitado que será salvo, por melhor intencionada que a pessoa seja, não é com isso que uma pessoa é salva, pois não é para ninguém se gloriar em seu coração e dizer que fez por merecer, que é justo ou não ter alcançado a salvação por ter feito algo. Irmãos para a salvação, não existe espaço para a soberba, para se achar melhor ou superior a alguém, se alcançássemos Deus através de algum esforço, então esse ato já daria margem para a vangloria, para me sentir "por cima da carne seca", mas como o ato de ser salvo e alcançar Deus é unicamente pela Fé, logo não temos como nos sentir melhor do um ou outro, o que nós temos é que temer, é imaginar estarmos diante de um Deus que tem o poder de nos salvar e saber que não cumprimos o que Ele nos ordena, apenas recebendo a Sua graça pela fé. Um outro ponto que quero comentar é sobre as cartas de Paulo e o livro de Tiago. Muitas pessoas, e eu também me incluía no meio até um tempo atrás, se confundem e interpretam de forma errada quando estão lendo esses livros, pois um diz que é somente a fé e o outro fala de fé e obras, aí como assim, a pessoa já entra em pânico e fica logo agoniada, fica parecendo até que Paulo e Tiago entram em contradição, mas na realidade não há contradição alguma, apenas existe uma perspectiva, uma visão diferente de um mesmo ponto. Para citar como referência temos de um lado _Romanos 1:16-17_ que diz: **"Porque não me envergonho do evangelho de Cristo, pois é o poder de Deus para salvação de todo aquele que crê; primeiro do judeu, e também do grego. Porque nele se descobre a justiça de Deus de fé em fé, como está escrito: Mas o justo viverá pela fé."** E do outro lado temos _Tiago 2_, capítulo completo que fala sobre as obras. Ora, enquanto Paulo olha pra causa da salvação e diz: é somente pela fé, Tiago olha para a consequência da salvação e diz: nós somos justificados pela fé e as evidências são as obras, ponto. 
Acho que fica claro, depois que conseguimos chegar nesse entendimento. Irmãos, agora, vejam bem, se você diz eu creio, mas não ajuda os necessitados, eu creio, eu tenho fé, mas não estende a mão para ajudar ninguém, isso é uma fé falsa, pois a fé verdadeira nunca vem sozinha, ela sempre caminha junto se evidenciando nas obras. Não é aquele tipo de fé "preguiçosa" que quer somente encontrar respaldo nessa palavra e ficar sem fazer nada pra Deus. A Bíblia deixa bem claro inúmeras vezes que não tolera preguiçosos e pessoas que não mexem uma palha para ajudar a fazer o Seu Reino avançar. De novo, para fixarmos: não é dizer sou salvo graças a fé com mais as obras, não, não, somos salvos apenas pela fé e nela sendo evidenciada, demonstrada e comprovada pelas boas obras. Outra coisa quem tem fé, precisa ter fé em alguma coisa, correto? Então essa fé é fé em quem ou em que? Para uma pessoa a fé pode ser nele mesmo, pode ser em algum santo, numa imagem, na virgem Maria, no buda, nos orixas, em Alá, no papa, logo, a gente ver que fé por fé, todo mundo tem, a questão não é acreditar, mas sim em quem acreditar? E devemos crer em Cristo Jesus, assim como diz em João 6:47: **"Na verdade, na verdade vos digo que aquele que crê em mim tem a vida eterna"**, mas isso vamos estudar na semana que vem. Fé não é sentimento intelectual, sentimento emocional, conhecimento humano, fé é dom de Deus, é dada, é concebida por Deus, fé é quando você tem um problema, não tem solução humana e você transfere pra Jesus Cristo, é quando você tem confiança em quem você é e o que você faz e você transfere essa confiança para Jesus Cristo, sabendo que Ele sabe todas as coisas e sabe o que é melhor para você. Irmãos, como diz e Efésios 2:8, fé é dom de Deus, nós não geramos fé, não a criamos, é Ele quem nos dá, a nossa fé é passiva, a gente só a recebe, Deus nos dá a fé Nele. Glorificado seja o nome do Senhor. Para concluirmos, é pela Fé e pela Fé somente que somos salvos. 
E que fé mais obras caminhando separadamente não condiz exatamente com o que diz nas Escrituras. **Adicionar algum elemento à fé em Jesus como cobrança para a salvação te separa da verdadeira fé**. Amém? Que Deus abençoe a todos!<file_sep>/_posts/2017-11-26-eu-estou-morrendo.md
---
layout: post
title: Eu estou morrendo!
categories: devocionais
image: morrendo1.jpg
logo: morrendo1.jpg
description: Eu estou morrendo - Juízes 16:20
url: https://eltonsantos.github.io/devocionais/eu-estou-morrendo/
author: <NAME>
comments: true
---

__"E disse ela: Os filisteus vêm sobre ti, Sansão. E despertou ele do seu sono, e disse: Sairei ainda esta vez como dantes, e me sacudirei. Porque ele não sabia que já o Senhor se tinha retirado dele."__ **Juízes 16:20**

<p class="intro"><span class="dropcap">P</span>az do Senhor meus amados irmãos, antes de começar a pregação o tema "Eu estou morrendo" não significa que estou morrendo de fato, eu creio que não, pois eu acredito que somos imortais até que todos os planos de Deus se cumpram em nossas vidas e Deus ainda tem muito o que se fazer em minha vida e enquanto não se cumpre, carrego comigo os Seus sonhos. Esse tema foi de uma pregação do Defesa do Evangelho e através dela Deus falou bastante comigo, me constrangeu para que eu pudesse compartilhar com os irmãos os ensinamentos.</p>

Esse texto retrata a história de Sansão, um texto bastante conhecido das Escrituras, Sansão era um juiz que Deus levanta para guardar, julgar e proteger Seu povo. Sansão é uma figura diferente e o que aconteceu com ele foi bastante peculiar. Diz a Bíblia que todos os homens que Deus usava Ele os enchia com o Espírito Santo, sejam eles profetas, juízes, apóstolos... e ao serem cheios do Espírito Santo, eles eram usados para pregar, levar o Evangelho a todos os lugares, fazerem cura, sinais, profetizarem, etc.
No entanto, quando Sansão está cheio do Espírito Santo ele ganha uma força descomunal, ele é o único homem da Bíblia que esse dom tem um efeito físico, se apoderava dele uma enorme força, que se refletia nos seus músculos de tal forma que Sansão conseguia pegar um leão e rasgá-lo ao meio, ele matava mil homens com uma queixada de jumento, ele pegava sozinho coisas tão pesadas que vários homens não conseguiriam pegar. Então, Sansão tinha essa peculiaridade, quando o Espírito Santo de Deus vinha sobre ele, se refletia no seu físico e por isso ele era conhecido, como um homem muito forte que protegia Israel usando sua força física. Para que Sansão mantivesse o Espírito Santo sobre si, ele tinha que manter o voto de nazireato, em números 6:1-21 fala sobre o que consiste e as proibições desse voto, algumas proibições desse voto eram: não tocar em cadáveres, apartar de qualquer bebida forte, não podia se prostituir e nem cortar os cabelos. Entretanto Sansão vai quebrando esses votos um a um, mentindo, bebendo, se prostituindo, assassinando muitas pessoas que não precisaria ter matado e além disso, casa-se com Dalila, uma ímpia, filisteia, que era um dos principais inimigos de Israel. A Bíblia fala que toda vez os filisteus tentavam pegar Sansão, pois ele era como um herói de Israel, um super homem, e de fato ele era. Fazendo analogia, era como em um jogo de vídeo game, por exemplo, pra um time consegui ganhar de outro mais facilmente o adversário sempre foca seus ataques no herói, justamente para conseguir eliminá-lo logo e ter menos trabalho, e é por isso que enquanto Sansão estivesse pelo caminho, os filisteus não conseguiriam fazer mal contra o povo de Deus, então eles tentavam matar Sansão fazendo alguma emboscada e sempre eram em vão, pois Sansão se sobressaia contra eles todas as vezes. Após algum tempo, despreocupado, deitado no colo de Dalila, já com o cabelo cortado e tendo revelado todo o segredo a ela, os filisteus novamente vem sobre ele. 
Aí ela fala: os filisteus vêm contra ti! E ele já farto, acostumado e apático dessa situação de sempre se dar bem, pensa numa boa: ah, sairei como das outras vezes e me livrarei... Só que dessa vez acontece algo diferente, ele não consegue se livrar, dessa vez não, os filisteus o pegam, arrancam seus olhos, prendem-o, jogam-o dentro de um calabouço e fazem-o girar moinho no cárcere. Afinal, o que que aconteceu de diferente? Qual o fator predominante da queda, da ruína de Sansão? Quando os filisteus vem contra ele, acontece tudo repentinamente até uma certa parte. E a Bíblia relata esse texto que é muito profundo e também muito triste: "Porque ele não sabia que já o Senhor se tinha retirado dele", em outras traduções: "Mal sabia Sansão que o Espírito de Deus havia se ausentado dele". Sansão estava morrendo... Meus irmãos, o que eu temo é que estejamos na mesma situação que Sansão, que o Espírito esteja se ausentando, se apagando e nós não estamos nem percebendo esse fato triste acontecendo. Eu não acredito que o Espírito Santo se ausente completamente de nós, nos abandone e nos desampare, mas eu acredito no que ta em 1 Tessalonicenses 5:19, que por causa do pecado o Espírito Santo vá se apagando, se extinguindo e se entristecendo. De forma que a gente fique temporariamente sem as operações Dele em nossa vida. E faz muito sentido com o que ocorreu na vida de Sansão. Irmãos amados, será que nós não estamos pregando, orando, lendo, lutando contra o mal, contra o pecado, sem a influência e poder do Espírito Santo? Será que não é a ausência do Espírito Santo a resposta de que nossas orações estão sendo cada vez mais curtas, menos intensas e com menos quebrantamento e qualidade? E será que não é por causa disso que estamos cada vez menos lendo a Bíblia, estudando a Bíblia, compreendendo aquilo que estamos lendo? Será que tudo isso não foi gerado por causa do minguar do Espírito? 
O número de pecados tem aumentado e juntamente com o número também a densidade, a gravidade tem aumentado será que isso não é por causa da ação mortificadora do Espírito que está se extinguindo? Irmãos, estamos vivendo em uma geração mais carnal de todos os tempos, uma natureza que é inclinada pro mal, pra naturalidade, pra soberba, pro materialismo e constantemente estamos em guerra velha natureza (carne) x nova natureza (espírito), onde devemos sempre fazer com que o Espírito se sobressaia em relação a carne, agora uma vez que o Espírito está se extinguindo, ficando fraco, entristecido, a velha natureza vai predominar em nós e acontecendo isso não haverá muita diferença entre o crente que tem as ações do Espírito extintas e o ímpio que não possui o Espírito, pois em ambos o que predomina é a carne. Voltando pro texto, quando Dalila avisa Sansão sobre os filisteus primeiro ele diz assim: "Sairei dessa vez como antes", ele quis dizer que sempre saiu assim, que sempre usou esse artifício, que sempre usou desse poder. Sabem o que significa? Pregarei dessa vez como antes, evangelizarei dessa vez como antes, falarei com minha esposa como antes, lidarei com essa situação como antes, vou na vigília como antes, vou orar como antes, vou desviar meus olhos daquela mulher como antes, vou ler a Bíblia como antes, vou preparar um sermão que vai converter aquela pessoa como antes e podemos dá vários exemplos de como antes eram as coisas, como se fosse no primeiro amor. Sansão basicamente falou: "Eu vou matar os filisteus com os mesmos recursos que eu usei antes". E em seguida ele diz: "E me livrarei" ou seja, e isso funcionará. Quer dizer que: quando eu pregar o pecador se converterá, quando eu evangelizar muitos aceitarão a Cristo, quando eu falar minha esposa se comoverá, quando eu fizer um jejum eu concluirei, o voto que fiz eu cumprirei. Era essa a total confiança que tinha Sansão, da mesma forma conosco. 
Porém, como é dito várias vezes em Cavaleiros do Zodíaco, um golpe não funciona duas vezes contra o mesmo cavaleiro, assim foi na vida de Sansão, quando o Espírito Santo retirou-se dele. Os filisteus o pegaram e fizeram atrocidades com Sansão. Ele não conseguiu se livrar. Que isso quer dizer? Dessa vez a pregação não teve eficácia, dessa vez o sermão foi um fracasso, dessa vez o evangelismo não resultou em nenhuma alma convertida, dessa vez não conseguiu salvar seu casamento, dessa vez caiu em tentação, dessa vez não conseguiu ler a Bíblia por nem 5 minutos, dessa vez não conseguiu desviar os olhos daquela mulher e tudo isso por que o Espírito Santo não estava mais com ele. Irmãos, nós sem o Espírito não somos nada, não somos ninguém. Tudo o que fazemos de virtuoso a causa é o Espírito Santo, sem ele nós somos inúteis naquilo que nos propomos a fazer. O detalhe mais triste do texto não é Sansão ter sido pego e o Espírito ter se ausentado, mas sim o fato dele não saber, não perceber, não ver que ele não estava mais com o Espírito Santo. Irmãos, nós podemos ter perdido as ações beneficiadoras do Espírito sem mesmo percebermos isso, isso pode acontecer conosco. E voltando a indagação do começo, será que não estamos vendo isso acontecer em nossas vidas? Notem que no texto Sansão não fica imobilizado por causa da ausência do Espírito, não, Ele se levantou, saiu, foi lutar e pensava que iria vencer. Tudo isso Ele fez sem o revestimento da armadura de Deus, a plenitude do Espírito. Será que nós não estamos no mesmo caminho? Enfrentando o dia, enfrentando a noite, enfrentando a carne, continuando com as atividades normais, continuando com nossa rotina, mas sem a parte do Espírito Santo, sem a força, sem a graça? Vejam que sem o Espírito, Sansão sofre uma tremenda derrota, foi golpeado, nocauteado e tomou de 7x1 do adversário (rsrs). 
Lutar contra as setas do inimigo, as ciladas de satanás, contra o pecado, contra a soberba, contra a concupiscência da carne, concupiscência dos olhos, a inclinação, é derrota na certa, pois não não obteremos vitória, êxito, triunfo sob nenhuma dessas coisas estando na carne. Irmãos, entendam que o fato de o Espírito se retirar de uma pessoa não impede que a pessoa continue sua vida normal, o fato do Espírito se extinguir não impede a pessoa de continuar suas atividades. É uma morte lenta e imperceptível. Como Saul, por exemplo, em 1 Samuel 16:14, "E o Espírito do Senhor se retirou de Saul, e atormentava-o um espírito mau da parte do Senhor.", Saul não deixou de ser rei, ele continuou a reinar, continuou a guerrear, mas tudo na carne, sem orientação, revelação, instrução divina e o resultado foi o fracasso total, pecados, derrotas, crimes. E isso pode acontecer com pastor, obreiros, evangelistas, com qualquer um, andar na carne sem a unção do Espírito. Apesar do receio de muitos falarem que não, que satanás não pode ter acesso em nossa vida e muitas igrejas dizerem que basta falar com autoridade "ta amarrado", "ta repreendido", isso pode acontecer sim, aconteceu com Saul, um rei de Israel, descendente de Abraão, da linhagem dos patriarcas, e mesmo assim a Bíblia diz que um espírito mau o influenciou, o seduziu para tentar contra Davi. Veja também o que aconteceu com Pedro que, na sua covardia, deu brecha para satanás o usar para querer atrapalhar os planos de Deus em Mateus 16:22, onde diz: "E Pedro, chamando-o a parte, começou a reprová-lo, dizendo: Tem compaixão de ti, Senhor; isso de modo algum te acontecerá". Será que satanás não pode ter acesso a nossas vidas uma vez que o Espírito Santo está se retirando e não está atuando? Tanto Sansão, quanto Saul e Pedro nem perceberam que um saiu e o outro entrou, pois o discernimento que é concebido a nós através do Espírito foi retirado. E aí meus amados? A queda é certa e a derrota garantida. 
Amados, **sem o Espírito Santo é fato que não somos nada**, mas será que nós notaríamos alguma diferença se ficássemos sem Ele por 1 mês, 1 semana, 1 dia? Se ficássemos sem Ele no nosso trabalho, em casa, andando na rua, evangelizando, pregando, não estaríamos nem aí ou no primeiro dia nós cairíamos de joelhos que nem Davi vez no Salmo 51:11, "Não me lances fora da tua presença, e não retires de mim o teu Espírito Santo", tamanha era a comunhão de Davi. Nós precisamos e deveríamos ter tanta dependência do Espírito Santo que se ficássemos um segundo sem Ele éramos pra cair duro no chão e morrer. A palavra é **Santificação**, se o Espírito Santo tiver extinto, não conseguiremos nos santificar. Romanos 8:13 "Porque, se viverdes segundo a carne, morrereis; mas, se pelo Espírito mortificardes as obras do corpo, vivereis e Gálatas 5: "Porque a carne cobiça contra o Espírito, e o Espírito contra a carne; e estes opõem-se um ao outro, para que não façais o que quereis". Veja irmãos, a carne é a nossa natureza, a nossa natureza quer fazer tudo o que é de ruim aos olhos de Deus, a nossa natureza quer trair a esposa, a nossa natureza quer mentir, a nossa natureza quer ver pornografia, a nossa natureza quer dinheiro ilícito, quer julgar e condenar o próximo, quer virar as costas aos necessitados e se o Espírito se extinguir, a nossa natureza prevalecerá. E a causa de tudo isso, de ser mundano, materialista, egoísta, mentiroso, infiel, intolerante, inconsequente, carnal, essas características imundas vem por consequência de que o Espírito Santo tem se retirado de sua vida e eu digo com propriedade no assunto de que aconteceu na minha própria vida. A inclinação da carne é inimiga de Deus. Irmãos, Sansão não sabia a causa de ta fazendo as coisas que ele estava fazendo, não sabia que sua carne estava predominando. Para encerrarmos, qual a saída diante disso tudo meus irmãos? 
Indo para Juízes 16:28, encontraremos a resposta: "Então Sansão clamou ao SENHOR, e disse: Senhor DEUS, peço-te que te lembres de mim, e fortalece-me agora só esta vez, ó Deus, para que de uma vez me vingue dos filisteus, pelos meus dois olhos". O que Sansão teve que fazer para ter o poder novamente? Sansão clamou o nome do Senhor! Sansão reconheceu que Deus tinha se esquecido dele, percebeu que Deus havia se apartado dele, e agora ele assumiu que era um fracasso, que era um derrotado e um zé ninguém sem a presença de Deus! A primeira coisa que devemos fazer para sair dessa situação é reconhecer que não temos isso tudo que pensamos ter, que não somos a "última coca cola do deserto", que não estamos no nível em que pensamos está. Precisamos reconhecer e dizer: Senhor, eu te perdi, eu te entristeci de tal maneira que o Senhor me deixou e eu pequei contra ti, entristeci o teu Santo Espírito e estou agindo na carne, no meu poder financeiro, na força do meu braço, agindo com a minha influência que eu pensava ter, agora Senhor, lembra-Te de mim, ó Deus e trás de volta o Teu Santo Espírito! Essa é **a primeira etapa: a conscientização**. **A segunda coisa é buscar**, buscar mais uma vez, buscá-Lo novamente, se esforçar, ter compromisso e perseverança em busca da presença de Deus, buscar ser cheio do Espírito, buscar e pedir em oração porções maiores, sair da passividade e pedir ao Senhor a alegria da salvação! **A terceira e última coisa está em 1 Tessalonicenses 5:19: "Não extingais o Espírito"**. Ou seja, cative-O, cative-O para que Ele nunca mais se apague, regue-O, nutra o Espírito de coisas boas meus irmãos para que nunca mais venhamos passar o que estamos passando, para que essa perda nunca mais venha a acontecer, para que essa crise nunca mais torne a voltar. Passemos a madrugada em oração se preciso for, jejuemos se preciso for, façamos qualquer coisa, mas não extingais o Espírito Santo. 
Irmãos, quantos cultos nos sentimos fortalecidos, vigorosos e dizemos: Agora vou mudar minha vida, agora vou tomar um novo rumo, agora vou mudar minha história... passou... Quantas vezes já falamos: Agora viro homem de Deus, agora eu sou do Senhor! Passou... Por que passa, por que isso é tão temporário? Porque nós não regamos diariamente! Não cativamos como é pra ser cativado. Meus irmãos, cativem o Espírito, jejuemos hoje, amanhã, semana que vem de novo, acordemos de madrugada pra orar, pra meditar nas Escrituras e se fizermos isso cada dia que passar estaremos mais fortes, mais cheios do Espírito e mais preparados para enfrentar esse mundo carnal. Em nome de Jesus, sejamos cheios do Espírito Santo de Deus! Amém!<file_sep>/_posts/2017-12-14-devocional-14-12-2017.md --- layout: post title: Devocional - 14/12/2017 categories: devocionais image: reflexao.jpg logo: reflexao.jpg description: Devocional - 14/12/2017 - Isaías 53:10 url: https://eltonsantos.github.io/devocionais/devocional-14-12-2017/ author: <NAME> comments: true --- __"Todavia, ao Senhor agradou moê-lo, fazendo-o enfermar; quando a sua alma se puser por expiação do pecado, verá a sua posteridade, prolongará os seus dias; e o bom prazer do Senhor prosperará na sua mão."__ Isaías 53:10 <p class="intro"><span class="dropcap">P</span>az de Deus a todos!! No devocional de hoje venho falar como as pessoas são contraditórias quando o assunto envolve relacionamentos.</p> As pessoas separam-se por não crerem que Deus pode restaurar seu casamento, mas creem que Deus pode lhes dar um outro casamento melhor, como pode ser isso? Isso na prática é duvidar do poder imensurável de Deus, que pode fazer todas as coisas segundo Sua boa, perfeita e agradável vontade. Se permitirmos que a situação tire a visão do poder restaurador de Deus, teremos uma vida de sucessivos fracassos, o diabo será exaltado por isso, o mundo perderá suas esperanças e o evangelho será desacreditado, e creio que você não quer fazer parte desse sistema. 
Então LUTE! PERSEVERE! ORE! Minha mãe tinha motivos humanos de sobra para separar-se, mas preferiu sofrer e perseverar durante anos, isso resultou em salvação de almas e exemplo pra esse mundo caído e incrédulo. Na sua dor Deus foi glorificado e lhe trouxe uma alegria incomparável. CADA CASO É UM CASO, MAS PRA TODOS OS CASOS UMA SÓ SOLUÇÃO: O PODER DE DEUS EM CRISTO, VENCENDO O INIMIGO DA SUA VIDA, PRA SEMPRE. SE CRISTO TIVESSE RECUADO E RECUSADO A NOIVA, O QUE SERIA DE MIM E DE VOCÊ? O QUE SERÁ DOS SEUS, SE VOCÊ DESISTIR AGORA? SEI QUE É DIFÍCIL, MAS O SEU CHORO DEUS CONVERTERÁ EM ALEGRIA, NÃO ALEGRIA DO MUNDO, MAS ALEGRIA DOS CÉUS, VERDADEIRA, VIVA E ETERNA. Creia no poder de Deus meus irmãos! Que O Senhor abençoe a todos, amém!<file_sep>/_posts/2017-08-29-reflexao-sejamos-biblicos.md --- layout: post title: Reflexão - Sejamos Bíblicos! categories: devocionais image: reflexao.jpg logo: reflexao.jpg description: Reflexão - Sejamos Bíblicos - Mateus 22:29 url: https://eltonsantos.github.io/devocionais/reflexao-sejamos-biblicos/ author: <NAME> comments: true --- __"Jesus, porém, respondendo, disse-lhes: Errais, não conhecendo as Escrituras, nem o poder de Deus."__ **Mateus 22:29** <p class="intro"><span class="dropcap">E</span>stava conversando em um dos grupos cristãos que tenho no meu WhatsApp e uma coisa me chamou a atenção e senti que Deus colocou no meu coração pra fazer uma breve reflexão sobre o assunto.</p> Irmãos, eu vejo muita gente que gosta de bater em falso profeta, falar mal de heresias, dar porrada em neo pentecostais, fazer parte de movimentos de desigrejados, ser radical, ser contra as igrejas por achar que tudo é heresia. E isso me chamou a atenção durante as conversas, eu li vários irmãos falando muito mal de certas igrejas, somente apontando erros e nada de dar uma solução. 
Isso me chamou a atenção por que tem um problema nessa gente que só quer atacar outras igrejas: eles odeiam os falsos profetas, porém também não amam a Deus, não amam a santidade. Só gostam de apontar, mas esquecem de ir atrás e ensinar o que é certo. É muito fácil você falar mal de alguém que não está pregando o evangelho corretamente apontando o dedo pra essas pessoas, mas ir procurar os irmãos pra ensinar o evangelho genuíno isso é difícil. E a verdade é que maior do que você querer esculachar falsos profetas é você buscar santidade. Muitos crentes atacam falsos profetas, porém não são verdadeiramente regenerados, não nasceram de novo. A maior preocupação dessas pessoas está em remover os falsos profetas do mundo ao invés de buscar verdadeiramente a santidade, de ir buscar a salvação. Irmãos, antes de se levantar uma bandeira para sair pra evangelizar e fazer missões, falar algo contra as heresias e falsos profetas, primeiro: <b class="red">Se converta, busque a regeneração e estude a doutrina da salvação</b>! Antes de salvar a outros, examine-se, pois, a si mesmo se está salvo. Não adianta querer combater o mal se você não estiver revestido da armadura de Deus que inclui, sobretudo, o <b class="red">[capacete da salvação](https://eltonsantos.github.io/devocionais/a-verdadeira-batalha-espiritual/)</b>... Não adianta querer salvar o mundo todo se nem conhecemos as Escrituras e o poder de Deus, isso é um erro, conforme está escrito no versículo lido. Que Deus possa nos abençoar para que possamos conhecer mais as Escrituras e ocuparmos o nosso tempo com coisas mais importantes do que combater falsos profetas. Que possamos nos preocupar com nossa salvação e pregar o verdadeiro Evangelho. 
Amém!<file_sep>/_posts/2018-03-05-soli-deo-gloria.md --- layout: post title: Estudo dos solas:&nbsp; Soli Deo Gloria categories: devocionais image: solideogloria3.jpg logo: solideogloria3.jpg description: Parte 5 - Soli Deo Gloria - Romanos 11:33-36 url: https://eltonsantos.github.io/devocionais/soli-deo-gloria/ author: <NAME> comments: true --- __"Ó profundidade das riquezas, tanto da sabedoria, como da ciência de Deus! Quão insondáveis são os seus juízos, e quão inescrutáveis os seus caminhos! Porque quem compreendeu a mente do Senhor? ou quem foi seu conselheiro? Ou quem lhe deu primeiro a ele, para que lhe seja recompensado? Porque dele e por ele, e para ele, são todas as coisas; glória, pois, a ele eternamente. Amém."__ Romanos 11:33-36 <p class="intro"><span class="dropcap">P</span>az de Deus a todos meus amados irmãos, estamos chegando no nosso último estudo, dessa vez vamos tratar sobre o Soli Deo Gloria, ou Somente a Deus a Glória.</p> Ao longo dessas 5 semanas do estudo dos solas vimos que a igreja ia cada vez mais se desviando das Escrituras, as doutrinas se tornavam cada vez mais estranhas à Palavra de Deus e cada dia mais sendo introduzidas nas igrejas. A centralidade de Cristo ia dando lugar ao homem no centro, a tradição se tornava cada dia mais superior às Escrituras e os descendentes já nasciam aprendendo errado o que já vinha sendo ensinado errado pelos seus antepassados e tudo isso de tal maneira que a reforma protestante se tornara uma necessidade para que pudéssemos voltar ao Evangelho genuíno. E o último pilar da reforma diz que devemos dar glória somente a Deus. Irmãos, toda glória dada ao homem é uma glória vazia, uma glória passageira. É idolatria e abominação para Deus. O Senhor não divide a Sua glória com ninguém, somente Ele é Deus e Soberano. Irmãos, a Palavra de Deus diz, conforme os versículos que lemos, que tudo vem Dele, tudo é por meio Dele e tudo é para a glória de Deus, a Palavra de Deus permanece para sempre, Ela jamais irá caducar. 
E o homem só encontra a sua razão de viver quando ele vive para a glória de Deus. Sabe por que? Por que nada desse mundo nos satisfaz, podemos ter o máximo de dinheiro do mundo, não sei quantas mil mulheres, podemos ter poder de governar as nações, comer todas as coisas boas do mundo, viajar aos quatro cantos da terra, ter fama, ter troféus, ter conquistas e nada nesse mundo conseguiria satisfazer nossa carne, nós somos como potes furados onde só quem pode tampar o furo e nos preencher é o Senhor. Precisamos compreender que Deus não é contra a nossa felicidade, muito pelo contrário, Ele quer que sejamos felizes, mas que sejamos felizes da maneira correta e não devemos nos contentar com essa alegria pequena, limitada, terrena e passageira. Deus nos criou e quer nos salvar para a maior de todas as felicidades, a felicidade de amá-Lo, de conhecê-Lo e de glorificá-Lo por que quando encontramos e estamos com nossa vida em Deus, aí sim encontramos a razão de estarmos vivos. Quando vivemos para a Glória de Deus, nós vivemos de forma maiúscula, de uma forma a alcançarmos lugares mais altos. Se reconhecermos a Sua glória, o Seu amor, a Sua bondade, a Sua majestade, a Sua grandeza, a Sua graça, aí sim nós estaremos aptos a encontrar em Deus o verdadeiro sentido da vida. Nosso coração é uma fábrica de ídolos, mas devemos saber que esses ídolos morrem, passam, são esquecidos com o tempo, no máximo fica uma frase, um texto, um livro ou um vídeo de alguns minutos. Exemplos como <NAME>, um ídolo nacional, eu não lembro direito, pois era muito pequeno, mas lembro do meu pai contando tudo, o Brasil parava aos domingos para assisti-lo, e após uma breve vida, ele morre, que Deus o tenha, mas o que ficou? Uma vinheta, um pequeno tema e quase ninguém lembra dele, somente os mais antigos. Por isso irmãos, não devemos dar glória a outro homem, a outra coisa se não ao único Deus que vive eternamente e nunca passará! 
Muitas pessoas acham crentes retrógrados para os dias atuais, pois hoje o que impera é a Lei do "Tudo posso em mim mesmo", a Lei do Deus passivo, do Deus somente de amor, do Deus da porta larga e do Deus que todos vão subir ao céu. E acabam por dizer: Por que eu não posso ir a festa, por que eu não posso curtir a vida a doidado, por que não posso beber? Por que não posso desfrutar os prazeres da juventude e da carne? Irmãos, o que a Palavra de Deus tem a dizer sobre isso está em 1 Coríntios 10:31: "Quer comais, quer bebais, quer façais qualquer coisa, façais para a Glória de Deus". Se quer curtir uma balada, uma bebida, então vá, mas se você for crente, você acreditar em Cristo como seu salvador diga: "Senhor estou aqui em Teu nome e glória ao Teu nome pelo que estou fazendo com meu corpo" e "Senhor estou bebendo, me embriagando e escandalizando as pessoas, dando mal testemunho, mas estou fazendo isso em Teu nome e glória ao Teu nome por isso". Ponto, faça o que quiser depois disso, mas lembre-se de Hebreus 4:13, onde diz: <b class="red">"E não há criatura alguma encoberta diante Dele; antes todas as coisas estão nuas e patentes aos olhos daquele com quem temos de tratar"</b>. E ainda Gálatas 6:7, que quem Lê sente que a espinha gela, que diz: <b class="red">"Não se deixem enganar: de Deus não se zomba. Pois o que o homem semear, isso também ele vai ceifar"</b>. Podemos fazer o que quiser, mas saibamos que Deus não esquece um til se quer e não adianta ler somente a parte que diz que Deus é amor, não devemos esquecer que o Senhor é Deus de justiça também, logo, se o que fazemos não for para honra e glória Dele, estamos plantando errado e certamente não colheremos o certo. Portanto irmãos, vamos procurar viver para a glória de Deus, sabemos que nunca será fácil, mas temos um Deus todo poderoso ao nosso lado, nos justificando e nos levantando sempre quando caímos. 
Vamos evitar atalhos que só nos causam dores e sofrimentos e vamos reconhecer Deus como nosso único e verdadeiro salvador dando glória somente a Ele! Espero que possam ter aprendido ao longo dessas cinco semanas de estudo assim como eu aprendi quando estava estudando essas ministrações sobre os solas da reforma, aprender sobre as coisas do Senhor é sempre muito bom e agradeço muito a oportunidade que Deus tem me dado para compartilhar o que aprendi acerca da Sua Palavra e espero também continuar sendo usado por Deus para fazer seu Reino avançar, pois **a Ele é toda honra, toda glória, todo louvor e toda adoração para todo o sempre, amém**!<file_sep>/_posts/2017-08-27-o-poder-da-igreja-que-ora.md --- layout: post title: O poder da igreja que ora - TESTEMUNHO categories: devocionais image: orando2.jpg logo: orando2.jpg description: O poder da igreja que ora - TESTEMUNHO - Atos 12:5 url: https://eltonsantos.github.io/devocionais/o-poder-da-igreja-que-ora/ author: <NAME> comments: true --- __"Pedro, pois, estava guardado no cárcere; MAS havia oração incessante a Deus por parte da igreja a favor dele."__ *Atos 12:5* <p class="intro"><span class="dropcap"> P</span>az de Deus meus amados irmãos, hoje venho aqui contar meu testemunho e relacionar o que aconteceu com <b class="red">o poder de uma igreja que ora</b>, que será o tema da pregação de hoje. O fato aconteceu faz um pouco mais de um mês, me deixou uma semana internado, mas graças a Deus o Senhor preservou minha vida e somente agora estou conseguindo escrever detalhadamente tudo o que vivi e senti.</p> Quero muito que esse artigo seja bastante detalhado (apesar de que ainda sim terei que abstrair várias coisas), pois esse testemunho me acompanhará por toda minha vida e daqui a alguns anos, quando for lê-lo novamente quero lembrar de todos os detalhes da época que mudou (novamente) minha vida. 
Tudo que está escrito aqui é para a honra e glória do nosso Senhor e espero que vocês possam também reconhecê-Lo como o Rei dos reis e saber que Ele existe e Ele é poderoso e é o nosso Eterno e maravilhoso Pai. Que esse testemunho possa abençoá-los e despertar o desejo de orarmos uns pelos outros, para que juntos possamos está pregando o Evangelho até quando Deus nos chamar. Era um dia comum, um dia qualquer, um dia como todos os demais, por volta de 11 da manhã de sábado do dia 8 de Julho, onde eu estava tomando banho pra sair, cortar o cabelo e depois talvez ir pro cinema ou talvez só andar no shopping, estava fazendo a barba e até então me sentindo muito bem. Depois de uma semana de trabalho, só queria poder descansar mentalmente para a noite poder ir a uma igreja a convite de um amigo meu, em um culto de jovens. E aqui entra o primeiro detalhe, <b>os nossos planos muitas vezes não são os planos de Deus</b>, não são aquilo que Deus quer para as nossas vidas, quem sabe poderia me acontecer algo em algum momento do dia bem pior e acabei tendo um livramento ou quem sabe não aconteceu por que precisava acontecer algo de melhor (eu particularmente acredito nessa hipótese, mas é um sentimento meu), não é possível saber com precisão, pois não somos os donos de todos os caminhos e escolhas que deixamos de fazer. Essa parte é com Deus, é Ele quem determina onde iremos e aquilo que não aconteceu só Ele realmente sabe o motivo. O fato é que simplesmente do nada, sem mais nem menos, eu começo a me sentir mal, muito mal, com uma dor aguda, com uma dor que aconteceu abruptamente e pela velocidade que aconteceu foi de longe a maior dor que já senti na vida. Imediatamente após a dor dar início, começo a me contorcer, a suar bastante frio, a perder todas minhas forças, a visão ficar turva e a fraqueza tomar conta do meu corpo. Pronto, simplesmente já não conseguia mais me colocar em pé! 
Eu estava sozinho em casa, pois meus pais tinham ido almoçar e aí aconteceu o primeiro desmaio, me contorcendo e chorando no banheiro, clamei ao Senhor sentindo que iria morrer. Aqui acontece outro detalhe, só pensava em arrependimento e a me arrepender de tudo o que fiz, pensava nas pessoas que magoei, nas pessoas que esperavam muito de mim e eu decepcionei e a me perguntar: céu ou inferno? Irmãos, a beira da morte não pensamos em tantas coisas não, só vem a nossa mente aquilo que realmente é mais importante pra nós! Após uns longos 10 minutos depois aproximadamente, meus pais chegaram e ficaram perplexos de como me deixaram, como me encontraram e minhas palavras foram: "Pai me leva urgente para o hospital, estou morrendo". E meus pais me levaram nos braços até o carro, devido não conseguir mais sentir minhas pernas, nem mexer meu corpo, pois tudo doía e eu só tinha forças para chorar e era ainda sim era involuntário. Chegando no hospital, eu estava bem mal, bastante pálido, sem enxergar e sem andar, só sentia dor, as pernas pareciam nem estarem mais comigo. Fui colocado na cadeira de rodas e ai só lembro que um médico me viu, falou algo que não consegui escutar direito e nem entender claramente e aí desmaiei não sei por quanto tempo, acordei já na cama em um leito e com a dor ainda mais forte que antes. Após um tempo na cama com dores fortes e a barriga bem inchada eu comecei a chorar de dor e a piorar, nesse ponto, quem relatou pra mim foram meus pais, pois eu simplesmente havia apagado novamente. Eu cheguei a sofrer uma parada cardíaca por alguns segundos, tempo que para meus pais parecia uma eternidade. Eles começaram a me chamar e eu não demostrava mais reação, não sentiam mais meu pulso e aí desesperados e chorando chamaram os médicos. 
Acordei com uns 3 médicos ao meu redor me chamando e fazendo aquela ressuscitação que fazem quando o coração para e com meu pai dizendo: "Meu Deus, vim do Rio de Janeiro só pra ver meu filho morrer, não faz isso comigo Senhor". Quando olhei pros meus pais e suas expressões de desespero e choro deles vi que o negócio foi sério mesmo. Para honra e glória do Senhor, Ele me trouxe de volta, me dando uma nova e mais uma chance para mudar minha vida, tentar fazer a diferença no mundo e falar sobre Cristo e o que Ele faz por mim. Após ter passado por esse sufoco, tive que ser transferido de hospital por questões burocráticas e estava ainda bastante mal, já era domingo a tarde e nada de melhorar 1% se quer. Madrugada em claro, todo furado e dopado de tanto remédio, antibióticos e tudo mais. E quando eu fui transferido para outro hospital eu pedi para que avisassem a minha igreja tudo o que estava passando e pedi oração para que eu pudesse resistir a dor. E um fato importante a observar aqui é que igreja não é somente as quatro paredes e pronto, ali é um templo, um lugar que nos reunirmos para estarmos em comunhão, para estarmos juntos, no entanto igreja, no sentido mais amplo ou mais restrito dependendo do referencial, somos nós, nós <b>somos templo e morada do Espírito Santo</b>, cada um de nós somos a igreja e juntos fazemos parte do corpo de Cristo, é como se cada um fosse um pedacinho de algo e fossemos juntando, juntando até fazermos parte de algo maior. E foi aí que minha igreja, cada um dos irmãos, passou a orar e interceder por mim. Irmãos, fisicamente eu estava acabado, sem fazer vocês sentirem pena, mas apenas relatando o ocorrido, eu estava numa cadeira de rodas, impossibilitado de fazer qualquer movimento sem sentir fortes dores em toda região do abdômen, precisando de ajuda para fazer as necessidades fisiológicas, usando fraldas e sendo cuidado pelos meus pais para tomar banho e outras coisas mais. 
Pois é, foi esse ponto que um jovem recém completados os 29 anos aparentemente saudável até então, cheio de sonhos e objetivos a realizar chegou. Irmãos, não somos nada e essa é uma realidade bem dura de ser engolida, <b>não regemos a nossa própria vida conforme nossa vontade</b>, pois quando Deus fala, é assim que será, é uma ordem expressa, ninguém pode ir contra a Sua vontade. Era ali que eu precisava está para ter esse momento com Ele, <b class="red">o Senhor é capaz de armar qualquer estratégia por mais dolorosa e confusa que seja para trazer seus filhinhos para próximos Dele</b>. Eu não podia falar muito ao telefone, pois nem isso eu conseguia direito, mas recebi uma ligação de um amigo meu, bastante usado por Deus, para falar algo especial para mim. Na ligação eu questionei bastante: por que isso, por que aquilo e meu amigo disse: Meu irmão, não pense em nada além do Senhor. Esse é o seu momento, é o seu momento com nosso Deus, pergunte o que Ele quer de ti, o que Ele quer pra sua vida, apenas tenha comunhão com Deus e esqueça de trabalho, de preocupações, do que você está passando, foque em Cristo e eu e a igreja estaremos orando por você! Fiz conforme meu amigo disse pra fazer. E fui perguntando em pensamento o que o Senhor queria de mim, se havia possibilidade deu viver ou se de fato tinha chegado ao fim. Então eu começei a me sentir em paz, e um dos pedidos que fiz a Deus foi que se fosse pra viver aqui não sendo um cristão verdadeiro que Ele me levasse, abdicaria da minha vida se não fosse para servi-Lo da forma que Ele precisa ser servido. E foi através de pensamentos, sonhos, pessoas e de Sua Palavra que Deus falou comigo, Deus me mostrou duas coisas, a primeira que eu viveria sim, mas para a honra e glória do Senhor, pois os Seus planos não haviam todos se cumpridos em minha vida, portanto, ainda sou um "imortal", rsrs. E claro, Ele pagou um alto preço pela minha vida, nada mais justo que honrá-Lo, amá-Lo e adorá-Lo por toda nossa vida. 
A segunda coisa que Ele me disse, essa ainda há de ser cumprida, Ele sabe o tempo certo disso acontecer e até lá eu vou vivendo e buscando ao Senhor durante os dias da minha vida. No entanto quando chegar a hora, certamente estarei dando meu maior testemunho para honra e glória do Senhor. Após três dias buscando ao Senhor sobre o que Ele quer de mim e receber a resposta, começei a melhorar. Os remédios começaram a fazer efeito, minha barriga começou a desinchar e começei a me reerguer, saí da cadeira de rodas, fui voltando a andar e fui me recuperando, para a honra e glória do Senhor nosso Deus. Confesso que me senti o Benjamin Button (do filme o curioso caso de Benjamin Button), parecia um velhinho de 1,10 aproximadamente, todo encolhido e fui me rejuvenescendo e crescendo até atingir meu tamanhão gigante de 1,70m rsrs. Um dos inúmeros ensinamentos que esse meu testemunho deixou pra mim e eu compartilho com vocês é em relação <b class="red">ao poder da igreja que ora</b>, como a oração de cada pessoa pode mudar minha vida, oração tanto das pessoas que cuidaram de mim, quanto das pessoas que vieram me visitar, quanto das que me ligaram, quanto das que clamaram, todas intercedendo a meu favor para que eu pudesse viver. Foi graças ao Senhor e as orações da igreja que eu pude ser curado, sair daquele hospital uma semana depois sem nenhuma sequela, mesmo após algo tão grave ter acontecido, algo que fez parar meu coração e no fim ter sido diagnosticado apenas com uma infecção intestinal. Esse é o poder de uma oração. Irmãos, quando vosso pastor convocar a igreja pra orar, quando houverem pedidos de oração, quando houverem campanhas, departamentos e momentos de oração na sua igreja, não vos abstenha de tal ato, sempre há uma pessoa que verdadeiramente precisa desse seu clamor. Foi a igreja orando e intercedendo por Pedro que ele pode se libertar do cárcere. Foi Jó orando pelos seus amigos que Deus mudou seu cativeiro. 
Foi Neemias orando e jejuando pelo seu povo enquanto Jerusalém estava destruída que sua oração fez as portas se abrirem, fez o Rei lhe estender a mão e a cidade ser restaurada. Foi Lutero orando que aconteceu um grande avivamento na Europa e no mundo. Foi Elias orando e jejuando que a resposta veio do alto! Foram inúmeros casos que nos provam que a igreja que ora possui uma força sobrenatural e que o Senhor se importa e inclina-se diante de um pedido com fé e com sinceridade. Diante de tudo que foi dito o que fazer? Primeiro, a Igreja precisa decidir <b>orar com fervor</b>, pagar esse preço! Segundo, <b>interceder uns pelos outros</b>, não desistam de orar pelas pessoas, vocês não sabem das suas necessidades atuais. E Deus confirmou essa pregação no meu coração por dois motivos, um na quarta quando falava com um irmão sobre orar uns pelos outros mesmo que não tenhamos mais contato com a pessoa e pelo segundo motivo, na ministração de quinta na minha igreja, que não devo deixar pra depois e nem adiar aquilo que Deus quer que você faça, que era justamente dar meu testemunho. Irmãos, eu quero orar por vocês, quero orar por todos vocês para que possam ser abençoados, abençoarem e atingirem um número incontável de pessoas quando se está orando uns pelos outros. <b class="red">A oração pode mudar uma história</b>, uma vida, um destino, pode tirar aquela pessoa do pecado, tirar a pessoa do fundo do poço, <b class="red">uma oração pode ser tudo o que uma pessoa quer pra aquele momento</b>. Deus deu esse poder a nós, que possamos usá-lo mais e melhor e não nos cansemos dessa pratica e nem de fazer o bem orando por alguém. Que o Senhor abençoe a todos, fiquem na paz de Deus! 
<file_sep>/_posts/2017-08-06-reflexao-salmo-13-v1a6.md --- layout: post title: Reflexão - Salmo 13:1-6 categories: devocionais image: reflexao.jpg logo: reflexao.jpg description: Reflexão - Salmo 13:1-6 url: https://eltonsantos.github.io/devocionais/reflexao-salmo-13-v1a6/ author: <NAME> comments: true --- "Até quando te esquecerás de mim, SENHOR? Para sempre? Até quando esconderás de mim o teu rosto? Até quando consultarei com a minha alma, tendo tristeza no meu coração cada dia? Até quando se exaltará sobre mim o meu inimigo? Atende-me, ouve-me, ó Senhor meu Deus; ilumina os meus olhos para que eu não adormeça na morte; Para que o meu inimigo não diga: Prevaleci contra ele; e os meus adversários não se alegrem, vindo eu a vacilar. Mas eu confio na tua benignidade; na tua salvação se alegrará o meu coração. Cantarei ao Senhor, porquanto me tem feito muito bem. " *Salmos 13:1-6* <p class="intro"><span class="dropcap">P</span>az de Deus meus irmãos queridos, gostaria de fazer hoje uma breve reflexão sobre o silêncio de Deus, essa pregação foi ministrada em minha igreja e o Senhor falou muito no meu coração sobre ela. Irmãos, o silêncio de Deus nos da a impressão que Deus não está nem aí pra nós. O silêncio dói, nos trás angústia, depressão, não é fácil. Mas vejam até Jesus experimentou o silêncio de Deus conforme relato em Mateus 27:46. No entanto, vocês que estão passando pelo silêncio de Deus, eu os digo: preparem-se para serem escolha das coisas sobrenaturais que Ele tem preparado para sua vida. Em outras palavras, aqueles que estão passando por alguma prova e acham que Deus está o ignorando, se prepare, pois quando o tempo certo de Deus chegar, coisas sobrenaturais e reviravoltas irão acontecer em sua vida.</p> Nesse Salmo, Davi apresenta está com impaciência, ansiedade e revolta e mesmo Davi sendo um escolhido de Deus, não entende por que Deus faz isso com ele. 
Entretanto, há uma analogia simples aqui: "Quando o aluno está na prova, o professor fica em silêncio"! Aleluia! E durante esse Salmo, Davi se depara com 3 perplexidade ou incertezas, cada um com um alvo diferente: **1ª Incerteza - Incerteza com Deus (verso 1)** Davi julga que Deus o esqueceu, questiona-O que Deus escondeu Seu rosto e se angustia por parecer que o Senhor não o ver. No entanto irmãos, quero dizer que não há condição alguma, não há passagem, não há trecho, não há momento algum onde Deus abandona seu filho. Irmãos somos nós que o abandonamos sempre que pecamos, irmãos, nós somos vítimas de nossas próprias escolhas e após as frustrações culpamos a Deus por isso. Questionando do por que que Ele não evitou cairmos em nossas próprias ciladas. **2ª Incerteza - Incerteza com ele mesmo (verso 2a)** No verso 2, Davi se auto questiona, se pergunta até quando. (...) Irmãos, em nós constantemente há uma fé e uma incredulidade; há uma esperança e uma vontade de desistir; há uma vontade de pecar e uma vontade de está na graça, constantemente estamos lutando contra nós mesmo, contra nossa própria carne, e sabe quem vence nessa história? Quem você alimentar mais! Um exemplo disso são os canais de violência, ora, se todo dia você assiste programas onde só passam morte, assalto, homicídios aqui e ali, como você vai consegui sair na rua? Cada passo é uma sensação diferente de que algo ruim aconteça. Meus queridos, não devemos alimentar nossos medos! O que nós estamos alimentando? **3ª Incerteza - Incerteza com os outros (verso 2b)** Aqui Davi fala dos terceiros, dos problemas e inimigos que o afligem. Como se fosse, até quando Senhor, vou orar por esse menino e ele continua nas drogas? Até quando orarei por esse homem e ele ainda não está na igreja? Até quando Senhor vou sofrer por esse casamento, se ele está debaixo de Sua benção? 
E por fim, irmãos, esse Salmo nos dá 3 lições do que fazer quando a resposta de Deus não vem: **1ª lição** Não abandone o clamor, mesmo que ainda não tenha ouvido resposta, continue clamando, pois um clamor é uma súplica em voz alta e só vem de alguém que aprendeu a depender totalmente de Deus, e isso meus amados, pode passar o tempo que for, tem resposta! **2ª lição** Não abandone a confiança! Davi, no verso 5 disse que: "mas eu CONFIO na tua benignidade", ele não deixou de confiar em Deus, mesmo durante o Seu silêncio. Feliz é o homem que põe a sua confiança em Deus! **3ª lição** Não abandone o louvor! Davi começou o Salmo inquieto, mas no verso 6, ele louvou ao Senhor. Devemos louvar incondicionalmente, Ele dando o que queremos ou não. É louvar mesmo sabendo que tudo está perdido, mas crendo que um dia será tudo restaurado, independente do cenário atual. É louvar apenas pelo o que Ele é! Para concluirmos, Davi termina dizendo: " porquanto me tem feito muito bem "! Irmãos, mesmo nas lutas, Davi sabia que Deus estava no controle de tudo. Onde estamos, pra aquele momento, é o melhor de Deus para nossas vidas. Mesmo estando no deserto, naquele momento é o que Deus preparou de melhor, pois se estamos no deserto, é que estamos passando por lutas e provas para que possamos sair muito mais fortes do que nós entramos. <b class="red">Louve ao Senhor, por tudo o que Ele tem feito, não pare de adorá-Lo, pois é por Ele e para Ele todas as coisas</b>! Paz de Deus esteja convosco! Amém!<file_sep>/_posts/2017-06-23-reflexao-salmo-125-v1e2.md --- layout: post title: Reflexão - Salmo 125:1-2 categories: devocionais image: reflexao.jpg logo: reflexao.jpg description: Reflexão - Salmo 125:1-2 url: https://eltonsantos.github.io/devocionais/reflexao-salmo-125-v1e2/ author: <NAME> comments: true --- "Os que confiam no SENHOR serão como o monte de Sião, que não se abala, mas permanece para sempre. 
Assim como estão os montes à roda de Jerusalém, assim o Senhor está em volta do seu povo desde agora e para sempre" Salmos 125:1-2 ## Reflexão - Salmo 125:1-2 <p class="intro"><span class="dropcap">I</span>rmãos, devemos confiar no Senhor para que não venhamos a nos abalar com as intempéries da vida. Aqui não fala sobre não haver nada para nos abalar, mas sim que nós não seremos abalados! Abalar significa desmoronar, cair. E de acordo com o texto bíblico nós não cairemos, resistiremos no Senhor!</p> Conseguimos fazer uma conexão dessa Salmo com 2 Coríntios 4:8, quando Paulo diz: "Em tudo somos atribulados, mas não angustiados; perplexos, mas não desanimados" Irmãos queridos, seremos atribulados em tudo, em todas as áreas nesse mundo, mas não nos angustiaremos, não nos abalaremos, não cairemos, pois a Graça de Deus é conosco! Apenas confiar no Senhor já seria suficiente para não se matar, não perder a saúde, não ficar doente com os problemas da nossa vida. No entanto, o Senhor foi mais além e no verso seguinte Ele fala que está em nossa volta sempre e sempre, por todos os séculos. É o que eu sempre digo, o Senhor está sempre pronto para aquele que O quiser, Ele não nos abandona, por mais distante que possamos querer estar Dele, Ele está sempre perto para quando nós O buscarmos! E por fim irmãos saibam que dificuldades, lutas e problemas virão, enquanto estivermos nesse mundo, porém a nossa condição para não nos abalar é: confiar no Senhor! E assim Ele será nossa rocha inabalável! Que possamos ter mais e mais fé a fim de confiar Nele com todo nosso coração. **Esse é o nosso Deus e essa é a Sua Palavra para sempre**! 
Amém!<file_sep>/_posts/2017-07-08-reflexao-salmo-116-v3e4.md --- layout: post title: Reflexão - Salmo 116:3-4 categories: devocionais image: reflexao.jpg logo: reflexao.jpg description: Reflexão - Salmo 116:3-4 url: https://eltonsantos.github.io/devocionais/reflexao-salmo-116-v3e4/ author: <NAME> comments: true --- "As cordas da morte me envolveram, as angústias do inferno vieram sobre mim; aflição e tristeza me dominaram. Então clamei pelo nome do Senhor: 'Livra-me, Senhor!'" Salmos 116:3-4 <p class="intro"><span class="dropcap">O</span> contexto desse Salmo é quando Davi está passando por mais uma grande tribulação, Davi estava sendo perseguido e com a cabeça a prêmio. E como se não bastasse, Davi estava com uma tremenda angustia, como ele mesmo diz, angustia do inferno, em outras traduções fala-se em Sheol, palavra hebraica que quer dizer cova, túmulo, sepultura, embora também seja muito referida como inferno. Além disso, Davi também estava triste, e não somente triste, mas também dominado pela tristeza.</p> Imaginem, cercado por inimigos, tremendamente angustiado e aflito e em estado de profunda tristeza e depressão. Por menos, muitos poderiam desistir e dar cabo de sua vida nesse momento ou até mesmo antes dessas circunstâncias. Porém, Davi, agindo como um homem segundo o coração de Deus, clamou ao Senhor, dizendo do fundo de seu coração: Livra-me, Senhor! E nessa hora Davi, onde Davi invoca o Santo nome do Senhor, começa a falar da piedade, benignidade de Deus para com ele, que mesmo pecando e falhando, o Senhor não o abandonou e no verso 12 ele fala: ### "Que darei eu ao Senhor, por todos os benefícios que me tem feito?" Salmos 116:12 De fato, Davi reconheceu que não há nada que possamos fazer para retribuir todas as coisas boas que o Senhor fizera e faz por nós. 
O Senhor possui tamanha benevolência que até o mal que se levanta contra nós, que o deserto em que somos levados e que as tentações que insurgem são para o nosso aprendizado e para nossa caminhada cristã. Todavia, devemos ser gratos a Deus não pelo o que Ele faz em nossas vidas, mas sim pelo o que Ele é! Devemos reconhecer que somente o Senhor é misericordioso para salvar uma alma aflita, somente o Senhor é o nosso Deus. E Davi responde o verso 12 concluindo que a única maneira de agradecer a Deus por tudo o que Ele tem feito é O adorando de todo o nosso coração e entendimento e O bem dizendo com nossa boca e com toda nossa força! Amém!<file_sep>/_posts/2017-05-17-como-passar-o-dia-com-deus.md --- layout: post title: Como passar o dia com Deus? categories: devocionais image: orando.jpg logo: orando.jpg description: Como passar o dia com Deus? url: https://eltonsantos.github.io/devocionais/como-passar-o-dia-com-deus/ author: <NAME> comments: true --- ## Como passar o dia com Deus? <p class="intro"><span class="dropcap">E</span>m meio a um mundo tão caído, tão tribulado e em meio a tantos afazeres mundanos, como então passar o dia com Deus? Meditando na Palavra que o Senhor tem trazido a meu coração, vou escrever brevemente um passo a passo de como passar o dia com Deus. Lembrando que essas coisas valem para mim e para ti e que possamos buscar sempre mais está na presença de Cristo durante nossas vidas. Então, como passar o dia com o Senhor?</p> ### Oração pela manhã "E, levantando-se de manhã, muito cedo, fazendo ainda escuro, saiu, e foi para um lugar deserto, e ali orava." Marcos 1:35 Inicialmente faça a oração pela manhã. Assim que você acordar, não faça outra coisa que não seja orar, mesmo que seja curta. Não faça coisa alguma e nem encare o mundo antes de orar. Essa oração serve para começar o dia com uma unção poderosa que te prepara e fortalece para que tenha forças espirituais para enfrentar o mundo. 
Um exemplo disso é Daniel, que três vezes por dia se prosta em oração, conforme relato bíblico: "Daniel, pois, quando soube que o edito estava assinado, entrou em sua casa (ora havia no seu quarto janelas abertas do lado de Jerusalém), e três vezes no dia se punha de joelhos, e orava, e dava graças diante do seu Deus, como também antes costumava fazer." Daniel 6:10 Além de Daniel, Jesus também nos dá exemplo de oração pela manhã. Esses são homens que faziam suas orações manutinas e recebiam o galardão matinal do Senhor. A oração da vantagem a nós sobre o pecado. Essa curta oração é para apresentar seu dia, pedir direção, força e as orientações diárias. Um contato inicial com o Senhor. Lembre-se: NUNCA esqueça de orar antes de sair de casa. ### Meditar 1 ou 2 capítulos "E, pela manhã, veio a mim a palavra do Senhor" Ezequiel 12:8 Ler um ou dois capítulos servem como alicerce para o seu dia, servem como seu "café da manhã" e durante o seu dia esses capítulos te sustentarão e servirão para sua reflexão. É sempre bom ter um texto em mente para usar na hora certa. "Antes tem o seu prazer na lei do Senhor, e na sua lei medita de dia e de noite." Salmos 1:2 Aquele texto que foi usado pela manhã é que embasará, fortificará e protegerá você durante o seu dia. ### Prática da oração em espírito "Orando em todo o tempo com toda a oração e súplica no Espírito, e vigiando nisto com toda a perseverança e súplica por todos os santos" Efésios 6:18 É uma oração interior. Devemos está adorando, louvando a Deus e se arrependendo no espírito. É uma contínua comunhão o Senhor, tendo temor e vigiando o tempo todo. Pedindo a misericórdia de Deus, orando pelos enfermos, pelos pecadores, mantendo comunhão constante, pedindo para não cair em tentação, citando e proclamando textos em sua mente. Como pode ser lido aqui: "Mas vós, amados, edificando-vos a vós mesmos sobre a vossa santíssima fé, orando no Espírito Santo" Judas 1:20 E pode ser encontrado também aqui: "Que farei, pois? 
Orarei com o espírito, mas também orarei com o entendimento; cantarei com o espírito, mas também cantarei com o entendimento." 1 Coríntios 14:15 Quer passar o dia todo com Deus? Observe a prática da oração no espírito e ore com o pensamento, com o coração, pedindo livramento, se santificando e intercendendo pelos outros. ### Dê testemunho com a sua conduta No meio da sua rotina sempre dê testemunho de sua fé. Testemunhar a sua fé é exalar o bom perfume de Cristo. Aqui existe uma divisão da forma de dá testemunho: forma passiva e forma ativa. __1) Passiva__ "Pois zelamos do que é honesto, não só diante do Senhor, mas também diante dos homens." 2 Coríntios 8:21 Aqui dê testemunho através de sua conduta, de sua postura pessoal, profissional e ética onde estiver. Sem dúvidas, esse é o método mais eficaz para trazer almas para Jesus, pois não há forma de usar máscaras quando as próprias pessoas estão reparando implicitamente em sua conduta. Logo, quando elas estiverem passando por algum problema é você, por causa de sua conduta, que elas irão procurar. "Ninguém despreze a tua mocidade; mas sê o exemplo dos fiéis, na palavra, no trato, no amor, no espírito, na fé, na pureza." 1 Timóteo 4:12 Devemos sempre manter uma postura cristã nos diferentes lugares que formos, zelando o que é nosso e o que Deus colocou sob nossa responsabilidade. Devemos ter temor a Deus no trabalho, sermos hornestos, justos, íntegros e devemos ser exemplos para que os ímpios possam ver em nós algo diferente, santo, bíblico. __2) Ativa__ "Mas somente tinham ouvido dizer: Aquele que já nos perseguiu anuncia agora a fé que antes destruía. E glorificavam a Deus a respeito de mim." Gálatas 1:23,24 Quando possível, pregue o evangelho. Testemunhe às pessoas em sua volta o que Deus fez e tem feito por você. Diga quem você era, o que fazia, o que acreditava e quais consequências teria se continuasse nesse caminho. Depois diga quem você é agora, o que faz, em que acredita e a consequência dessa mudança. 
"Testificando, tanto aos judeus como aos gregos, a conversão a Deus, e a fé em nosso Senhor Jesus Cristo." Atos 20:21 Não esqueça de mensionar o arrependimento e a fé, pois sem essas duas coisas não poderão ingressar no reino de Deus. ### Fazer boas obras "Porque somos feitura sua, criados em Cristo Jesus para as boas obras, as quais Deus preparou para que andássemos nelas." Efésios 2:10 Além de dar bom testemunho ativamente, devemos também fazermos boas obras, pois o evangelho também é ação, é partir pro combate, é ir pro ataque e não ficarmos somente entre as quatro paredes da igreja. Devemos aprender e irmos em frente para fazermos o reino avançar. "E os nossos aprendam também a aplicar-se às boas obras, nas coisas necessárias, para que não sejam infrutuosos." Tito 3:14 Devemos nos envolver em ações sociais, estender a mão para o caído, visitar enfermos, consolar alguém que está abatido. A comunhão com Deus consiste em ajudar o seu próximo, consiste em ser um agente da graça! ### Prática da oração secreta "Mas tu, quando orares, entra no teu aposento e, fechando a tua porta, ora a teu Pai que está em secreto; e teu Pai, que vê em secreto, te recompensará publicamente." Mateus 6:6 Essa é a grande hora de conversarmos com Deus. Aqui é diferente de todas as demais orações, pois é somente você e Deus, entre quatro paredes. Aqui é a hora de humilhação, da súplica, do clamor e adoração ao Senhor. Separe esse momento importantíssimo em alguma parte do dia. Normalmente esse momento é dividido em, pelo menos, quatro etapas: **1. Adoração** Onde você entoará louvores e dirá o quanto O ama. **2. Ações de graça** Momento de agradecimento pelo o que Ele tem feito por sua vida, agradecer por tudo e qualquer coisa, pelos livramentos dados, bençãos, saúde e por todas as coisas importantes em sua vida e da do seu próximo. **3. 
Confição e arrependimento** Esse é o momento de humilhar-se na presença do Senhor, confessando seus pecados, pedindo Sua compaixão e se arrepender verdadeiramente do que fez e/ou com quem fez, para que dessa maneira o Senhor possa ouvir suas orações. **4. Petição** Provavelmente é o momento que mais fazemos e exercemos: o momento de pedir! Aqui é a hora de apresentarmos as nossas necessidades. Já O adoramos, O louvamos, O agradecemos por tudo, confessamos nossos pecados e nos arrependemos, e agora sim, estamos prontos para aprensentar as nossas petições a Cristo. Peçe qualquer coisa, porém sempre coloque a vontade do Senhor na frente, pois só a vontade Dele é boa, perfeita e agradável. ### Estudo das Escrituras Aqui não é somente um momento que devemos ter, mas sim como devemos conduzir nossa vida: lendo a Bíblia toda até o fim da vida. É necessário reservar um tempo diário para isso. Diferentemente do início quando ruminamos um ou dois capítulos, aqui é necessário estudar as Escrituras. Faça anotações e escreva sobre o que entendeu. Decore as Escrituras, pois muitos desses versículos devem ser usados para entregar à alguém alguma palavra profética ou para confrontar alguém em momento oportuno. Faça perguntas sobre algo e encontre a resposta na própria Bíblia. Não leia com pressa, não é preciso traçar metas a atingir, pois o importante é compreender e aplicar o que foi lido, não importando o quanto tempo demorará. A Bíblia não é um livro comum, é um livro que devemos ler também com o coração, metaforicamente falando, pois a cada leitura ela te dará uma resposta diferente do mesmo versículo lido anteriormente. ### Faça culto no lar Por fim e para fechar o dia, faça um culto no seu lar. É um culto em que a família se reune, escolhe um texto, ler, comenta, depois cantam hinos e por fim oram. E aplicam o texto lido na vida. Isso gera crescimento, santificação, ajuda no desenvolvimento da fé e da espiritualidade. 
Não há uma única maneira de fazer esse culto, cada um executa da forma que puder, o importante é não deixar de fazer, nem que seja uma vez na semana. É isso meus irmãos, dessa forma, finalmente conseguimos entender o que Paulo quis dizer quando falou que devemos orar sem cessar. Orar sem cessar não significa ficar em um mosteiro só orando sem parar até a volta de Jesus. Orar sem cessar é: Orar pela manhã, meditar nos versículos, orar em espírito, dar testemunho com a sua conduta, fazer boas obras, orar em secreto, estudar as Escrituras e fazer culto no seu lar! Amém!<file_sep>/_posts/2018-01-16-devocional-16-01-2018.md --- layout: post title: Devocional - 16/01/2018 categories: devocionais image: reflexao.jpg logo: reflexao.jpg description: Devocional - 16/01/2018 - Cuidando dos seus sonhos - Gn 37:5 url: https://eltonsantos.github.io/devocionais/devocional-16-01-2018/ author: <NAME> comments: true --- __"Teve José um sonho, que contou a seus irmãos; por isso o odiaram ainda mais."__ Gn 37:5 <p class="intro"><span class="dropcap">P</span>az de Deus meus amados irmãos! Estou aqui pra fazer uma breve reflexão sobre o cuidado que devemos ter com nossos sonhos. Irmãos, conforme a Bíblia relata, José contou seu sonho a seus irmãos e a ira deles se levantou ainda mais, pois José não tinha 11 irmãos que pudesse contar, mas sim 11 inimigos devido a seu pai o amar mais do que a eles. E isso nos ensina uma boa lição, não devemos compartilhar os nossos sonhos com toda e qualquer pessoa, pois nem todo mundo está preparado para ouvir aquilo que você está prestes a contar, seja por causa de inveja, ciúmes ou apenas despreparo.</p> Devemos permanecer calados e contar somente para aquele grupo restrito de pessoas que mais confiamos ou que sentimos realmente no coração que devemos compartilhar. Tenha cuidado, pois infelizmente o inimigo usa pessoas da nossa própria casa para nos desanimar e nos impedir das nossas conquistas. 
Seus sonhos, na maioria das vezes, expressam a vontade de Deus na sua vida, e sendo estes a vontade de Deus, de alguma forma eles irão se cumprir, no tempo certo. Procure contar seus sonhos a Deus e entregá-los a Ele. Porém, não passe a vida inteira só sonhando, lute por eles e dê o primeiro passo em direção a seus sonhos. Tem pessoas que querem muito serem usadas por Deus, mas tem preguiça de orar e ler a Bíblia, assim como tem pessoas que querem muito um emprego melhor, mas tem preguiça de estudar e se especializar. Deus está no controle de sua vida, permaneça fiel a Ele, mas saiba que **Ele é justo e não te dará seus objetivos se Ele não reconhecer que você está se esforçando**. Ele capacita você para realizar seu sonho, mas é você que terá que fazer o necessário para realizar-los. Que Deus abençoe a todos!<file_sep>/_posts/2017-04-30-da-fraqueza-se-fez-forca.md --- layout: post title: Da fraqueza se fez força categories: devocionais image: fraqueza-forca.jpg logo: fraqueza-forca.jpg description: Da fraqueza se fez força - Hebreus 11 & Juízes 16. url: https://eltonsantos.github.io/devocionais/da-fraqueza-se-fez-forca/ author: <NAME> comments: true --- >__"Os quais pela fé venceram reinos, praticaram a justiça, alcançaram promessas, fecharam as bocas dos leões, Apagaram a força do fogo, escaparam do fio da espada, da fraqueza tiraram forças, na batalha se esforçaram, puseram em fuga os exércitos dos estranhos."__ Hebreus 11:33,34 ### Da fraqueza se fez força <p class="intro"><span class="dropcap">E</span>sse texto remete a fé, pois ela da vitória, força, coragem para que dá fraqueza se faça força. Essa palavra é para todos aqueles em que algum momento esteve ou está fraco e que pela fé em Cristo Jesus conseguiu se reanimar e vencer os obstáculos.</p> A nossa luta é contra os pecados que afrontam a nossa carne, contra nossas inclinações, sedução, acusação, contra o maligno, pecado, opressão... Lutamos contra o pecado, contra o diabo, contra o inimigo o tempo todo. 
Durante nossa caminhada nos deparamos com diversos momentos que esgotam o nosso vigor, que nos fazem ficarmos sem forças pra reagir e pra lutar. Essa mensagem é pra você que está sem força para enfrentar os intempéries da vida. ### HISTÓRIA DE SANSÃO Vou falar de uma história bastante conhecida da Bíblia, a história de Sansão, que encontra-se em *Juízes 16*. Homem forte, incomum, conhecido pela sua força sobrenatural, colossal, mas que é homem de carne e osso e é submetido as mesmas paixões que a gente, cometeu os mesmos erros que somos acometidos e enfrentou dificuldades das quais também nós passamos. Sansão perdeu toda força, em todas as áreas se abateu, mas no fim de sua história a Bíblia diz que ele recobrou as suas forças. Isso pode acontecer com você que se sente abatido, desencorajado e sem ânimo, e que a minha oração para mim e para ti é que sejamos fortalecido e revigorado pela graça de Deus. Sansão era conhecido pela sua força descomunal, sobre-humana. Mas no decorrer da história ele perde toda essa força, ele fica sem força para orar, jejuar, caminhar… a mesma coisa pode acontecer com qualquer um de nós. A única coisa que nao foi tirada de nós é a consciência de que se está fraco e necessitado. Sansão passou por quatro etapas de fraqueza, acredito que sejam todas as etapas, cada uma delas o abalou tremendamente. E vou falar um pouco sobre cada uma delas. #### **1) FRAQUEZA FÍSICA** >"Então ela o fez dormir sobre os seus joelhos, e chamou a um homem, e rapou-lhe as sete tranças do cabelo de sua cabeça; e começou a afligi-lo, e retirou-se dele a sua força." _Juízes 16:19_ Conhecido pela sua força muscular, por conseguir derrubar vários homens com uma única porrada, ser um dos homens mais fortes que já existiu, enfim. Talvez você esteja assim, sem força física. Você não consegue mais ter o mesmo rendimento, cuidar de uma casa, governar os filhos, a sua esposa, cuidar do seu ministério. 
Talvez você esteja acometido por uma enfermidade, está doente, com complicações no corpo. Você não é mais forte, determinado, robusto, ativo, alegre e está passando por momento de grande fraqueza física. Os reflexos de Sansão foram comprometidos, ele perdeu o poder de reação e não teve como se defender quando os Filisteus vieram até ele. Talvez você esteja assim, sem força para pregar, atuar no serviço cristão, evangelizar, trabalhar na obra, suar a camisa. Sansão conheceu a fraqueza física, e talvez você esteja debilitado, doente, sobre efeito de algum remédio. A sua parte física pode está comprometida a tal ponto que possivelmente esteja comprometendo seu desempenho em casa, no trabalho, como marido, como esposa. Talvez até mesmo seja algo espiritual, tão intenso que não deixa você sair da cama e do seu estado. #### **2) FRAQUEZA ESPIRITUAL** A segunda fraqueza a qual Sansão sofreu encontra-se em *Juízes 16:20* que diz: >"E disse ela: Os filisteus vêm sobre ti, Sansão. E despertou ele do seu sono, e disse: Sairei ainda esta vez como dantes, e me sacudirei. Porque ele não sabia que já o Senhor se tinha retirado dele." Foi o que aconteceu com Sansão que no segundo momento experimentou a fraqueza espiritual. Você pode está sem força pra orar, sem força para estimular o coração a orar. Suas orações são repetitivas, cansativas, fracas, desanimadas. Oração seca, vazia, pobre. Você não consegue mais ler as escrituras, sem vontade de ler, sem aquela fome de leitura. Insensibilidade espiritual. Não é mais sensível a voz de Deus . Perdeu aquele sentido aguçado, está vendo tudo com os olhos naturais. A Fraqueza espiritual nos leva a perder o domínio próprio, que é fruto do Espírito, como diz em *Gálatas 5:22*. Você fica abatido, carnal, desequilibrado, ansioso. Desequilibrado emocionalmente, você não consegue mais dominar sua carne, cai nos menores pecados, sem força para resistir as setas, sem forças pra ir a um culto e quando vem não consegue se concentrar em um louvor. 
Se estou fraco espiritualmente não consigo orar. Como a Bíblia diz em *1 Tessalonicenses 5:19* *"Não extingais o Espírito"*, pois uma vez que estou fraco espiritualmente não consigo mortificar a carne, não consigo ter vontade de me alegrar em Cristo e diante das circunstâncias da vida, eu não consigo enfrentá-las. Sendo fraco espiritualmente, a Bíblia não tem significado nenhum pra mim, não passa de um livro comum. Jesus veio para que tenhamos vida e vida em abundância e que tenhamos plenitude do conhecimento de Deus. #### **3) FRAQUEZA EMOCIONAL** A outra fraqueza, é a fraqueza emocional, que se encontra em *Juízes 16:21*: >"Então os filisteus pegaram nele, e arrancaram-lhe os olhos, e fizeram-no descer a Gaza, e amarraram-no com duas cadeias de bronze, e girava ele um moinho no cárcere." Sansão foi do fundo do poço emocionalmente e a Bíblia nos dá 3 razões: **Primeira razão** Mudança na aparência Cortaram os cabelos, mudou a aparência. Talvez uma deformidade física, uma sequela de acidente, cirurgia, choque, agressão física que te comprometeu, que marcou você. Talvez você tenha passado por algum trauma desse, uma falência econômica, quebra na empresa, agressão no lar e de alguma forma afetou a sua aparência. Sansão sofreu nesse aspecto, devido aqueles longos cabelos serem cortados. (nos dias de hoje poderíamos até dizer que Sansão estava sofrendo bullying). **Segunda razão** Perda da liberdade Sansão nunca teve imposição alguma e agora teve seu direito privado, limitado, contido. Seus direitos foram retirados, perdeu o arbítrio, vencido. Teve perda do direito de liberdade negado, pois agora ele estava preso. **Terceira razão** Perda da visão Imagina, preso no calabouço, cabelo raspado e cego, com os olhos vazados. Sansão entrou em depressão, essa é a fraqueza emocional. Pastor você não tá vendo que nao venho mais a igreja? Por que meu filho? Depressão. Talvez você esteja preso em seu próprio corpo, abandonado. Sem ânimo. Desequilibrado emocionalmente. 
Esse pode ser você e isso pode se voltar de várias formas, inclusive na parte ofensiva, como descontrole, ira ou de forma passiva, como medo, se esconder, não querer falar ou ver ninguém. Sansão estava irreconhecível. Fraqueza emocional que o foi imposta. #### **4) FRAQUEZA MORAL** Quarta e última fraqueza está em *Juízes 11:25*: >"E sucedeu que, alegrando-se-lhes o coração, disseram: Chamai a Sansão, para que brinque diante de nós. E chamaram a Sansão do cárcere, que brincava diante deles, e fizeram-no estar em pé entre as colunas." Os Filisteus estavam festejando com Sansão, ele estava sendo envergonhado, zombado. Estavam fazendo chacota com aquele homem que era o mais forte do mundo, e agora que estava sem força, estava sendo feito como palhaço, estava com orgulho ferido. Fraqueza moral, falido, um "zé ninguém". Talvez você seja um fracasso como marido, como esposa, como pai e sem poder esboçar reação nenhuma. Talvez você tenha perdido tudo o que tinha e virou vergonha. Talvez seja você tivera sido um homem santo, uma mulher de oração, uma mulher virtuosa e de repente tudo desaba. Torna-se uma vergonha moral. Faliu na liderança da casa, faliu com a esposa, faliu com o marido, faliu com os filhos. O estado de Sansão é exatamente esse, ele conheceu todos os estágios da fraqueza. Qual dos estágios você se reconhece aqui? Quais dos estágios nós nos reconhecemos? No meu caso, eu me reconheci em todos! Você já chegou a ser fiel a Ele e agora está passando por isso, e nao consegue sair, e sempre afundando mais e mais e está como Sansão, preso, com moral baixa e irreconhecível. Essas são as quatro fraquezas que nos acometem. **Agora, como vencer? Como sair disso? Como mudar o cativeiro? Como Sansão conseguiu mudar esse quadro terminal, como ressurgir?** *Hebreus 11* tem a resposta, no verso 32 ela cita Sansão e no 33 fala o que causou a vitória. O capítulo narra homens que tiveram grandes vitórias no antigo testamento. 
E se refere também a Sansão, onde ele dá fraqueza fez força. Por meio da fé, da fraqueza ele conseguiu extrair força. Essa é a resposta. Foi pela fé que Sansão mudou seu cativeiro, pela fé que Sansão se reanimou e teve seu quadro mudado. Uma das coisas que pode está te impedindo são os embaraços, as futilidades, o pecado pode ser a causa da fraqueza. Sansão fez tudo de errado, matou muita gente, adulterou, fez o que era mal aos olhos do Senhor. Essas coisas foram as razões a levar Sansão a fraqueza. *Hebreus 12:1-2* fala o que está faltando. **Arrependimento e fé.** Será que foi isso que aconteceu com Sansão? Como Sansão da fraqueza tirou força? *Juízes 16:28* tem a resposta! A oração de Sansão! Oração de arrependimento, deixou o embaraço e o pecado. Se arrependeu de tudo que fez, deixou os embaraços, se humilhou, abandonou tudo que tinha feito e reconheceu o Seu Senhor como o Deus de Israel! **"Lembra-te de mim Senhor"**. O Resultado está no verso 29, ele recobrou as forças! Meu irmão, a palavra pra você é: Como vou tirar da fraqueza força? **Pela fé**. O que é fé? É depositar sua confiança Nele, Cristo e confiar em Suas promessas. Sansão, ali naquele estado, mesmo irreconhecível, abatido, moído, esquecido e tendo conhecido todos os estágios da fraqueza, de repente ele disse: Não! Eu sou do Senhor, eu tenho um pacto, meu Deus é o Deus de Israel. Meu Deus é grande, esse não é o estado final da igreja, não é o estado final do santo, Deus tem promessa pra mim, eu não vou morrer aqui, eu não vou ficar nesse cativeiro enquanto Deus não cumprir todas as promessas Dele em minha vida. Eu sou imortal até que Ele cumpra tudo o que prometeu! E a Bíblia diz que o cabelo começou a crescer e suas forças voltaram e ali naquele lugar Sansão matou mais gente do que em toda sua vida! O diabo não tem a última palavra, a morte não tem a última palavra, o pecado NÃO tem a última palavra. 
**Lamentações 3:16** diz: _"Quero trazer à memória aquilo que me dá esperança!"_ Deus não esqueceu de você, Deus não esqueceu das promessas que Ele tem pra sua vida. Ele está vendo você, Deus não terminou os Seus planos com você. Ele ja deu projetos a você, traga a memória essas palavras. Recobre suas forças, recobre seu ânimo. Deus diz: quem disse que vou deixar você perder sua família? Quem disse que vou deixar teu marido, tua esposa ir pro inferno? Quem disse que seus filhos vão terminar assim? Quem disse que Eu não sou poderoso para curar você? Quem disse que Eu não posso mover esses papéis a seu favor? Quem disse que eu não posso pelejar sua causa e mover céu e terra por isso? Deixe o pecado e olhe para a Cruz, para Seu amor, sua misericórdia. Foi isso que Sansão fez! E para concluir: **O que é a fé?** É a certeza daquilo que se esperam e a prova das coisas que se não vêem. Ele disse? Tá dito, ponto final. Prometeu? Tá prometido, ponto final. Deus não se esqueceu de Sansão quando ele disse: **"Lembra-te de mim só mais essa vez"**. É claaaaro que Deus vai lembrar de você também meu irmão! Sempre Ele se lembrará de uma alma arrependida, mesmo que essa alma esteja no uuuuuúltimo estágio da fraqueza **o Senhor diz: Eu me lembrarei de ti**!! Amém!
371168d06b37553186e933220e89b57ecee913a3
[ "Markdown", "JavaScript" ]
38
Markdown
eltonsantos/eltonsantos.github.io
2c9f7ca8a1ec33b600a83901b87c946170418794
dc9cb094ea905ec14b3f8b47af227fad05ec425c
refs/heads/master
<file_sep> AC_PREREQ([2.69]) AC_INIT([hwflush-check], [0.1], [<EMAIL>]) AM_INIT_AUTOMAKE #AC_CONFIG_HEADERS([config.h]) AC_PROG_CC AM_PROG_CC_C_O # for O_DIRECT #AC_USE_SYSTEM_EXTENSIONS AC_GNU_SOURCE AC_C_CONST AC_CHECK_HEADERS([fcntl.h getopt.h netdb.h stdio.h stdlib.h string.h]) AC_CHECK_HEADERS([sys/ioctl.h sys/socket.h sys/stat.h sys/types.h stdint.h signal.h]) AC_CHECK_HEADERS([netinet/tcp.h unised.h malloc.h stdarg.h sys/time.h]) AC_CHECK_HEADERS(pthread.h,, [AC_MSG_ERROR([pthread.h required])]) # required libraries AC_CHECK_LIB(pthread, pthread_create, [LIBS="$LIBS -lpthread"]) AC_CONFIG_FILES([ Makefile ]) AC_OUTPUT <file_sep># don't check NEWS, AUTHORES and so on AUTOMAKE_OPTIONS = foreign sbin_PROGRAMS = hwflush-check hwflush_check_SOURCES = hwflush-check.c <file_sep>**hwflush−check** is a tool to check how data is flushed on disk. hwflush-check tests filesystem consystency under power failure conditions. It does write; fsync; power-cut, and then checks that the data it has written is actully on disk after bootup. Some description: https://static.openvz.org/vz-man/man1/pstorage-hwflush-check.1.gz.html Usage example: ----- 1. On a server with the hostname test_server, run: hwflush-check -l 2. On a client, run: hwflush-check -s test_server -d /mnt/test -t 100 3. Turn off the client (poweroff button or unplug power cable), and then turn it on again. 4. Restart the client: hwflush-check -s test_server -d /mnt/test -t 100 5. Check the server output for lines containing the message "cache error detected!" License: BSD like license <file_sep>/* * Copyright (c) 2011-2013 Parallels Inc. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. Compile: gcc hwflush-check.c -o hwflush-check -lpthread Usage example: ------- 1. On a server with the hostname test_server, run: hwflush-check -l 2. On a client, run: hwflush-check -s test_server -d /mnt/test -t 100 3. Turn off the client, and then turn it on again. 4. Restart the client: hwflush-check -s test_server -d /mnt/test -t 100 5. Check the server output for lines containing the message "cache error detected!" */ #include <errno.h> #include <fcntl.h> #include <getopt.h> #include <netdb.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/ioctl.h> #include <sys/socket.h> #include <sys/stat.h> #include <sys/types.h> #include <stdint.h> #include <signal.h> #include <netinet/tcp.h> #include <pthread.h> #include <unistd.h> #include <malloc.h> #include <stdarg.h> #include <sys/time.h> enum prealloc_type { PA_NONE = 0, PA_POSIX_FALLOC = 1, PA_WRITE = 2, PA_LAST = PA_WRITE, }; enum { SYNC_MFSYNC, SYNC_MFDATASYNC, SYNC_FSYNC, SYNC_FDATASYNC, SYNC_SYNC, }; static int alloc_type = PA_POSIX_FALLOC; static int sync_type = SYNC_FDATASYNC; static int use_dio = 0; static int send_exit = 0; static int is_server = 0; static int is_check_stage = 0; static int is_prepare = 0; static char *host = NULL; static char *port = "32000"; static char *dir = NULL; /* block size should be a multiply of 8 */ static off_t blocksize = 16 * 1024 - 104; static off_t blocksmax = 1024 + 1; static unsigned int threads = 32; #define THREADS_MAX 1024 static int exit_flag = 0; 
static int do_mfsync_ioc(int *fd, int *datasync, int nr); static void logtime(FILE * fp) { struct timeval tv; char buff[128]; struct tm tm_local; gettimeofday(&tv, NULL); strftime(buff, sizeof(buff), "%d-%m-%y %H:%M:%S", localtime_r(&tv.tv_sec, &tm_local)); fprintf(fp, "%s.%03u ", buff, (unsigned int)tv.tv_usec / 1000); } static void logerr(const char * fmt, ...) { va_list args; logtime(stderr); va_start(args, fmt); vfprintf(stderr, fmt, args); va_end(args); } static void logout(const char * fmt, ...) { va_list args; logtime(stdout); va_start(args, fmt); vfprintf(stdout, fmt, args); va_end(args); } /* returns 0 if ok or -errno if error */ int swrite(int fd, void *buf, int sz) { int w = sz; while (w) { int n = write(fd, buf, w); if (n < 0) { if (errno == EINTR) continue; return -errno; } if (n == 0) return -EIO; buf += n; w -= n; } return sz; } /* returns number of bytes read */ int sread(int fd, void *buf, int sz) { int r = 0; while (sz) { int n = read(fd, buf, sz); if (n < 0) { if (errno == EINTR) continue; return -errno; } if (n == 0) break; buf += n; r += n; sz -= n; } return r; } static int connect_to_server(void) { struct addrinfo *result, *rp, hints; int sock = -1; int ret; memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_V4MAPPED | AI_ADDRCONFIG; /* resolve address */ ret = getaddrinfo(host, port, &hints, &result); if (ret != 0) { logerr("getaddrinfo() failed: %s\n", gai_strerror(ret)); return -1; } /* getaddrinfo() returns a list of address structures. Try each address until we successfully connect(2). If socket(2) (or connect(2)) fails, we (close the socket and) try the next address. 
*/ for (rp = result; rp != NULL; rp = rp->ai_next) { sock = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol); if (sock < 0) { logerr("Could not create socket: %s\n", strerror(errno)); continue; } if (connect(sock, rp->ai_addr, rp->ai_addrlen) < 0) { logerr("connect() failed: %s\n", strerror(errno)); close(sock); sock = -1; continue; } logerr("Connected to %s:%s\n", host, port); break; /* Success */ } if (rp == NULL) /* No address succeeded */ logerr("Could not connect to server\n"); /* addrinfo is not needed any longer, free it */ freeaddrinfo(result); return sock; } static uint64_t find_last_counter(int fd, char *buf, off_t *offset) { uint64_t cnt = 0; off_t i, len; for (i = 0; i < blocksmax; i++) { uint64_t t; unsigned int c, j; len = sread(fd, buf, blocksize); if (len < 0) { logerr("read() failed: %s\n", strerror(-len)); break; } if (len != blocksize) { logerr("Failed to read block %llu\n", (unsigned long long)i); break; } t = *(uint64_t*)buf; if (cnt >= t) break; /* validate block */ memset(&c, t & 0xff, sizeof(c)); for (j = sizeof(t); j < blocksize; j += sizeof(c)) if (c != *(unsigned int*)(buf + j)) break; if (j < blocksize) { logerr("Block %llu with number %llu is invalid " "at %d, blocksize %llu \n", (unsigned long long)i, (unsigned long long)t, j, (unsigned long long)blocksize); break; } /* ok, block is good, store counter */ cnt = t; } *offset = blocksize * i; return cnt; } /* press Ctrl-C twice on freeze */ static void sighandler(int sig) { if (exit_flag) { signal(sig, SIG_DFL); raise(sig); } exit_flag = 1; } struct client { int sock; pthread_mutex_t mutex; }; struct worker { pthread_t thr; uint32_t id; int32_t ret; struct client *cl; }; struct cache_state { uint64_t gen; uint32_t error; }; enum { REP_FL_UPDATE = 1, /* Update generation packet */ REP_FL_SYNC = 2, /* Sender want reply*/ REP_FL_ACK = 4, /* This is ACK packet */ REP_FL_ERR = 8, /* Indicate error */ REP_FL_STOP = 0x10,/* Recepient should stop */ }; struct report { uint32_t id; 
uint32_t flags; uint64_t gen; } __attribute__((aligned(8))); int do_sync(int fd) { int datasync = 0; switch (sync_type) { // TODO: batch several files and perform one call case SYNC_MFSYNC: return do_mfsync_ioc(&fd, &datasync, 1); case SYNC_MFDATASYNC: datasync = 1; return do_mfsync_ioc(&fd, &datasync, 1); case SYNC_FSYNC: return fsync(fd); case SYNC_FDATASYNC: return fdatasync(fd); case SYNC_SYNC: sync(); return 0; default: return -EINVAL; } } static void *run_client_thread(void *arg) { struct worker *w = arg; int ret = 0; int fd; int o_flags; off_t offset = 0; char *buf; char file[strlen(dir) + 6]; struct report rp = { .id = w->id, .flags = 0, .gen = 0 }; struct report reply; buf = valloc(blocksize); if (!buf) { ret = -ENOMEM; logerr("malloc() failed\n"); goto out; } snprintf(file, sizeof(file), "%s/%04u", dir, w->id); /* first try to find last used counter */ o_flags = O_RDWR; if (use_dio) o_flags |= O_DIRECT; fd = open(file, o_flags, 0666); if (fd < 0) { if (is_check_stage) { rp.gen = 0; logerr("Failed to open file '%s': %s\n", file, strerror(errno)); goto send_report; } if ((errno != ENOENT) || ((fd = creat(file, 0666)) < 0)) { ret = fd; logerr("Failed to open file '%s': %s\n", file, strerror(errno)); goto out_free; } switch (alloc_type) { case PA_NONE: break; case PA_POSIX_FALLOC: if (posix_fallocate(fd, 0, blocksize * blocksmax) < 0) { ret = -ENOSPC; logerr("fallocate() failed: %s\n", strerror(errno)); goto out_close_fd; } break; case PA_WRITE: { off_t num, count = blocksize * blocksmax; int ret; memset(buf, 0, blocksize); while (count) { num = blocksize < count ? 
blocksize : count; ret = write(fd, buf, num); if (ret < 0) { logerr("write() failed: %s\n", strerror(errno)); goto out_close_fd; } count -= ret; } lseek(fd, 0, SEEK_SET); break; } default: ret = -EINVAL; logerr("Incorrect prealloc type "); goto out_close_fd; break; } rp.gen = 0; } else { rp.gen = find_last_counter(fd, buf, &offset); if ((ret = lseek(fd, offset, SEEK_SET)) < 0) { logerr("lseek() failed: %s\n", strerror(errno)); goto out_close_fd; } logerr("id %u: latest valid id %llu\n", w->id, (unsigned long long)rp.gen); } send_report: rp.id = w->id; rp.flags = REP_FL_SYNC; if (send_exit) rp.flags |= REP_FL_STOP; pthread_mutex_lock(&w->cl->mutex); ret = swrite(w->cl->sock, &rp, sizeof(rp)); if (ret < 0 ) { logerr("Failed to write to socket: %s\n", strerror(-ret)); goto out_unlock; } ret = sread(w->cl->sock, &reply, sizeof(reply)); pthread_mutex_unlock(&w->cl->mutex); if (ret != sizeof(reply)) { logerr("Corrupted msg from server id:%d, got:%d expect:%ld\n", rp.id, ret, sizeof(reply)); ret = -EINVAL; goto out_close_fd; } if (reply.id != w->id || !(reply.flags & REP_FL_ACK)) { ret = -EINVAL; logerr("Bad replay from server id:%d \n", rp.id); goto out_close_fd; } ret = 0; if (reply.flags & REP_FL_ERR) { ret = -EIO; logout("id %d: Server reported cache error, " " server idx %llu > disk idx %llu \n", w->id, (unsigned long long)reply.gen, (unsigned long long)rp.gen); goto out_close_fd; } if (is_check_stage || send_exit) goto out_close_fd; if ((ret = do_sync(fd))) { logerr("do_sync(2) failed: %s\n", strerror(errno)); goto out_close_fd; } if (is_prepare) goto out_close_fd; rp.flags = REP_FL_UPDATE; while (!exit_flag) { int r; if (offset >= blocksize * blocksmax) { offset = 0; lseek(fd, 0, SEEK_SET); } rp.gen++; *(uint64_t*)buf = rp.gen; memset(buf + sizeof(rp.gen), rp.gen & 0xff, blocksize - sizeof(rp.gen)); r = swrite(fd, buf, blocksize); if (r != blocksize) { logerr("Failed to write to file '%s': %s\n", file, strerror(-r)); rp.flags |= REP_FL_ERR | REP_FL_STOP; } ret = 
do_sync(fd); if (ret < 0) { logerr("do_sync() failed: %s\n", strerror(errno)); rp.flags |= REP_FL_ERR | REP_FL_STOP; } pthread_mutex_lock(&w->cl->mutex); r = swrite(w->cl->sock, &rp, sizeof(rp)); pthread_mutex_unlock(&w->cl->mutex); if (r < 0) { logerr("Failed to write to socket: %s\n", strerror(-r)); break; } offset += blocksize; } out_close_fd: if (fd >= 0) close(fd); out_free: free(buf); out: pthread_mutex_lock(&w->cl->mutex); w->ret = ret; pthread_mutex_unlock(&w->cl->mutex); return NULL; out_unlock: pthread_mutex_unlock(&w->cl->mutex); goto out_close_fd; } static int run_client(void) { struct stat st; int ret = 0; int flag = 1; int i; struct client clnt; struct worker *thrs; if (stat(dir, &st) < 0) { if (errno != ENOENT) { logerr("stat() for '%s' failed: %s\n", dir, strerror(errno)); return -1; } if (mkdir(dir, 0777) < 0) { logerr("Failed to create directory '%s': %s\n", dir, strerror(errno)); return -1; } } else if (!S_ISDIR(st.st_mode)) { logerr("'%s' is not a directory\n", dir); return -1; } clnt.sock = connect_to_server(); if (clnt.sock < 0) return -1; if (setsockopt(clnt.sock, IPPROTO_TCP, TCP_NODELAY, (char*)&flag, sizeof(int)) < 0) { logerr("setsockopt(TCP_NODELAY) failed: %s\n", strerror(errno)); ret = -1; goto out_close_sock; } /* make things fancier for the server */ signal(SIGINT, sighandler); signal(SIGTERM, sighandler); signal(SIGPIPE, SIG_IGN); thrs = malloc(threads * sizeof(struct worker)); if (!thrs) { logerr("malloc() failed\n"); ret = -1; goto out_close_sock; } pthread_mutex_init(&clnt.mutex, NULL); for (i = 0; i < threads; i++) { thrs[i].id = i; thrs[i].cl = &clnt; thrs[i].ret = 0; if (pthread_create(&thrs[i].thr, NULL, run_client_thread, (void*)&thrs[i])) { logerr("Failed to start thread %u\n", i); ret = -1; break; } } for (i--; i >= 0; i--) pthread_join(thrs[i].thr, NULL); for (i = 0; i < threads; i++) { if (thrs[i].ret != 0) { logerr("Thread %d failed with:%d %s\n", thrs[i].id, thrs[i].ret, strerror(-thrs[i].ret)); if (!ret) ret = 
thrs[i].ret; } } free(thrs); out_close_sock: close(clnt.sock); return ret; } static int prepare_for_listening(void) { struct addrinfo *result, *rp, hints; int sock = -1; int ret; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_PASSIVE; /* For wildcard IP address */ ret = getaddrinfo(NULL, port, &hints, &result); if (ret != 0) { logerr("getaddrinfo: %s\n", gai_strerror(ret)); return -1; } /* getaddrinfo() returns a list of address structures. Try each address until we successfully bind(2). If socket(2) (or bind(2)) fails, we (close the socket and) try the next address. */ for (rp = result; rp != NULL; rp = rp->ai_next) { int flag = 1; sock = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol); if (sock < 0) { logerr("Could not create socket: %s\n", strerror(errno)); continue; } if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char*)&flag, sizeof(int)) < 0) { logerr("setsockopt(SO_REUSEADDR) failed: %s\n", strerror(errno)); close(sock); sock = -1; continue; } if (bind(sock, rp->ai_addr, rp->ai_addrlen) < 0) { logerr("bind() failed: %s\n", strerror(errno)); close(sock); sock = -1; continue; } logerr("Listening on port %s\n", port); break; /* Success */ } if (rp == NULL) /* No address succeeded */ logerr("Could not bind\n"); freeaddrinfo(result); /* No longer needed */ return sock; } static int set_sock_keepalive(int sock) { int val = 1; /* enable TCP keepalives on socket */ if (setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)) < 0) { logerr("setsockopt() failed: %s\n", strerror(errno)); return -1; } /* set idle timeout to 1 second */ if (setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, &val, sizeof(val)) < 0) { logerr("setsockopt() failed: %s\n", strerror(errno)); return -1; } /* set consecutive interval to 1 second */ if (setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, &val, sizeof(val)) < 0) { logerr("setsockopt() failed: %s\n", strerror(errno)); return -1; } if (setsockopt(sock, 
IPPROTO_TCP, TCP_NODELAY, (char*)&val, sizeof(int)) < 0) { logerr("setsockopt(TCP_NODELAY) failed: %s\n", strerror(errno)); return -1; } /* set number of keepalives before dropping to 3 */ val = 3; if (setsockopt(sock, SOL_TCP, TCP_KEEPCNT, &val, sizeof(val)) < 0) { logerr("setsockopt() failed: %s\n", strerror(errno)); return -1; } return 0; } static int run_server(void) { int sock; struct sockaddr_storage peer_addr; socklen_t peer_addr_len; char boundaddr[NI_MAXHOST] = ""; ssize_t nread; struct cache_state *cache; struct report rp; int ret = 0; signal(SIGINT, sighandler); signal(SIGTERM, sighandler); sock = prepare_for_listening(); if (sock < 0) return -1; if (listen(sock, 5) < 0) { logerr("listen() failed: %s\n", strerror(errno)); ret = -1; goto out_close_sock; } cache = calloc(THREADS_MAX, sizeof(*cache)); if (!cache) { logerr("calloc() failed\n"); ret = -1; goto out_close_sock; } while (!exit_flag) { char claddr[NI_MAXHOST]; int conn; peer_addr_len = sizeof(struct sockaddr_storage); conn = accept(sock, (struct sockaddr *) &peer_addr, &peer_addr_len); if (conn < 0) { logerr("accept() failed: %s\n", strerror(errno)); ret = -1; break; } ret = set_sock_keepalive(conn); if (ret < 0) { close(conn); break; } ret = getnameinfo((struct sockaddr *) &peer_addr, peer_addr_len, claddr, NI_MAXHOST, NULL, 0, NI_NUMERICHOST); if (ret < 0) { logerr("getnameinfo() failed: %s\n", gai_strerror(ret)); close(conn); break; } if (boundaddr[0] == 0) { strncpy(boundaddr, claddr, NI_MAXHOST-1); logerr("Accepting messages from %s\n", boundaddr); } else { if (strncmp(boundaddr, claddr, NI_MAXHOST) != 0) { logerr("Skip connection from invalid address %s\n", claddr); close(conn); continue; } logerr("Restarted connection from %s\n", boundaddr); } while (!exit_flag) { uint64_t expected; int new_error = 0; nread = sread(conn, &rp, sizeof(rp)); if (nread < 0) { logerr("read() failed: %s\n", strerror(-nread)); break; } if (nread == 0) break; if (nread != sizeof(rp)) { logerr("Failed to read 
counter\n"); break; } if (rp.id >= THREADS_MAX) { logerr("Bad id received: %u\n", rp.id); break; } if (rp.flags & REP_FL_UPDATE) expected = cache[rp.id].gen + 1; else /* simple check */ expected = cache[rp.id].gen; if (rp.gen < expected) { logout("id %u: %llu > %llu, cache error detected!\n", rp.id, (unsigned long long)expected, (unsigned long long)rp.gen); new_error = 1; cache[rp.id].error = 1; ret = 1; } else if (rp.gen > expected) logerr("id %u: %llu -> %llu, probably missed some packets\n", rp.id, (unsigned long long)cache[rp.id].gen, (unsigned long long)rp.gen); if (rp.flags & REP_FL_ERR) { logerr("id %u: transaction id:%llu failed on client (gen %llu)\n", rp.id, (unsigned long long)cache[rp.id].gen, (unsigned long long)rp.gen); new_error = 1; cache[rp.id].error = 1; ret = 1; } if (rp.flags & REP_FL_UPDATE && !new_error) cache[rp.id].gen = rp.gen; if (rp.flags & REP_FL_STOP) exit_flag = 1; if (rp.flags & REP_FL_SYNC) { struct report reply; int r; reply.flags = REP_FL_ACK; reply.gen = expected; reply.id = rp.id; if (new_error) reply.flags |= REP_FL_ERR; r = swrite(conn, &reply, sizeof(reply)); if (r < 0 && !ret) ret = r; } } close(conn); logerr("Connection closed\n"); } free(cache); out_close_sock: close(sock); return ret; } #define EXT4_IOC_MFSYNC _IO('f', 43) struct ext4_ioc_mfsync_info { uint32_t size; uint32_t fd[0]; }; static int do_mfsync_ioc(int *fd, int *datasync, int nr) { struct ext4_ioc_mfsync_info *mfsync_ioc; int i; size_t msize; msize = sizeof(*mfsync_ioc) + sizeof(mfsync_ioc->fd[0]) * nr; mfsync_ioc = malloc(msize); memset(mfsync_ioc, 0, msize); for (i = 0; i < nr; i++) { mfsync_ioc->fd[i] = fd[i]; if (datasync[i]) mfsync_ioc->fd[i] |= (1 << 31); } mfsync_ioc->size = nr; return ioctl(fd[0], EXT4_IOC_MFSYNC, mfsync_ioc); } static const char *progname(const char *prog) { char *s = strrchr(prog, '/'); return s ? 
s+1 : prog; } static void usage(const char *prog) { fprintf(stderr, "Flush test tool.\n"); fprintf(stderr, "Usage: %s [options...]\n", progname(prog)); fprintf(stderr, "Options:\n" " -l, --listen Run as a server.\n" " -c, --check Check data\n" " -x, --exit Send exit signal to server\n" " -P, --prepare Perform only preparation stage\n" " -s, --server=IP Set server host name or IP address\n" " -p, --port=PORT Set server port\n" " -d, --dir=DIR Set test directory\n" " -D, --fdatasync Use fdatasync(2) \n" " -F, --fsync Use fsync(2) \n" " -S, --sync Use sync(2) \n" " -M, --mfsync Use mfsync \n" " -m, --mfdatasync Use mfdatasync \n" " -b, --blocksize=SIZE Set block size\n" " -n, --blocksmax=NUM Set maximum number of blocks\n" " -t, --threads=NUM Set number of client threads to use\n" " -O, --direct Open files with O_DIRECT" " -a, --alloc_type=NUM Set prealloc type 0:NONE, 1:posix_falloc, 2:write\n" " -h, --help Show usage information\n" ); exit(-1); } static const struct option long_opts[] = { {"listen", 0, 0, 'l'}, {"check", 0, 0, 'c'}, {"prepare", 0, 0, 'P'}, {"exit", 0, 0, 'x'}, {"server", 1, 0, 's'}, {"port", 1, 0, 'p'}, {"dir", 1, 0, 'd'}, {"blocksize", 1, 0, 'b'}, {"blocksmax", 1, 0, 'n'}, {"threads", 1, 0, 't'}, {"alloc_type", 1, 0, 'a'}, {"direct", 0, 0, 'O'}, {"help", 0, 0, 'h'}, {"fdatasync", 0, 0, 'D'}, {"fsync", 0, 0, 'F'}, {"sync", 0, 0, 'S'}, {"mfsync", 0, 0, 'M'}, {"mfdatasync", 0, 0, 'm'}, {0, 0, 0, 0} }; int main(int argc, char *argv[]) { int ch; /* process options, stop at first nonoption */ while ((ch = getopt_long(argc, argv, "DFMPSclms:p:d:a:b:f:n:t:h", long_opts, NULL)) != -1) { switch (ch) { case 'l': is_server = 1; break; case 'x': send_exit = 1; break; case 'c': is_check_stage = 1; break; case 'P': is_prepare = 1; break; case 'O': use_dio = 1; break; case 's': host = optarg; break; case 'p': port = optarg; break; case 'd': dir = optarg; break; case 'a': alloc_type = atoi(optarg); if (alloc_type > PA_LAST) { fprintf(stderr, "Invalid prealloc 
type\n"); usage(argv[0]); } break; case 'M': sync_type = SYNC_MFSYNC; break; case 'm': sync_type = SYNC_MFDATASYNC; break; case 'S': sync_type = SYNC_SYNC; break; case 'F': sync_type = SYNC_FSYNC; break; case 'D': sync_type = SYNC_FDATASYNC; break; case 'b': { char *p; blocksize = strtoull(optarg, &p, 10); if (p[0] != '\0') { fprintf(stderr, "Invalid block size\n"); usage(argv[0]); } blocksize &= ~7LL; break; } case 'n': { char *p; blocksmax = strtoull(optarg, &p, 10); if (p[0] != '\0') { fprintf(stderr, "Invalid maximum number of blocks\n"); usage(argv[0]); } break; } case 't': { char *p; threads = strtoul(optarg, &p, 10); if (p[0] != '\0') { fprintf(stderr, "Invalid number of threads\n"); usage(argv[0]); } if (threads > THREADS_MAX) { fprintf(stderr, "Number of threads is too big\n"); usage(argv[0]); } break; } default: usage(argv[0]); return 1; } } setlinebuf(stdout); if (!is_server) { if (host == NULL) { fprintf(stderr, "Please specify server address\n"); usage(argv[0]); } if (dir == NULL) { fprintf(stderr, "Please specify test directory\n"); usage(argv[0]); } return run_client(); } else return run_server(); }
5437d16b2f91ae7ec2edce2dc13378e451f7d5d3
[ "Markdown", "C", "Makefile", "M4Sugar" ]
4
M4Sugar
vitlav/hwflush-check
f297eeaf738a9f5488d40f00984120ffe1f60627
8291609b017e0f0c7c7e1b17e4799a2028daceb3
refs/heads/main
<repo_name>KonstLeb/GBBasicPython<file_sep>/BPLesson4Task3.py print(f'В пределах от 20 до 240 на 20 или 21 без остатка делятся следующие числа: \n{[element for element in range(20, 241) if element%20 == 0 or element%21 ==0]}') input()<file_sep>/BPLesson4Task1.py from sys import argv script_name, hours, hour_rate, bonus = argv try: print(f"Рассчитанная заработная плата сотрудника: {int(hours) * int(hour_rate) + int(bonus)}") except ValueError: print("Вы коварно ввели в командной строке одно или более отнюдь не число! Будем решать эту проблему в Басманном суде, а пока перезапустите скрипт!")<file_sep>/BPLesson4Task7.py def fact(n): import math while True: try: n = int(input("Введите целое положительное число: ")) if n <= 0: print("Положительные люди вводят ПОЛОЖИТЕЛЬНЫЕ числа! Попробуйте снова!") else: break except ValueError: print("Конечно, отличать другие знаки от чисел - большое искусство, но постарайтесь сосредоточиться!") for element in range(1, n+1): yield f'Факториал числа {element} равен {math.factorial(element)}' n = None for el in fact(n): print(el) input()<file_sep>/BPLesson4Task5.py initial_list = [el for el in range(100, 1001) if el%2 == 0] print (f"Получился такой исходный список: \n{initial_list}\n") from functools import reduce total = reduce( lambda itogo, number: itogo * number, initial_list ) print("Получилось такое произведение всех элементов списка: ") print(total) input()<file_sep>/BPLesson4Task6_2.py from sys import argv script_name, number_of_iterations = argv def povtor(number_of_iterations): try: int(number_of_iterations) except ValueError: print("Вы злодейски ввели в командной строке отнюдь не целое положительное число! Это было обидно! 
Перезапустите скрипт!") exit() list = [3, 2, 2, "да", 2, 2, 3, "домомучительница", "карлсон"] print(f"Элементы заранее определенного списка в количестве {number_of_iterations} штук: ") import itertools count = 0 for item in itertools.cycle(list): if count > int(number_of_iterations): break print (item) count += 1 povtor (number_of_iterations)<file_sep>/BPLesson4Task6_1.py from sys import argv script_name, first, second = argv def numbers(first, second): try: int(first) int(second) except ValueError: print("Вы коварно ввели в командной строке одно или более отнюдь не число! Будем решать эту проблему в Басманном суде, а пока перезапустите скрипт!") exit() print("Генератор нагенерировал вот что: ") from itertools import count for i in count(int(first)): if i > int(second): break else: print (i) numbers (first, second)<file_sep>/BPLesson4Task2.py from random import randint initial_list = [] while True: try: number_of_elements = int(input("Список из скольких случайных чисел Вы хотите сформировать? Введите значение от 2 до 1000: ")) if number_of_elements not in range(2, 1001): print("Неужели трудно ввести число в указанном диапазоне?") continue except ValueError: print("Если Вы снова введете не число, я сделаю что-нибудь страшное! Например, зарыдаю!") else: break initial_list = [randint(0,1000) for element in range(0, number_of_elements)] final_list = [el for el in initial_list if el > initial_list[initial_list.index(el) - 1] and initial_list.index(el) != 0] print (f"Был сгенерирован такой начальный список чисел: \n{initial_list}\n") print(f"А это список чисел из начального списка, значения которых больше предыдущего элемента: \n{final_list}") input()
69a13debdd4eb3bf7a331aaf095e471e2707d43f
[ "Python" ]
7
Python
KonstLeb/GBBasicPython
2445673d1fc80f7ececf4cb3e3f37141bf890ce2
2ae5afa4192612e9554fe439b55e809c454a9b3e
refs/heads/master
<file_sep>#################################################################################### ##### Calculadora de Markowitz ##### Projeto de calculadora em R para realizar a otimizacao de portfolio ##### seguindo o modelo de proposto por Markowitz em 1952, para isso usamos os ##### dados de Petrobras(PETR4),ITAU (ITSA4) e o IBOVESPA (BOVA11). ##### Os dados foram importados em formato .csv do yahoo financas, e correspondem ##### ao periodo de 26/09/2018 até 26/09/2019(diario). ##### A modelagem se baseia em três pilares (MARKOWITZ, 1952): ##### 1) retorno esperado para a carteira; ##### 2) a proporção com que os ativos comporão a carteira; ##### 3) a variância da carteira que representará o risco incorrido para a mesma. ##### ##### ##### ~~Essa carteira hipotetica será criada usando-se 60% de PETR4 e ##### 40% de ITSA4 e X% de BOVA11~~ ##### para esse calculo a funcao ##### deve calcular: ##### 1- o rendimento diario de cada ativo; ##### 2- A media do rendimento de cada ativo; ##### 3- A variancia deste rendimento; ##### 4- A covariancia dos ativos; ##### 5- A correlação entre eles. ##### PS: Caso você não tenha as bibliotecas a seguir instaladas basta, desmarcar ##### o sinal de comentário (#) e rodar as linhas abaixo para instalá-las, ##### lembre-se de deixar rodar até o final o procedimento. 
#################################################################################### #install.packages("quadprog") #install.packages("PerformanceAnalytics") #install.packages("IntroCompFinR", repos="http://R-Forge.R-project.org") library(IntroCompFinR) #importa arquivos de dados Petrobras PETR4 <- read.csv("C:/Users/pedro/RStudioProjects/Markowitz/Markowitz_wallet/Projeto/PETR4.SA.csv", stringsAsFactors=FALSE) #View(PETR4) #Importa arquivos de ITSA4 ITSA4 <- read.csv("C:/Users/pedro/RStudioProjects/Markowitz/Markowitz_wallet/Projeto/ITSA4.SA.csv", stringsAsFactors=FALSE) #View(ITSA4) #importa dados magazineluiza MGLU3 <- read.csv("C:/Users/pedro/RStudioProjects/Markowitz/Markowitz_wallet/Projeto/MGLU3.SA.csv", stringsAsFactors=FALSE) #View(MGLU3) #Importa o BOVA11, para mais tarde tratar as excecoes BOVA <- read.csv("C:/Users/pedro/RStudioProjects/Markowitz/Markowitz_wallet/Projeto/BOVA11.SA.csv",stringsAsFactors = F) #View(BOVA) #Importa dados Bradesco BBDC4 <- read.csv("C:/Users/pedro/RStudioProjects/Markowitz/Markowitz_wallet/Projeto/BBDC4.SA.csv", stringsAsFactors=FALSE) #Importa dados Fleury FLRY3 <- read.csv("C:/Users/pedro/RStudioProjects/Markowitz/Markowitz_wallet/Projeto/FLRY3.SA.csv", stringsAsFactors=FALSE) #Importa dados Banco do Brasil BBSA4 <- read.csv("C:/Users/pedro/RStudioProjects/Markowitz/Markowitz_wallet/Projeto/BBAS3.SA.csv", stringsAsFactors=FALSE) #Importa dados Azul linhas aéreas AZUL4 <- read.csv("C:/Users/pedro/RStudioProjects/Markowitz/Markowitz_wallet/Projeto/AZUL4.SA.csv", stringsAsFactors=FALSE) #Importa dados MERV <- read.csv("C:/Users/pedro/RStudioProjects/Markowitz/Markowitz_wallet/Projeto/MERV.csv", stringsAsFactors=FALSE) #Importa dados Via Varejo (Vivara) VVAR3 <- read.csv("C:/Users/pedro/RStudioProjects/Markowitz/Markowitz_wallet/Projeto/VVAR3.SA.csv", stringsAsFactors=FALSE) #Importa dados MDIAS Branco MDIA3 <- read.csv("C:/Users/pedro/RStudioProjects/Markowitz/Markowitz_wallet/Projeto/MDIA3.SA.csv", stringsAsFactors=FALSE) 
#nomes para facilitar a leitura nomes<-c("PETROBRAS","ITAU","MAGALU","BRADESCO","FLEURY","BB","AZUL","VIVARA","MDIAS") #funcao para calcular o retorno diario Retornos= function(Abertura,Fechamento){ #Não é necessario o uso do for aqui rendimento_dia<-Fechamento-Abertura return(rendimento_dia) } ##BOVA,PETR4,ITSA4 são dataframes por isso os dados de abertura e fechamento ## acabaram se tornando factors ## em BOVA11 temos duas ocorrencias de dias registradas como null ## vou trata-las #retorno_BOV<-Retornos(as.numeric(BOVA$Open),as.numeric(BOVA$Close)) #retorno_BOV retorno_PETR4<-Retornos(PETR4$Open,PETR4$Close) retorno_ITSA4<-Retornos(ITSA4$Open,ITSA4$Close) retorno_MGLU3<-Retornos(MGLU3$Open,MGLU3$Close) retorno_BRAD4<-Retornos(BBDC4$Open,BBDC4$Close) retorno_FLRY3<-Retornos(FLRY3$Open,FLRY3$Close) retorno_BBSA4<-Retornos(BBSA4$Open,BBSA4$Close) retorno_AZUL4<-Retornos(AZUL4$Open,AZUL4$Close) retorno_VVAR3<-Retornos(VVAR3$Open,VVAR3$Close) retorno_MDIA3<-Retornos(MDIA3$Open,MDIA3$Close) retornos_total<-cbind(retorno_PETR4,retorno_ITSA4,retorno_MGLU3,retorno_BRAD4, retorno_FLRY3,retorno_BBSA4,retorno_AZUL4,retorno_VVAR3, retorno_MDIA3) colnames(retornos_total)<-nomes retornos_total #Matriz de Medias rendimento_medio<-rbind(mean(retornos_total[,1]),mean(retornos_total[,2]), mean(retornos_total[,3]),mean(retornos_total[,4]),mean(retornos_total[,5]), mean(retornos_total[,6]),mean(retornos_total[,7]),mean(retornos_total[,8]), mean(retornos_total[,9])) rownames(rendimento_medio)<-nomes rendimento_medio #Matriz das variancias Variancias<-rbind(var(retornos_total[,1]),var(retornos_total[,2]), var(retornos_total[,3]),var(retornos_total[,4]),var(retornos_total[,5]) ,var(retornos_total[,6]),var(retornos_total[,7]),var(retornos_total[,8]) ,var(retornos_total[,9])) rownames(Variancias)<-nomes Variancias #Matriz de Covariancias CoVariancias<-cov(retornos_total) CoVariancias #Matriz de Correlações Correlacao<-cor(CoVariancias) Correlacao #Graficos para visulizar a saídas
c2782f6ca6cde8a778db2b4726e42417cf18af03
[ "R" ]
1
R
PedroHenrique31/MarkowitzCalculator
0f916ca80e114626d962f6b93878b3d13a81b943
ec13d622d02e87159e863903541d718fba356f24
refs/heads/master
<file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package inputoutput; import java.io.*; import java.io.IOException; public class InputOutput { public static void main(String[] args) throws IOException { // TODO code application logic here BufferedReader br = new BufferedReader(new InputStreamReader(System.in)); //char c = br.read(); String c; do{ c=br.readLine(); System.out.print(c); }while(c!="q"); } } <file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package stringevaluation; import java.util.Scanner; /** * * @author micro computer */ public class StringEvaluation { static char find(StringBuffer s){ int n = s.length(); for(int i=0; i<n; i=i+2){ if(i<n && i+2<n){ if(s.charAt(i+1)=='A'){ if(s.charAt(i)=='0' || s.charAt(i+2)=='0'){ s.setCharAt(i+2, '0'); } else s.setCharAt(i+2, '1'); } if(s.charAt(i+1)=='A'){ if(s.charAt(i)=='0' && s.charAt(i+2)=='0'){ s.setCharAt(i+2, '0'); } else s.setCharAt(i+2, '1'); } if(s.charAt(i+1)=='A'){ if(s.charAt(i)==s.charAt(i+2)){ s.setCharAt(i+2, '0'); } else s.setCharAt(i+2, '1'); } } } return s.charAt(n-1); } public static void main(String[] args) { // TODO code application logic here StringBuffer S = new StringBuffer(); Scanner sc = new Scanner(System.in); S = sc.next(); //int result = Integer.valueOf(S.); System.out.println(find(s)); } } <file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ package linklistprog; import java.util.LinkedList; import java.util.Scanner; /** * * @author micro computer */ public class LinkListProg { public static void main(String[] args) { Scanner sc = new Scanner(System.in); String s;//=sc.nextLine(); int ch; LinkedList<Integer> Test; Test = new LinkedList<>(); do{ Test.add(sc.nextInt()); System.out.print("To continue: Press 1 "); ch=sc.nextInt(); }while(ch==1); System.out.println(Test); Test.add(3, 00); System.out.println(Test); } } <file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package pkgreturn.two.prime.numbers; import java.util.Scanner; /** * * @author micro computer */ public class ReturnTwoPrimeNumbers { int n; ReturnTwoPrimeNumbers(int n){ this.n = n; } //int temp=n-2; int nomber(int temp){ return primeno(temp)!=0 && primeno(n-temp)!=0?temp:nomber(temp-1); } int primeno(int n){ if ( n < 2 ){ return 0; } for(int i= (int)Math.sqrt(n);i>=2;i--){ if(n%i==0){ return 0; } } return n; } public static void main(String[] args) { Scanner Sc = new Scanner(System.in); int t = Sc.nextInt(); while(t>0){ int n = Sc.nextInt(); ReturnTwoPrimeNumbers r = new ReturnTwoPrimeNumbers(n); int a = r.nomber(n-2); System.out.println(n-a+" "+a); t--; } } } <file_sep>ClearTax_Assign_2_3 Kamal2 <file_sep>package cleartax_assign; /* package codechef; // don't place package name! */ import java.util.*; import java.lang.*; import java.io.*; import java.io.FileReader; import java.util.Iterator; import java.util.Map; import org.json.simple.JSONArray; import org.json.simple.JSONObject; import org.json.simple.parser.*; /* Name of the class has to be "Main" only if the class is public. 
*/ class ClearTax_Assign_2_3 { public static ArrayList<String> ans = new ArrayList<String>(); public static void transform(Map m,Map opt){ ans.add((String)opt.get(m.get("op"))); Map temp; try{ temp = (Map)m.get("lhs"); ans.add(String.valueOf(m.get("rhs"))); ans.add(")"); transform(temp,opt); } catch(Exception e){ try{ temp = (Map)m.get("rhs"); ans.add(String.valueOf(m.get("lhs"))); ans.add(")"); transform(temp,opt); } catch(Exception ex){ String s = String.valueOf(m.get("lhs")); if(s.equals("x")) ans.add(String.valueOf(m.get("rhs"))); else ans.add(s); } } } public static void main (String[] args) throws java.lang.Exception{ Object obj = new JSONParser().parse(new FileReader("JSONExample1.json")); Map opt = new LinkedHashMap(); opt.put("multiply"," / "); opt.put("divide"," * "); opt.put("add"," - "); opt.put("subtract"," + "); opt.put("equal"," = "); // typecasting obj to JSONObject JSONObject jo = (JSONObject) obj; System.out.print("x = "); ans.add(String.valueOf(jo.get("rhs"))); try{ Map m = (Map)jo.get("lhs"); transform(m,opt); } catch(Exception e){ ans.add(String.valueOf(jo.get("rhs"))); } int count=0; for(String s:ans) if(s.equals(")")) count++; for(int i=0;i<count;i++) System.out.print("("); for(String s:ans) System.out.print(s); } }<file_sep> package remove.friends; import java.util.Scanner; class ChristieFriend{ int popular; ChristieFriend nextFriend; ChristieFriend(int data){ popular = data; } } class RemoveFriends { static ChristieFriend Start; void addFriend(int popular){ if(Start==null) Start = new ChristieFriend(popular); else{ ChristieFriend Temp = Start; while(Temp.nextFriend != null){ Temp = Temp.nextFriend; } Temp.nextFriend = new ChristieFriend(popular); } } void unFriend(){ boolean flag = false; ChristieFriend p= Start; if(p.popular<p.nextFriend.popular){ Start = p.nextFriend; return; } while(p.nextFriend.nextFriend != null){ if(p.nextFriend.popular<p.nextFriend.nextFriend.popular){ p.nextFriend = p.nextFriend.nextFriend; return; } p=p.nextFriend; 
} if(flag==false){ while(p.nextFriend.nextFriend!= null){ p=p.nextFriend; } p.nextFriend=null; } } void print(){ ChristieFriend current=Start; while(current != null){ System.out.print(current.popular+" "); current = current.nextFriend; } System.out.println(); } public static void main(String[] args) { Scanner Sc = new Scanner(System.in); int t = Sc.nextInt(); while(t-->0){ int n = Sc.nextInt(); int k = Sc.nextInt(); RemoveFriends RF = new RemoveFriends(); Start = new ChristieFriend(Sc.nextInt()); ChristieFriend NewNode = Start; while(n-->1){ NewNode.nextFriend = new ChristieFriend(Sc.nextInt()); NewNode = NewNode.nextFriend; } while(k-->0){ RF.unFriend(); } RF.print(); } } } <file_sep> package cleartax; /* package codechef; // don't place package name! */ import java.util.*; import java.lang.*; import java.io.*; import java.io.FileNotFoundException; import java.io.PrintWriter; import java.util.LinkedHashMap; import java.util.Map; import org.json.simple.JSONArray; import org.json.simple.JSONObject; import org.json.simple.parser.*; /* Name of the class has to be "Main" only if the class is public. */ public class JSon_create { /** * @param args the command line arguments */ public static void main(String[] args)throws java.lang.Exception { // Java program for write JSON to a file JSONObject jo = new JSONObject(); jo.put("op", "equal"); jo.put("rhs", 20); Map m = new LinkedHashMap(3); m.put("op","add"); m.put("lhs",1); Map m1 = new LinkedHashMap(3); m1.put("op","multiply"); m1.put("lhs","x"); m1.put("rhs",6); m.put("rhs",m1); jo.put("lhs",m); PrintWriter pw = new PrintWriter("JSONExample1.json"); pw.write(jo.toJSONString()); pw.flush(); pw.close(); } } <file_sep># JavaPrograms This repository contain various programming problem solved using JAVA <file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ package stealth.edu_08; import java.util.Random; /** * * @author Vidyakamal */ public class StealthEdu_08 { static int Randome_upto11(){ Random rand = new Random(); return rand.nextInt(11)+1; } public static void main(String[] args) { int num; num = 11*(Randome_upto11()-1)+(Randome_upto11()); num = (num%13)+1; System.out.println(num); } }<file_sep>/* Given an unsorted array of size N. Find the first element in array such that all of its left elements are smaller and all right elements to it are greater than it. Note: Left and right side elements can be equal to required element. And extreme elements cannot be required element. Input: The first line of input contains an integer T denoting the number of test cases. Then T test cases follow. Each test case consists of two lines. First line of each test case contains an Integer N denoting size of array and the second line contains N space separated array elements. * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ package left.small.right.greater; import java.util.Arrays; import java.util.Scanner; public class LeftSmallRightGreater { public static void main(String[] args) { Scanner in =new Scanner(System.in); int testCase=in.nextInt(); while(testCase-->0){ int size=in.nextInt(); int arr[]=new int[size]; int leftMax[]=new int[size]; int rightMax[]=new int[size]; for(int i=0;i<size;i++){ arr[i]=in.nextInt(); } int temp1=Integer.MIN_VALUE; for(int i=0;i<size;i++){ if(arr[i]>temp1){ leftMax[i]=arr[i]; temp1=arr[i]; }else { leftMax[i]=temp1; } } System.out.print(Arrays.toString(leftMax)+" "); int temp2=Integer.MAX_VALUE; for(int i=size-1;i>=0;i--){ if(arr[i]<temp2){ rightMax[i]=arr[i]; temp2=arr[i]; }else { rightMax[i]=temp2; } } System.out.println(Arrays.toString(rightMax)+" "); boolean found=false; int temp=0; for(int i=1;i<size-1;i++){ if(arr[i]>=leftMax[i] && arr[i]<=rightMax[i]){ found=true; temp=arr[i]; break; } } if(found){ System.out.println(temp); }else { System.out.println(-1); } } } }<file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ package cleartax_assignment; import java.util.*; import java.io.FileReader; import java.util.Map; import org.json.simple.JSONObject; import org.json.simple.parser.*; public class ClearTax_Assignment { public static ArrayList<String> ans = new ArrayList<String>(); public static void transform(Map m,Map opt){ ans.add((String)opt.get(m.get("op"))); Map temp; try{ temp = (Map)m.get("lhs"); ans.add(String.valueOf(m.get("rhs"))); ans.add(" )"); transform(temp,opt); } catch(Exception e){ try{ temp = (Map)m.get("rhs"); ans.add(String.valueOf(m.get("lhs"))); ans.add(" )"); transform(temp,opt); } catch(Exception ex){ String s = String.valueOf(m.get("lhs")); if(s.equals("x")) ans.add(String.valueOf(m.get("rhs"))); else ans.add(s); } } } public static void parse(Object obj, Map opt, boolean c){ try{ Map m = ( Map)obj; if(c) System.out.print("( "); parse(m.get("lhs"),opt,true); System.out.print(opt.get(m.get("op"))); parse(m.get("rhs"),opt,true); if(c) System.out.print(" )"); } catch(Exception ex){ try{ String rhs = ( String) obj; System.out.print(rhs);} catch(Exception e){ Long rhs = ( Long) obj; System.out.print(rhs); } } } public static int evaluate(String expn){ char[] Temp = expn.toCharArray(); Stack<Integer> val = new Stack<Integer>(); Stack<Character> op = new Stack<Character>(); for (int i = 0; i < Temp.length; i++){ if (Temp[i] == ' '){ continue; } if (Temp[i] >= '0' && Temp[i] <= '9'){ StringBuffer sbuf = new StringBuffer(); while (i < Temp.length && Temp[i] >= '0' && Temp[i] <= '9') sbuf.append(Temp[i++]); val.push(Integer.parseInt(sbuf.toString())); } else if (Temp[i] == '(') op.push(Temp[i]); else if (Temp[i] == ')'){ while (op.peek() != '(') val.push(applyOp(op.pop(), val.pop(), val.pop())); op.pop(); } else if (Temp[i] == '+' || Temp[i] == '-' || Temp[i] == '*' || Temp[i] == '/'){ while (!op.empty() && hasPrecedence(Temp[i], op.peek())) val.push(applyOp(op.pop(), val.pop(), val.pop())); op.push(Temp[i]); } } while (!op.empty()) val.push(applyOp(op.pop(), 
val.pop(), val.pop())); return val.pop(); } public static boolean hasPrecedence(char op1, char op2) { if (op2 == '(' || op2 == ')') return false; if ((op1 == '*' || op1 == '/') && (op2 == '+' || op2 == '-')) return false; else return true; } public static int applyOp(char op, int b, int a) { switch (op) { case '+': return a + b; case '-': return a - b; case '*': return a * b; case '/': if (b == 0) throw new UnsupportedOperationException("Cannot divide by zero"); return a / b; } return 0; } public static void main (String[] args) throws java.lang.Exception{ Object obj = new JSONParser().parse(new FileReader("JSONExample.json")); Map opt = new LinkedHashMap(); opt.put("multiply"," * "); opt.put("divide"," / "); opt.put("add"," + "); opt.put("subtract"," - "); opt.put("equal"," = "); // Assignment 1 JSONObject jo = (JSONObject) obj; System.out.println("-----------------Ans of Assignment 1---------------"); parse(jo.get("lhs"),opt,false); System.out.print(opt.get((String) jo.get("op"))); parse(jo.get("rhs"),opt,false); // Assignment 2 System.out.println("\n"); System.out.println("-----------------Ans of Assignment 2---------------"); System.out.print("x = "); ans.add(String.valueOf(jo.get("rhs"))); try{ Map m = (Map)jo.get("lhs"); transform(m,opt); } catch(Exception e){ ans.add(String.valueOf(jo.get("rhs"))); } int count=0; for(String s:ans) if(s.equals(" )")) count++; for(int i=0;i<count;i++) System.out.print("( "); for(String s:ans) System.out.print(s); System.out.println(); // Assignment 2 String str="("; for(String s:ans) { str += s; } System.out.println(); System.out.println("-----------------Ans Of Assignment 3---------------"); System.out.println(ClearTax_Assignment.evaluate(str)); } } <file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ package linked.list; import java.util.*; class Node { int data; Node next; Node(int key) { data = key; next = null; } } class LinkedList { static Node head; public static void addToTheLast(Node node) { if (head == null) { head = node; } else { Node temp = head; while (temp.next != null) temp = temp.next; temp.next = node; } } public static void main (String[] args) { Scanner sc = new Scanner(System.in); int t=sc.nextInt(); while(t-- > 0) { int n = sc.nextInt(); int a1 = sc.nextInt(); Node head = new Node(a1); addToTheLast(head); for(int i = 1; i < n; i++) { int a = sc.nextInt(); addToTheLast(new Node(a)); } int k = sc.nextInt(); GfG gfg = new GfG(); System.out.println(gfg.nknode(head, k)); } } } class GfG { public static int nknode(Node head, int k) { Node kth = head; Node temp = head; if(k>1){ while(temp.next!=null){ for(int i=1; i<=k; i++){ if(temp.next==null){ return kth.data; } temp = temp.next; } kth= kth.next; } } else { while(temp.next!=null){ kth =kth.next; temp = temp.next; } } return kth.data; } }<file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ package trappingrainwater; import java.util.Scanner; public class TrappingRainWater { int[] n; int result=0; int LeftMax=0,RightMax=0,first,last; TrappingRainWater(int[] n){ this.n = n; last = n.length - 1; first= 0; } int result(){ while(first<=last){ if(n[first]<n[last]) { if(n[first]>LeftMax) LeftMax=n[first]; else result += LeftMax-n[first]; first++; } else{ if(n[last]>RightMax) RightMax = n[last]; else result +=RightMax-n[last]; last--; } } return result; } public static void main(String[] args) { // TODO code application logic here Scanner Sc = new Scanner(System.in); int t = Sc.nextInt(); while(t>0){ int n = Sc.nextInt(); int[] arr = new int[n]; for(int i=0; i<n ; i++) arr[i] = Sc.nextInt(); TrappingRainWater kth = new TrappingRainWater(arr); System.out.println(kth.result()); t--; } } } <file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package reverse.words; import java.util.Scanner; /** * * @author micro computer */ public class ReverseWords { /** * @param args the command line arguments */ public static void main(String[] args) { Scanner sc = new Scanner(System.in); int t =sc.nextInt(); while(t-->0){ String s = sc.next(); String result[]=s.split("\\."); for(int i =result.length-1;i>0;i--){ System.out.print(result[i]+" "); } } } } <file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ package stealth.edu_10; import java.util.Random; import java.util.Scanner; /** * * @author Vidyakamal */ public class StealthEdu_10 { String units[] = { "", "One", "Two", "Three", "Four","Five", "Six", "Seven", "Eight", "Nine", "Ten", "Eleven", "Twelve","Thirteen", "Fourteen", "Fifteen", "Sixteen", "Seventeen","Eighteen", "Nineteen" }; String tensMult[] = {"", "", "Twenty", "Thirty", "Forty", "Fifty", "Sixty", "Seventy", "Eighty", "ninety"}; String wordsDigit(int n){ if (n < 0) { return "Minus " + wordsDigit(-n).toLowerCase(); } if (n < 20) { return units[n]; } if (n < 100) { return tensMult[n / 10] + ((n % 10 != 0) ? " " : "")+ units[n % 10].toLowerCase(); } if (n < 1000) { return units[n / 100] + " hundred" +((n % 100 != 0) ? " and " : "")+ wordsDigit(n % 100).toLowerCase(); } return wordsDigit(n / 1000) + " thousand" + ((n % 10000 != 0) ? " " : "") + wordsDigit(n % 1000).toLowerCase(); } public static void main(String[] args) { Scanner sc = new Scanner(System.in); int n = sc.nextInt(); StealthEdu_10 SE = new StealthEdu_10(); System.out.println(SE.wordsDigit(n)); } } <file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ package reversearrayingroups; import java.util.Arrays; import java.util.Scanner; /** * * @author micro computer */ public class ReverseArrayInGroups { private int[] n; private int k; ReverseArrayInGroups(int[] n,int k){ this.n = n; this.k =k; } void print(int[] a){ int start=0,last=n.length,c=k; while(k<last){ //System.out.print(k); revers(start,k); start +=c; k +=c; } revers(start,last); //System.out.print(k+" "+last); } void revers(int start,int last){ while(last>start){ System.out.print(n[last-1]+" "); last--; } } // for( ; i<l; i++) // System.out.print(n[(l-1)-i]); public static void main(String[] args) { // TODO code application logic here Scanner Sc = new Scanner(System.in); int t = Sc.nextInt(); while(t>0){ int n = Sc.nextInt(); int k = Sc.nextInt(); int[] arr = new int[n]; for(int i=0; i<n ; i++) arr[i] = Sc.nextInt(); ReverseArrayInGroups Gr = new ReverseArrayInGroups(arr,k); Gr.print(arr); t--; } } } <file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ package reversearray; import java.util.Scanner; /** * * @author micro computer */ public class ReverseArray { public static void main(String args[] ) throws Exception { Scanner sc = new Scanner(System.in); int t= sc.nextInt(); while(t-->0){ int n = sc.nextInt(); int[] arr = new int[n+1]; int[] arr2 = new int[n+1]; for(int i=1; i<=n; i++){ arr[i] = sc.nextInt(); } boolean flag = true; int i; try{ for(i=1; i<=n; i++){ if(arr2[arr[i]]==0) arr2[arr[i]]=i; else flag=false; } if(!flag) System.out.println("not inverse"); else System.out.println("inverse"); } catch(ArrayIndexOutOfBoundsException e){ i=n; System.out.println("not inverse"); } } } } <file_sep> package test; import java.util.Scanner; class Test { { //Initial Template for Java import java.util.*; class Node{ int data; Node next; Node(int d){ data=d; next=null; } } class Zeroes{ public static void main(String[] args){ Scanner sc=new Scanner(System.in); int t=sc.nextInt(); while(t-->0){ int n=sc.nextInt(); Node head=null; while(n-->0){ int a=sc.nextInt(); if(head==null){ head=new Node(a); } else{ Node temp=new Node(a); temp.next=head; head=temp; } } GfG g=new GfG(); head = g.moveZeroes(head); while(head!=null){ System.out.print(head.data+" "); head=head.next; } System.out.println(); } } } }<file_sep>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ package stealth.edu_02; import java.util.*; public class StealthEdu_02 { /** * @param args the command line arguments */ public static void main(String[] args) { // TODO code application logic here Scanner sc = new Scanner(System.in); System.out.print("Enter Number:"); int sum =0; int n = sc.nextInt(); if(n<1107 || n>21000){ System.out.print("Given number is "); if (n<1107) { System.out.println("less than 1107"); } else{ System.out.println("greater than 21000"); } } else{ for(int i=1107; i<=21000; i=i+7){ if(i%7!=0){ i= i+(7-i%7); } sum += i; } System.out.println(sum); } } }
0a848d3202daed0808426de0a779277c1f97cf47
[ "Markdown", "Java", "Rust" ]
20
Java
VidyakamalNSIT/JavaCode
43f113f30896f8dd292b844f3fb14de72459617d
add7a1e0c700b7268087aea835c6b445395cc9be
refs/heads/master
<file_sep>window.onload = function(){ class Drawing extends KamiComponent { constructor() { super(); this.position = { x: 0, y: 0 } } static get tag(){ return 'drawing-exemple'; } get drawing(){ return this.wrapper.querySelector('.drawing'); } setProperties() { this.color= this.getAttribute('color') || 'black'; } connectedCallback() { this.ctx = this.drawing.getContext('2d'); this.resize(); } draw(event){ if (event.buttons !== 1) return; this.ctx.beginPath(); // begin this.ctx.lineWidth = 5; this.ctx.lineCap = 'round'; this.ctx.strokeStyle = this.color; this.ctx.moveTo(this.position.x, this.position.y); // from this.setPosition(event); this.ctx.lineTo(this.position.x, this.position.y); // to this.ctx.stroke(); // draw it! } resize() { this.ctx.canvas.width = window.innerWidth; this.ctx.canvas.height = window.innerHeight; } setPosition(event) { this.position.x = event.clientX; this.position.y = event.clientY; } renderHtml() { return ` <canvas bind:mousemove="draw" bind:mousedown="setPosition" bind:mouseup="setPosition" class="drawing"> </canvas> `; } renderStyle() { return ` .drawing{ position: fixed; } `; } } customElements.define(Drawing.tag, Drawing); }<file_sep>import KamiComponent from "../src/KamiComponent" import '@webcomponents/webcomponentsjs/custom-elements-es5-adapter'; import '@webcomponents/webcomponentsjs/webcomponents-bundle'; describe("Component test", () => { beforeAll(()=>{ customElements.define('list-exemple', List); }) it('should create a simple component',()=>{ let list: HTMLElement = document.createElement('list-exemple'); expect(list).toBeInstanceOf(HTMLElement) }) it('should get the simple component',()=>{ document.body.innerHTML = ` <list-exemple></list-exemple> `; let list: HTMLElement | null = document.querySelector('list-exemple'); if(list){ expect(list).toBeInstanceOf(HTMLElement) } }) }) //Simple component for the test class List extends KamiComponent { protected btn: HTMLElement | null; constructor() { super(); this.btn = null; } 
setProperties() { this.props = this.observe({ counter: 1 }) } initEventListener(){ this.btn = this.wrapper.querySelector('#add'); if(this.btn){ this.btn.addEventListener('click',()=>{ this.props.counter ++; }) } } renderHtml() { return ` <div class="list"> <button id="add">+</button> <div id="counter">${this.props.counter}</div> </div> `; } renderStyle() { return ` .list{ display: flex; } `; } }<file_sep>> **Deprecated:** This component is deprecated and will not receive any updates. Please use [kami-collection](https://github.com/Kamiapp-fr/kami-collection) instead. # `kami-component` A simple abstract class to create web compenent. This lib is a simple helper to create web component. * [Getting Started](#getting-started) * [NPM](#npm) * [CDN](#cdn) * [Life Hook](#life-hook) * [Set Properties](#set-properties) * [Render Html](#render-html) * [Render Style](#render-style) * [Template Binding](#template-binding) * [Event Listener](#event-listener) * [Contribute](#contribute) ## Getting Started >Into the getting started, you will see the code of a counter component. <p align="center"> <img src="https://emilienleroy.fr/assets/counter.gif"> </p> ### NPM Install the lib from *npm*. ``` npm install --save kami-component ``` After just import it and extend your class with this. > See **[counter example](https://github.com/Kamiapp-fr/kami-component/tree/master/example/counter)**. To declare you component, just import your component into your *index.js* file and use the *customElements.define()* methode. See the example bellow. >index.js ```js import Counter from "./Counter"; //declare your web component window.customElements.define('counter-example', Counter); ``` ### CDN You can get kami-component via this cdn `https://cdn.jsdelivr.net/npm/kami-component`. After add this file into your html with a script dom. And just extend your component with this class. At the end of your js you need to declare your component with the *customElements.define()* methode. 
See the example bellow. >index.html ```html <body> <counter-exemple></counter-exemple> </body> <script src="https://cdn.jsdelivr.net/npm/kami-component"></script> <script src="main.js"></script> ``` ## Life Hook > This is a diagram for the kami-component life cycle. <p align="center"> <img src=".github/hooks.png?raw=true"> </p> ### Set Properties The `setProperties()` is the first method called. It's here you will get props from your component. You need to override this method to init your props. This is an example : ```js //drawing.js setProperties() { this.color = this.getAttribute('color') || 'black'; } ``` and the *html* part : ```html <drawing-exemple color="green"></drawing-exemple> ``` You can also use reactive props. To use this you need to use `this.props` property and the `observe()` method. With this when a data from `this.props` is update, the component will re-render the template part. This is a reactive example : ```js // you need to observe your attribute to use reactive static get observedAttributes() {return ['counter']; } // init reactive props setProperties() { this.props = this.observe({ counter: parseInt(this.getAtribute('counter')) | 1 }) } // after just display your data like this. renderHtml() { return ` <div class="counter"> ${this.props.counter} </div> `; } ``` and the *html* part : ```html <drawing-exemple counter="0"></drawing-exemple> ``` > See [drawing example](https://github.com/Kamiapp-fr/kami-component/tree/master/example/drawing) for simple props and [counter example](https://github.com/Kamiapp-fr/kami-component/tree/master/example/counter) for reactive props. ### Render Html The `renderHtml()` method will generate your template component. For this just override the `renderHtml()`. This method should return a **string**. ### Render Style Same as `renderHtml()`, the `renderStyle()` method will generate your component style. For this just override the `renderStyle()`. This method should return a **string**. 
### Template binding With KamiComponent you can directly bind your event listener into your template. To work you juste need to add into a dom the `bind:<type>` attribute. You can replace the type part by every listener type you want like *click* or *mouseover*. This is an example : ```js /** * ... more code */ // update the current counter number updateCounter(to) { this.props.counter = this.props.counter + parseInt(to); this.setUrlParam('counter',this.props.counter); return this; } // Listener add directly from the template. renderHtml() { return ` <div class="counter"> <button id="add" class="counter__btn" bind:click="updateCounter(1);" >+</button> <button id="remove" class="counter__btn" bind:click="updateCounter(-1)" >-</button> <div class="counter__text" id="counter">${this.props.counter}</div> </div> `; } ``` In this example when you *click* on a button the `updateCounter()` will be call. This is due at this attribute `bind:click="updateCounter(-1)"` which will add the click event listener. If you want get the event emit you just need to update your method like this : ```js updateCounter(to, event) { event.preventDefault() // this is just an example } ``` Event emit is always add in last params when the listener is fire. > To see more example, go into the **example folder** of this repository. ### Event Listener If you don't use the [template binding](#template-binding) you can init your event listener into the `initEventListener()` method. > ⚠️ If you use reactive props without [template binding](#template-binding) you should add your listener into the `initEventListener()` method. Because when the template is re-render, listener are also remove. That's why the `initEventListener()` method is call when a reactive props is update. ## Contribute We would love you for the contribution to ``kami-component`` project, check the ``CONTRIBUTING`` file for more info.<file_sep>// Import here Polyfills if needed. 
Recommended core-js (npm i -D core-js) // import "core-js/fn/array.find" import '@webcomponents/webcomponentsjs/custom-elements-es5-adapter'; import '@webcomponents/webcomponentsjs/webcomponents-bundle'; abstract class KamiComponent extends HTMLElement { /** * You should override this getter to return your own tag name for your component. * @example * // counter.js * static get tag(){ * return 'counter-example'; * } * * @example * // index.html * customElements.define(Counter.tag, Counter); * * @static * @property {string} tag - tag name */ static get tag() { throw new Error('Your component should have a tag !'); } /** * @property {URL} url - an URL instance */ protected url: URL; /** * @property {ShadowRoot} shadow - main shadow root */ protected shadow: ShadowRoot; /** * Use this property to query your component. * @example * get counter() { * return this.wrapper.querySelector('#counter'); * } * @property {HTMLDivElement} wrapper - main div wrapper */ protected wrapper: HTMLDivElement; /** * @property {HTMLStyleElement} styleScope - main style dom */ protected styleScope: HTMLStyleElement; /** * If this component is observable this property is set as true. * @property {boolean} isObservable - observable state */ protected isObservable: boolean; /** * If this property is set at true. All props will be sync to an attribute. * @property syncProps */ private syncProps: boolean; /** * @property {any} props */ protected props: any; constructor({ syncProps = false } = {}) { // Always call super first in constructor super(); this.syncProps = syncProps; this.isObservable = false; this.url = new URL(window.location.href); // init props from children this.setProperties(); /** * @property {HTMLElement} shadow - the shadow root of your component */ this.shadow = this.attachShadow({ mode: 'open' }); /** * Use this dom to get children. * Call the querySelector directly from this property. 
* @property {HTMLDivElement} wrapper - main dom of your component */ this.wrapper = document.createElement('div'); /** * @property {HTMLStyleElement} styleScope - style dom */ this.styleScope = document.createElement('style'); // set the type for the style dom // tslint:disable-next-line: deprecation this.styleScope.type = 'text/css'; // generate the style and dom of your component this.render(); // append your component to the shadow root // display the component this.initComponent(); // init all your event listener this.initEventListener(); } /** * Overide this method to add your event listener. * This method will be call if you use the observe() method. */ protected initEventListener(): void { return void 0; } /** * Call when a prop is update. Only call if the ``isObservable`` property is at ``true`` * @param name - prop name * @param value - value of prop */ protected propChangedCallback(name: string, value: any): void { return void 0; } /** * Adds the specified prop to ``this.props`` * @param name - prop name * @param value - value of the prop */ protected setProp(name: string, value: any): void { if (name && value && typeof this.props === 'object') { this.props[name] = value; } } /** * Returns the value of the prop with the specified name, of `this.props`. * @param name - prop name */ protected getProp(name: string): any { if (this.props && typeof this.props === 'object') { return this.props[name]; } } /** * This methode it use be the child methode to pass * all the properties which need the parent to work */ abstract setProperties(): void; /** * This methode it use be the child methode to pass * the html template for the shadows root */ abstract renderHtml(): string; /** * This methode it use be the child methode to pass * the style template for the shadows root */ abstract renderStyle(): string; /** * This methode update your attribute set in the props object. 
* @param {String} name - the attribute name * @param {String} oldValue - the old value * @param {String} newValue - the new value */ attributeChangedCallback(name: string, oldValue: any, newValue: any): void { if (this.isObservable && oldValue !== newValue) { this.props[name] = newValue; } } /** * This methode will observer the target which you have pass in param. * When one of the property of your target is set the render() and initEventlistener() will be call. * Which reload dynamicaly your component. * @param {Object} target - object which will be observed * @returns {ProxyConstructor} */ observe(target: Object): ProxyConstructor { this.isObservable = true; // create a proxy to observe your props return new Proxy(target, { // just return your props get: (obj: any, prop: string) => { return obj[prop]; }, // rerender your component and his listener set: (obj, prop, value) => { // set the props value obj[prop] = value; if (this.syncProps) { this.setAttribute(prop as string, value); } // rerender the component this.render(); // reload listener this.initEventListener(); this.propChangedCallback(prop as string, value); return true; } }); } /** * Generate the dom structure and style of your component. * It will update the wrapper and styleScope property. * @returns {Component} this */ render(): this { // reload dom structure this.wrapper.innerHTML = this.renderHtml(); // reload style this.styleScope.textContent = this.renderStyle(); // bind attribute to all element in the wrapper this.bindAttributes(this.wrapper); return this; } /** * Init the web component */ initComponent(): void { this.shadow.appendChild(this.styleScope); this.shadow.appendChild(this.wrapper); } /** * This method convert your string to an html element like the *document.createElement()* method. * There are a litte diff with this. You should pass directly the template of you element. 
* @example * this.createElement(`<div id="new" class="test">your dom</div>`) * * @param {string} html - an string which contain a html element * @return {Element | null} html element create. */ protected createElement(html: string): Element | null { let element: Element = document.createElement('div') as Element; element.innerHTML = html; return element.firstElementChild; } /** * Convert a String into a boolean * @param {String} val - the data to convert in bool * @returns {Boolean} the boolean converted */ toBoolean(val: any): boolean { let a: any = { true: true, false: false }; return a[val]; } /** * This method will parse all element into the main HTMLelement. * if an element have an attribute which begin by "bind:" it will call the *addBindsListener()* method * with the element and the attribute in params. * else nothing happens. * @param {HTMLElement} html - parent element * @return {void} */ protected bindAttributes(html: HTMLElement): void { // parse all child element html.querySelectorAll('*').forEach((el: Element) => { // parse all attributes. Array.from(el.attributes).forEach((attr: Attr) => { // add listeners only if the attr begin by bind: if (attr.nodeName.startsWith('bind:')) { this.addBindsListener(el, attr); } }); }); } /** * Parse all functions in the attr params and call the *bindListener* for each function. * @param {Element} html - element which will add listener * @param {Attr} attr - attr to parse * @return {void} */ protected addBindsListener(html: Element, attr: Attr): void { if (attr.nodeValue) { // parse the type of the listener const type: Event = new Event(attr.nodeName.split(':')[1]); // parse the function to call from the attr nodeValue attr.nodeValue.split(';').forEach(functionToCall => { this.bindListener(html, functionToCall.replace(/ /g, ''), type); }); } } /** * Parse the function name to get params and add listener to the Element. 
* @param {Element} html - element which will add listener * @param {string} functionToCall - name of the function to call * @param {Event} type - type of listener * @return {void} */ protected bindListener(html: Element, functionToCall: string, type: Event) { if (functionToCall) { // parse function. const functionName: string = this.parseFunctionName(functionToCall); const params: string[] | null = this.parseParams(functionToCall); // get the function to call. const event = (this as { [key: string]: any })[functionName].bind(this); // add listener only if event is a function. if (typeof event === 'function') { html.addEventListener(type.type, (e: Event) => { params ? event(...params, e) : event(e); }); } else { throw new TypeError(`${functionToCall} is not a function !`); } } } /** * Get all params from a string function. * @param {string} str - function name with param in string * @return {string[]|null} all params in the function * * @example * this.parseParams('test') // return null * this.parseParams('test()') // return null * this.parseParams('test(10)') // return ['10'] * this.parseParams('test(10,12)') // return ['10','12'] */ protected parseParams(str: string): string[] | null { const args = /\(\s*([^)]+?)\s*\)/.exec(str); return args && args[1] ? args[1].split(/\s*,\s*/) : null; } /** * Get function name. * @param {string} str - function name with param in string * @returns {string} function name * * @example * this.parseFunctionName('test') // return 'test' * this.parseFunctionName('test()') // return 'test' * this.parseFunctionName('test(10)') // return 'test' */ protected parseFunctionName(str: string) { return str.split('(')[0]; } /** * Get a param form the url. * @param {String} param - the param name */ getUrlParam(param: string): string | null { return this.url.searchParams.get(param); } /** * Set or update the value of a param into the browser url. 
* @param {Object} object * @param {String} object.param - the param name * @param {String} object.value - the value * @returns {Component} this */ setUrlParam(param: string, value: string): this { // boolean to check if a update url is needed let newUrl = false; if (value.toString() !== '') { // check if the param already exist this.getUrlParam(param) ? // update the param this.url.searchParams.set(param, value) : // add the param this.url.searchParams.append(param, value); // update url is needed newUrl = true; } // check if value param is empty if (value.toString() === '' && this.getUrlParam(param) && !newUrl) { // delete a param this.url.searchParams.delete(param); // update url is needed newUrl = true; } if (newUrl === true) { // update the browser url window.history.pushState({}, '', this.url.toString()); } return this; } } export default KamiComponent; <file_sep># Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [0.0.6] - 2020-04-03 ### Added - Option `syncProp` to synchronise a prop to an attribute. - Adding `getProp()` and `setProp()` to prevent undefined state of `this.prop` object on `renderHtml()` - Adding github action to deploy automaticaly on npm. ## [0.0.5] - 2019-11-29 ### Added - Template binding. To add listener directly from the template. ### Changed - Main content of the README with more explanation of the life hook. ## [0.0.4] - 2019-11-19 ### Added - createElement() method. - Life Hook schema. ## [0.0.3] - 2019-07-18 ### Added - Static tag property. ### Changed - Set optional initEventListener method. ## [0.0.2] - 2019-06-27 ### Added - Readme with example for es6 and umd. ### Changed - Lib name from *Component* to *KamiComponent*. - Improve the counter example.s ## [0.0.1] - 2019-06-26 ### Added - the core lib. - a simple example.
34450ec0344cb2711ed298e0da2b28c346c37afd
[ "JavaScript", "TypeScript", "Markdown" ]
5
JavaScript
Kamiapp-fr/kami-component
4f8a6d298b6a37818cc52947195171635df787b6
1b5893ae4cfe6bcf2266f35fa0dcb1b93da33a5d
refs/heads/master
<file_sep>package vaccapi import ( "github.com/spf13/viper" "crypto/rsa" "crypto/rand" "crypto/x509" "encoding/pem" log "github.com/sirupsen/logrus" ) const timeZone string = "GMT-8" const clientKey string = "<KEY>" const secret string = "<KEY>" const mainUrlFormat string = "https://eco-{country}-api.ecovacs.com/v1/private/{country}/{lang}/{deviceId}/{appCode}/{appVersion}/{channel}/{deviceType}" const pemPublicKey_s string = `-----BEGIN PUBLIC KEY----- <KEY> -----END PUBLIC KEY-----` var publicKey *rsa.PublicKey type metaData struct { country string lang string deviceId string appCode string appVersion string channel string deviceType string } type Vaccapi struct { meta metaData } func (api Vaccapi) login() { } func (api Vaccapi) signParams(params map[string]string) { res := make(map[string]string) res["authTimespan"] = 0 res["authTimeZone"] = timeZone res["country"] = api.meta.country res["lang"] = api.meta.lang res["deviceId"] = api.meta.sdeviceId res["appCode"] = api.meta.appCode res["appVersion"] = api.meta.appVersion res["channel"] = api.meta.channel res["deviceType"] = api.meta.deviceType sign_text = clientKey // TODO : /* * ClientKey + (k + '=' + res[k] for k in keys(res)) + secret */ } func encrypt(payload string) []byte { payload_b := []byte(payload) ciphertext, err := rsa.EncryptPKCS1v15(rand.Reader, publicKey, payload_b) if err != nil { log.WithFields(log.Fields{"payload_str": payload}).Error("Unable to encrypt payload string") } return ciphertext } func initializePublicKey() *rsa.PublicKey { pemPublicKey := []byte(pemPublicKey_s) block, _ := pem.Decode(pemPublicKey) // ignore the remaining data if block == nil || block.Type != "PUBLIC KEY" { log.WithFields(log.Fields{"block": block}).Fatal("Unable to decode PEM public key") } pub, err := x509.ParsePKIXPublicKey(block.Bytes) if err != nil { log.Fatal("Unable to parse PKIX public key") } return pub.(*rsa.PublicKey) } func init() { publicKey = initializePublicKey() } // Factory method to create a new api 
instance func NewVaccapi() Vaccapi { var api Vaccapi api.meta = metaData{ country: "us", lang: "en", deviceId: "test", appCode: "test", appVersion: "test", channel: "test", deviceType: "test"} return api }<file_sep>package main import ( "fmt" "suceanne/vaccapi" ) func main() { fmt.Println(vaccapi.Encrypt("TEST")) }<file_sep>package config import ( "fmt" "github.com/spf13/viper" ) func setupDefaults() { viper.SetDefault("baseurl", "msg-na.ecouser.net") viper.SetDefault("deviceId", "suceanne-1.0") viper.SetDefault("countryCode", "us") viper.SetDefault("continentCode", "na") } func initializeConfig() { viper.SetConfigName("config") viper.AddConfigPath("etc/") viper.SetConfigType("yaml") setupDefaults() err := viper.ReadInConfig() if err != nil { panic(fmt.Errorf("Fatal error while reading config file: %s", err)) } } func init() { initializeConfig() } <file_sep>from Crypto.PublicKey import RSA from base64 import b64decode, b64encode key_str = '<KEY>' key = RSA.import_key(b64decode(key_str)) binPubKey = key.publickey().exportKey('PEM') print binPubKey
2604e11e828eb6c3bf56ce0e8c0836a92456ebea
[ "Python", "Go" ]
4
Go
jell0wed/suceanne
54443b1c152ce177ce569b18072b0e4ccb3713a9
c8cf4efa55fb5d242283d94beeaee9420cd842be
refs/heads/master
<file_sep><?php if (!defined('IN_TG')) { exit('Access Denied!'); } ?> <div id="footer" class="footer"> <p>程序运行时间:<?php echo round((_runtime() - START_TIME),4); ?>秒</p> <p>版权所有 翻版必究</p> <p>本程序由<span>瓢城Web俱乐部</span>提供 源代码可以任意修改或发布 (c) yc60.com</p> </div> <file_sep><?php define('IN_TG',true); define('SCRIPT','index'); require dirname(__FILE__).'/includes/common.inc.php'; ?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Your Title</title> <?php require ROOT_PATH.'includes/title.inc.php'; ?> </head> <body> <?php include ROOT_PATH.'includes/header.inc.php'; ?> <div id="list" class="list"> <h2>帖子列表</h2> </div> <div id="user" class="user"> <h2>新进会员</h2> </div> <div id="pics" class="pics"> <h2>最新图片</h2> </div> <?php include ROOT_PATH. 'includes/footer.inc.php'; ?> </body> <file_sep><?php define('IN_TG',true); define('SCRIPT','face'); require dirname(__FILE__).'/includes/common.inc.php'; ?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>BBS - face select</title> <?php require ROOT_PATH.'includes/title.inc.php'; ?> <script type="text/javascript" src="js/opener.js"></script> </head> <body> <div id="face"> <h3>select face</h3> <dl> <?php foreach (range(1,9) as $num) { ?> <dd><img src="face/m0<?php echo $num?>.gif" alt="face/m0<?php echo $num?>.gif" title="head<?php echo $num?>" /></dd> <?php } ?> </dl> <dl> <?php foreach (range(10,64) as $num) { ?> <dd><img src="face/m<?php echo $num?>.gif" alt="face/m<?php echo $num?>.gif" title="head<?php echo $num?>" /></dd> <?php } ?> </dl> </div> </div> </body> <file_sep><?php // prevent illegal accessing if (!defined('IN_TG')) { 
exit('Access Denied!'); } if (!defined('SCRIPT')) { exit('Script error!'); } ?> <link rel="shortcuticon" href="favicon.ico" /> <link rel="stylesheet" href="styles/1/basic.css" type="text/css" media="screen" title="no title" charset="utf-8"> <link rel="stylesheet" href="styles/1/<?php echo SCRIPT?>.css" type="text/css" media="" title="no title" charset="utf-8"> <file_sep><?php if (!defined('IN_TG')) { exit('Access Denied!'); } define(ROOT_PATH,substr(dirname(__FILE__),0,-8)); if (PHP_VERSION < '4.1.0') { exit('Version is too LOW!'); } require ROOT_PATH.'includes/global.func.php'; define('START_TIME', _runtime()); ?>
83685ca677e117b5b50de741bdd22b9f07a1066d
[ "PHP" ]
5
PHP
kirtgoh/php-web
a0f003b0043f24d4680a0d31903262ba5348710e
c892434fd845c3244553375521cdf343d5d94c4a
refs/heads/master
<repo_name>davidkavanaugh/buck-island-farm<file_sep>/src/components/Store/StoreNav/StoreNav.js import React from 'react'; import PropTypes from 'prop-types'; import { withStyles } from '@material-ui/core/styles'; import AppBar from '@material-ui/core/AppBar'; import Toolbar from '@material-ui/core/Toolbar'; import Headroom from 'react-headroom'; import Logo from './Logo/Logo'; import Button from '@material-ui/core/Button'; import './StoreNav.css'; const styles = { root: { flexGrow: 1, }, grow: { flexGrow: 1, } }; function StoreNav(props) { const { classes } = props; return ( <Headroom> <div id="storeNav" className={classes.root}> <AppBar position="static"> <Toolbar> <Logo /> <span className="spacer"></span> <div className="cart-btn"> <Button variant="outlined" size="small" color="inherit" className='snipcart-checkout snipcart-summary'> <i className="material-icons">shopping_cart</i> cart (<span className="snipcart-total-items"></span>) </Button> </div> </Toolbar> </AppBar> </div> </Headroom> ); } StoreNav.propTypes = { classes: PropTypes.object.isRequired, }; export default withStyles(styles)(StoreNav);<file_sep>/src/components/Store/StoreNav/Logo/Logo.js import React, { Component } from 'react'; import { withStyles } from '@material-ui/core/styles'; import Typography from '@material-ui/core/Typography'; import './Logo.css'; import LogoImg from '../../../../assets/farmlogo.jpg'; const styles = { root: { flexGrow: 1, }, grow: { flexGrow: 1, } }; class Logo extends Component { render() { const { classes } = this.props; return ( <Typography variant="h6" color="inherit" className={classes.grow}> <a id="logo" href="/" > <img className="farm-logo" src={LogoImg} alt="farm logo" /> </a> </Typography> ) } } export default withStyles(styles)(Logo); <file_sep>/README.md # Buck Island Farm ## Farm Store Web App made with [this react small business template](https://github.com/davidkavanaugh/SmallBusinessTemplate)<file_sep>/src/components/Store/Store.js import React, { 
Component } from 'react'; import { Route, Switch } from "react-router"; import { BrowserRouter as Router } from 'react-router-dom'; import StoreNav from './StoreNav/StoreNav'; import Product from './Product/Product'; import ProductList from './ProductList/ProductList'; import './Store.css'; class Store extends Component { render() { return ( <div id="store"> <StoreNav /> <Router> <Switch> <Route path="/product" exact component={ProductList} /> <Route path="/product/:id" component={Product} /> </Switch> </Router> </div> ); } } export default Store;
3f9a01e7dda7d508e53823c5046cccff7bbd78d0
[ "JavaScript", "Markdown" ]
4
JavaScript
davidkavanaugh/buck-island-farm
cbf54d9cf0d1f21bb485bbf207acf2874a30b15c
1d22983bdc79c484d71009a9e3ed2454af663b0d
refs/heads/master
<repo_name>maidmaid/numberplate<file_sep>/src/NumberPlate/Captcha.php <?php namespace NumberPlate; use TesseractOCR; class Captcha { static public function decode($filename = 'http://www.vs.ch/cari-online/drawCaptcha') { $time = microtime(true); $filenameOriginal = __DIR__ . '/../../cache/' . $time . '_o.jpg'; $filenameEdit = __DIR__ . '/../../cache/' . $time . '_e.jpg'; // clean clache static::cleanCache(); // Captcha $captcha = imagecreatefromjpeg($filename); imagejpeg($captcha, $filenameOriginal, 100); list($width, $height) = getimagesize($filenameOriginal); // Crée une nouvelle image contrastée $edit = imagecreate($width, $height); imagecolorallocate($edit, 255, 255, 255); $black = imagecolorallocate($edit, 0, 0, 0); for($x = 0; $x < $width; $x++) { for($y = 0; $y < $height; $y++) { $rgb = imagecolorat($captcha, $x, $y); $colors = imagecolorsforindex($captcha, $rgb); if($colors['red'] == 255 && $colors['green'] == 255 && $colors['blue'] == 255) { imagesetpixel($edit, $x, $y, $black); } } } imagejpeg($edit, $filenameEdit, 100); // Analase OCR $ocr = new TesseractOCR($filenameEdit); $ocr->setTempDir(__DIR__ . '/../../cache/ocr'); $ocr->setWhitelist(range('0', '9'), range('a', 'z'), range('A', 'Z')); $decoded = $ocr->recognize(); $decoded = str_replace(' ', '', $decoded); return $decoded; } static public function cleanCache() { $cache = __DIR__ . '/../../cache/'; $exclude = array('.', '..', 'ocr', 'index.html'); // clean cache/ $files = scandir($cache, SCANDIR_SORT_DESCENDING); foreach($files as $i => $file) { if ($i < 20 || in_array($file, $exclude)) { continue; } unlink($cache . $file); } // clean cache/ocr/ $files = scandir($cache . 'ocr', SCANDIR_SORT_DESCENDING); foreach($files as $file) { if(!in_array($file, $exclude)) { unlink($cache . 'ocr/' . 
$file); } } } }<file_sep>/tests/NumberPlate/SearcherTest.php <?php namespace NumberPlate; use PHPUnit_Framework_TestCase; use Symfony\Component\EventDispatcher\Event; use Symfony\Component\EventDispatcher\GenericEvent; require_once __DIR__.'/../../vendor/autoload.php'; class SearcherTest extends PHPUnit_Framework_TestCase { public function testSearch() { $searcher = new Searcher(); $data = $searcher->search('77729'); $this->assertNotEmpty($data); } public function testEvents() { $events = array( 'cookie.initialize' => false, 'captcha.download' => false, 'captcha.decode' => false, 'search.send' => false, 'error.return' => false ); $searcher = new Searcher(); $searcher->getDispatcher()->addListener('cookie.initialize', function(GenericEvent $e) use(&$events) { $events['cookie.initialize'] = true; }); $searcher->getDispatcher()->addListener('captcha.download', function(Event $e) use(&$events) { $events['captcha.download'] = true; }); $searcher->getDispatcher()->addListener('captcha.decode', function(GenericEvent $e) use(&$events) { $events['captcha.decode'] = true; }); $searcher->getDispatcher()->addListener('search.send', function(GenericEvent $e) use(&$events) { $events['search.send'] = true; }); $searcher->getDispatcher()->addListener('error.return', function(GenericEvent $e) use(&$events) { $events['error.return'] = true; }); $searcher->search('77729'); $this->assertTrue($events['cookie.initialize']); $this->assertTrue($events['captcha.download']); $this->assertTrue($events['captcha.decode']); $this->assertTrue($events['search.send']); } }<file_sep>/README.md numberplate =========== <file_sep>/src/NumberPlate/Searcher.php <?php namespace NumberPlate; use GuzzleHttp\Client; use GuzzleHttp\Cookie\CookieJar; use Symfony\Component\DomCrawler\Crawler; use Symfony\Component\EventDispatcher\EventDispatcher; use Symfony\Component\EventDispatcher\GenericEvent; class Searcher { /* @var $dispatcher EventDispatcher */ private $dispatcher; /* @var $client Client */ private 
$client; /* @var $jar CookieJar */ private $jar; private $lastError; public function __construct() { $this->lastError = ''; $this->client = new Client(); $this->jar = new CookieJar(); $this->dispatcher = new EventDispatcher(); } /** * * @return EventDispatcher */ public function getDispatcher() { return $this->dispatcher; } public function getLastError() { return $this->lastError; } public function search($numberPlate) { $options = array('cookies' => $this->jar); // Initialise les cookie if($this->jar->count() == 0) { $this->client->get('http://www.vs.ch/cari-online/rechDet', $options); $this->dispatcher->dispatch('cookie.initialize', new GenericEvent($this->jar->toArray())); } // Traite le captcha $captchaVal = ''; while(strlen($captchaVal) != 6) // 6 Chars { // Download captcha $captcha = $this->client->get('http://www.vs.ch/cari-online/drawCaptcha', $options); file_put_contents('captcha.png', $captcha->getBody()->__toString()); $this->dispatcher->dispatch('captcha.download'); // Decode captcha $captchaVal = Captcha::decode('captcha.png'); $this->dispatcher->dispatch('captcha.decode', new GenericEvent($captchaVal)); unlink('captcha.png'); } // Envoie la recherche $options['body'] = array( 'pageContext' => 'login', 'action' => 'query', 'no' => $numberPlate, 'cat' => '1', 'sousCat' => '1', 'captchaVal' => $captchaVal, 'valider' => 'Continuer' ); $response = $this->client->post('http://www.vs.ch/cari-online/rechDet', $options); $this->dispatcher->dispatch('search.send', new GenericEvent($response)); // Crawler $html = $response->getBody()->__toString(); $crawler = new Crawler($html); // Traitements des erreurs $data = array(); $e = count($e = $crawler->filter('#idDivError')) ? $e->text() : ''; $error = utf8_decode(trim($e)); if(empty($error)) { $lines = $crawler->filter('table')->eq(5)->filter('tr'); $filter = function($line) use(&$lines) { $r = count($r = $lines->eq($line)->filter('td')->eq(1)) ? 
$r->text() : ''; return utf8_decode(trim($r)); }; $data['numberplate'] = $filter(0); $data['category'] = $filter(1); $data['subcategory'] = $filter(2); $data['name'] = $filter(3); $data['address'] = $filter(4); $data['complement'] = $filter(5); $data['locality'] = $filter(6); } else { $this->lastError = $error; $this->dispatcher->dispatch('error.return', new GenericEvent($error)); if(strpos($error, 'Code incorrect') !== false) { $data = $this->search($numberPlate); } } return $data; } }<file_sep>/src/NumberPlate/AbstractSearcherSubscriber.php <?php namespace NumberPlate; use Symfony\Component\EventDispatcher\Event; use Symfony\Component\EventDispatcher\EventSubscriberInterface; use Symfony\Component\EventDispatcher\GenericEvent; abstract class AbstractSearcherSubscriber implements EventSubscriberInterface { public static function getSubscribedEvents() { return array( 'cookie.initialize' => array('onCookieInitialize', 0), 'captcha.download' => array('onCaptchaDownload', 0), 'captcha.decode' => array('onCaptchaDecode', 0), 'search.send' => array('onSearchSend', 0), 'error.return' => array('onErrorReturn', 0) ); } public function onCookieInitialize(GenericEvent $e) { } public function onCaptchaDownload(Event $e) { } public function onCaptchaDecode(GenericEvent $e) { } public function onSearchSend(GenericEvent $e) { } public function onErrorReturn(GenericEvent $e) { } }
c21e305830891672e0adfba0fd303a569058955d
[ "Markdown", "PHP" ]
5
PHP
maidmaid/numberplate
3319681dc8e794a4e8457f563a791d8ce43aed6f
2c823238b7f5a78f637f5cfb4aa6abb84c18385b
refs/heads/main
<file_sep>package cl.edutecno.dto; import java.util.List; import cl.edutecno.model.Pasajero; import lombok.AllArgsConstructor; import lombok.Data; import lombok.EqualsAndHashCode; import lombok.NoArgsConstructor; @Data @NoArgsConstructor @AllArgsConstructor @EqualsAndHashCode(callSuper = true) public class PasajeroDTO extends GenericDTO{ private List<Pasajero>pasajeros; public PasajeroDTO(List<Pasajero>pasajeros,String mensaje, String codigo) { super(mensaje, codigo); this.pasajeros=pasajeros; } } <file_sep>package cl.edutecno.controller; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Controller; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.ModelAttribute; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.servlet.ModelAndView; import org.springframework.web.servlet.view.RedirectView; import cl.edutecno.dto.PasajeroDTO; import cl.edutecno.model.Pasajero; import cl.edutecno.service.PasajeroService; @Controller @RequestMapping("pasajeroNuevo") public class NuevoPasajeroController { @Autowired private PasajeroService pasajeroService; @GetMapping public ModelAndView pasajeros() { ModelAndView modelAndView= new ModelAndView("pasajeroNuevo"); modelAndView.addObject("pasajero", new Pasajero()); return modelAndView; } @PostMapping("/agregar") public RedirectView agregar(@ModelAttribute Pasajero pasajero) { PasajeroDTO respuestaServicio= pasajeroService.add(pasajero); if(respuestaServicio.getCodigo().equals("0")) { return new RedirectView("/pasajeros"); }else { return new RedirectView("/pasajeroNuevo"); } } } <file_sep>package cl.edutecno.model; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.ManyToOne; 
import javax.persistence.SequenceGenerator; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; @Data @Entity @AllArgsConstructor @NoArgsConstructor @SequenceGenerator(name = "SQ_DESTINO", initialValue = 1, allocationSize = 1) public class Destino { @Id @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "SQ_DESTINO") private Integer id; private String ciudad; private String pais; private String fecha; @ManyToOne @JoinColumn(name="pasajero_id", referencedColumnName = "id") private Pasajero pasajero; } <file_sep>package cl.edutecno.controller; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Controller; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.servlet.ModelAndView; import cl.edutecno.service.DestinoService; @Controller @RequestMapping("destinos") public class DestinoController { @Autowired private DestinoService destinoService; @GetMapping public ModelAndView destinoLista() { ModelAndView modelAndView= new ModelAndView("destinos"); modelAndView.addObject("destino", destinoService.findAll().getDestinos()); return modelAndView; } } <file_sep>driver=oracle.jdbc.driver.OracleDriver url=jdbc:oracle:thin:@localhost:1521:orcl dbuser=controlPasajeros dbpassword=<PASSWORD> <file_sep>package cl.edutecno.service; import cl.edutecno.dto.DestinoDTO; import cl.edutecno.model.Destino; public interface DestinoService { DestinoDTO findAll(); DestinoDTO add(Destino destino); } <file_sep>package cl.edutecno.service; import java.util.ArrayList; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import cl.edutecno.dto.PasajeroDTO; import cl.edutecno.model.Pasajero; import 
cl.edutecno.repository.PasajeroRepository; @Service public class PasajeroServiceImpl implements PasajeroService{ private static final Logger log = LoggerFactory.getLogger(PasajeroServiceImpl.class); @Autowired private PasajeroRepository daoPasajero; private PasajeroDTO respuestaPasajero; @Override @Transactional(readOnly = true) public PasajeroDTO findAll() { respuestaPasajero=new PasajeroDTO(new ArrayList<Pasajero>(),"Ha ocurrido un error", "101"); try { respuestaPasajero.setPasajeros((List<Pasajero>)daoPasajero.findAll()); respuestaPasajero.setMensaje(String.format("Se ha/n encontrado %d registro/s", respuestaPasajero.getPasajeros().size())); respuestaPasajero.setCodigo("0"); } catch (Exception e) { log.trace("PasajeroService: Error en findAll", e); } return respuestaPasajero; } @Override @Transactional public PasajeroDTO add(Pasajero pasajero) { respuestaPasajero=new PasajeroDTO(new ArrayList<Pasajero>(),"Ha ocurrido un error", "102"); try { daoPasajero.save(pasajero); respuestaPasajero.setMensaje(String.format("Se ha guardado correctamente el pasajero %s", pasajero.getNombre()+" "+pasajero.getApellido())); respuestaPasajero.setCodigo("0"); } catch (Exception e) { log.trace("PasajeroService: error en add()",e); } return respuestaPasajero; } } <file_sep>package cl.edutecno.controller; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Controller; import org.springframework.ui.Model; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.ModelAttribute; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.servlet.ModelAndView; import org.springframework.web.servlet.view.RedirectView; import cl.edutecno.dto.DestinoDTO; import cl.edutecno.model.Destino; import cl.edutecno.service.DestinoService; import 
cl.edutecno.service.PasajeroService; @Controller @RequestMapping("destinoNuevo") public class NuevoDestinoController { private static final Logger log = LoggerFactory.getLogger(NuevoDestinoController.class); @Autowired private DestinoService destinoService; @Autowired private PasajeroService pasajeroService; @GetMapping public ModelAndView destinos(Model model) { ModelAndView modelAndView= new ModelAndView("destinoNuevo"); modelAndView.addObject("destino", new Destino()); model.addAttribute("pasajeros", pasajeroService.findAll().getPasajeros()); return modelAndView; } @PostMapping("/agregar") public RedirectView agregar(@ModelAttribute Destino destino) { DestinoDTO respuestaServicio= destinoService.add(destino); if(respuestaServicio.getCodigo().equals("0")) { return new RedirectView("/destinoNuevo"); }else { return new RedirectView("/destinos"); } } } <file_sep>package cl.edutecno.model; import java.util.List; import javax.persistence.CascadeType; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.OneToMany; import javax.persistence.SequenceGenerator; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; @Data @Entity @AllArgsConstructor @NoArgsConstructor @SequenceGenerator(name = "SQ_PASAJERO", initialValue = 1, allocationSize = 1) public class Pasajero { @Id @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "SQ_PASAJERO") private Integer id; private String rut; private String nombre; private String apellido; private Integer edad; private String ciudadNatal; @OneToMany(cascade=CascadeType.ALL, targetEntity=Destino.class) @JoinColumn(name="id") private List<Destino> destino; }
2d4dea47767aa78d17a02e4f30e5535fe6838e43
[ "Java", "INI" ]
9
Java
PauloGarcesCarraco/SistemaAdministracionVuelos
a7cf7c660ee5e9fbbd44aec13b7bc37e824c9302
c34f60b80bdcae5f45eaa4c129735b708f51a10e
refs/heads/master
<file_sep>addEventListener('fetch', event => { event.respondWith(handleRequest(event.request)) }) /** * Fetch and log a request * @param {Request} request */ async function handleRequest(request) { const { url } = request; const url_obj = new URL(url); const src = url_obj.searchParams.get('src') || 'https://raw.githubusercontent.com/ontouchstart/ontouchstart-rustwasm-markdown-parser/master/README.md'; const input = await (await fetch(src)).text() || `# Hello World`; const { parse } = wasm_bindgen; await wasm_bindgen(wasm); const output = ` <html> <meta charSet="utf-8"/> <a href="https://ontouchstart-rustwasm-markdown-parser.ontouchstart.workers.dev">🏡</a> <h1>HTML</h1> ${parse(input)} <h1>Markdown</h1> <pre><code>${input}</code></pre> </html> ` const headers = new Headers({ 'Content-Type': 'text/html', 'Access-Control-Allow-Origin': '*' }); const res = new Response(output, { status: 200, headers }); return res; } <file_sep>name = "ontouchstart-rustwasm-markdown-parser" type = "rust" account_id = "<KEY>" workers_dev = true route = "" zone_id = ""
caa94a1ad07195b154d7bc54b93e4e1eefc3f24f
[ "JavaScript", "TOML" ]
2
JavaScript
ontouchstart/ontouchstart-rustwasm-markdown-parser
5e7effb9f81da3c1ef7392574fa2aba729e2bf60
409c7463dbe77e4d0287a0454031ff515f0573b2
refs/heads/master
<repo_name>IQSS/dataverse-client-java<file_sep>/src/main/java/com/researchspace/dataverse/search/entities/SearchResults.java /* * */ package com.researchspace.dataverse.search.entities; import java.util.List; import java.util.stream.Collectors; import com.fasterxml.jackson.annotation.JsonProperty; import lombok.Data; /** * /** <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. </pre> * Encapsulates search results. * <p/> * Items' subclass can be identified from the 'type' value. * * @author rspace * */ @Data public class SearchResults <T extends Item> { String q; @JsonProperty(value = "total_count") private int totalCount; @JsonProperty(value = "count_in_response") private int countInResponse; int start; List<Object> spellingAlternatives; List<T> items; /** * Filters a list of SearchHits by their type. 
* @param type * @return */ public List<Item> filterByType (SearchType type) { return items.stream().filter((i)-> i.getType().equalsIgnoreCase(type.name())) .collect(Collectors.toList()); } } <file_sep>/src/main/java/com/researchspace/dataverse/entities/ObjectOrStringMessageDeserializer.java package com.researchspace.dataverse.entities; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.databind.DeserializationContext; import com.fasterxml.jackson.databind.JsonDeserializer; import com.fasterxml.jackson.databind.JsonNode; import java.io.IOException; /** * 'message' property can be a string or an object with property 'message'; this handles both */ public class ObjectOrStringMessageDeserializer extends JsonDeserializer<String> { @Override public String deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException { JsonNode node = jp.getCodec().readTree((jp)); if (node.isTextual()){ return node.toString(); } else if (node.isObject()){ return node.get("message").toString(); } else{ throw new IllegalArgumentException("expect a string or an object with a string property 'message'"); } } } <file_sep>/CHANGELOG.md Significant changes since 0.1.0 1.1.0 In progress - feature: Support upload of files to a dataset using native API. #16 - feature: After creating a Dataset, the persistent ID is stored in the Identifier object. #22 1.0.0 2022-11-21 Increasing major version due to major updates to dependencies. However, there are no breaking API changes in this library. - dependencies: Major dependency updates to Spring 5.3, Lombok 18.24. - build: fix integration tests - build: enable integration test running through Github actions - build: test build and test on Java 8, 11, and 17. 
0.2.0 2022-11-20 - build: update gradlew to use gradle 7.5 - build: Basic Github action to run tests on pull request - dependency: update Lombok from 1.16 to 1.18.4 to enable building on Java 11 - feature: #12 overloaded method for uploadFile <file_sep>/Contributing.md Requests for features, bug-fixes and any improvements are always welcome, as are constructive suggestions. Please raise an issue or submit a pull request.<file_sep>/src/main/java/com/researchspace/dataverse/spring/config/DataverseSpringConfig.java /* * */ package com.researchspace.dataverse.spring.config; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Scope; import com.researchspace.dataverse.api.v1.DataverseAPI; import com.researchspace.dataverse.http.DataverseAPIImpl; /** * /** <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. </pre> * Wires up classes and produces Beans for this component. 
* @author rspace * */ @Configuration public class DataverseSpringConfig { @Bean @Scope(value="prototype") DataverseAPI dataverseAPI(){ return new DataverseAPIImpl(); } } <file_sep>/src/test/java/com/researchspace/dataverse/http/DatasetFilePostMockServerTest.java /* * */ package com.researchspace.dataverse.http; import com.researchspace.dataverse.api.v1.DataverseConfig; import com.researchspace.dataverse.entities.DatasetFileList; import com.researchspace.dataverse.entities.Identifier; import com.researchspace.dataverse.search.entities.SearchConfig; import com.researchspace.dataverse.testutils.TestFileUtils; import org.junit.Test; import org.springframework.http.HttpMethod; import org.springframework.test.web.client.ExpectedCount; import org.springframework.test.web.client.MockRestServiceServer; import org.springframework.test.web.client.match.MockRestRequestMatchers; import org.springframework.test.web.client.response.MockRestResponseCreators; import org.springframework.web.client.RestTemplate; import java.net.MalformedURLException; import java.net.URL; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.springframework.http.MediaType.APPLICATION_JSON; import static org.springframework.test.web.client.match.MockRestRequestMatchers.method; /** <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
</pre> */ public class DatasetFilePostMockServerTest { @Test public void testNativeFilePost() throws MalformedURLException { RestTemplate template = new RestTemplate(); DataverseOperationsImplV1 tss = setupDataverseOps(template); final String persistentid = "doi://dsfh.dsdsd.sds"; setUpServerResponse(template, "http://anyDataverse.com/api/v1/datasets/:persistentId/add?persistentId="+persistentid, getDataSetFileUploadResults() ); DataverseConfig cfg = new DataverseConfig(new URL("http://anyDataverse.com"), "any", "alias"); tss.configure(cfg); Identifier id = new Identifier(); id.setId(1234L); id.setPersistentId(persistentid); DatasetFileList resp = tss.uploadNativeFile(new byte []{}, FileUploadMetadata.builder().build(), id, "any"); assertNotNull(resp.getFiles()); assertEquals(1, resp.getFiles().size()); } private void setUpServerResponse(RestTemplate template, String url, String response) { MockRestServiceServer server = MockRestServiceServer.bindTo(template).build(); server.expect(ExpectedCount.once(), MockRestRequestMatchers.requestTo(url)) .andExpect(method(HttpMethod.POST)) .andRespond(MockRestResponseCreators.withSuccess(response, APPLICATION_JSON)); } DataverseOperationsImplV1 setUpDataset (SearchConfig srchCfg, String url, GetJson expectedJsonGetter) throws MalformedURLException { RestTemplate template = new RestTemplate(); DataverseOperationsImplV1 tss = setupDataverseOps(template); setUpServerResponse(template, url, expectedJsonGetter.getJson() ); DataverseConfig cfg = new DataverseConfig(new URL("http://anyDataverse.com"), "any", "alias"); tss.configure(cfg); return tss; } private DataverseOperationsImplV1 setupDataverseOps(RestTemplate template) { DataverseOperationsImplV1 tss = new DataverseOperationsImplV1(); tss.setTemplate(template); return tss; } @FunctionalInterface static interface GetJson { String getJson (); } private String getDataSetFileUploadResults() { return TestFileUtils.getJsonFromFile("nativeFileUploadResponse.json"); } } 
<file_sep>/src/main/java/com/researchspace/dataverse/entities/DataversePost.java package com.researchspace.dataverse.entities; import java.util.Date; import java.util.List; import com.fasterxml.jackson.annotation.JsonFormat; import lombok.Data; /** <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. </pre> */ @Data @JsonFormat public class DataversePost { private String id; private String alias; private String name; private String affiliation; private String permissionRoot; private String description; private String ownerId; private Date creationDate; private List<DataverseContacts> dataverseContacts; } <file_sep>/build.gradle apply plugin: 'java' group = 'com.researchspace' sourceCompatibility = 1.8 version = '1.0.0' def springVersion='5.3.24' def jacksonVersion='2.11.1' def lombokVersion='1.18.24' repositories { mavenCentral() } configurations.all { resolutionStrategy { force 'xml-apis:xml-apis:1.0.b2' } } sourceSets { integrationTest { java { compileClasspath += main.output + test.output runtimeClasspath += main.output + test.output srcDir file('src/integration-test/java') } resources.srcDir file('src/integration-test/resources') } } // these are unused SWORD transitive dependencies which are either obsolete or now included in the JDK configurations { all*.exclude group: 'javax.jms' all*.exclude group: 'com.sun.jdmk' all*.exclude group: 'com.sun.jmx' integrationTestImplementation.extendsFrom testImplementation integrationTestRuntime.extendsFrom testRuntime } dependencies 
{ implementation 'org.springframework:spring-web:'+springVersion implementation 'org.springframework:spring-context:'+springVersion implementation 'org.springframework:spring-core:'+springVersion implementation 'com.fasterxml.jackson.core:jackson-annotations:'+jacksonVersion implementation 'com.fasterxml.jackson.core:jackson-core:'+jacksonVersion implementation 'com.fasterxml.jackson.core:jackson-databind:'+jacksonVersion implementation 'org.projectlombok:lombok:' + lombokVersion annotationProcessor 'org.projectlombok:lombok:' + lombokVersion implementation 'org.apache.commons:commons-lang3:3.12.0' implementation 'org.slf4j:slf4j-api:1.7.21' //for SWORD-API. implementation 'org.swordapp:sword2-client:0.9.3' implementation 'org.apache.abdera:abdera-client:1.1.1' testImplementation 'junit:junit:4.13.2' testImplementation 'org.springframework:spring-test:'+springVersion testImplementation 'org.slf4j:slf4j-simple:1.7.36' } test{ testLogging.showStandardStreams = true //delegate command line properties through to Test JVM systemProperties = System.getProperties() } task integrationTest(type: Test) { testClassesDirs = sourceSets.integrationTest.output classpath = sourceSets.integrationTest.runtimeClasspath outputs.upToDateWhen { false } systemProperties = System.getProperties() } check.dependsOn integrationTest integrationTest.mustRunAfter test tasks.withType(Test) { reports.html.destination = file("${reporting.baseDir}/${name}") } //Lists dependencies task listJars { doLast { configurations.compile.each { File file -> println file.name } } } <file_sep>/Readme.md # Dataverse API bindings project This project is a Java wrapper around the [Dataverse API](http://guides.dataverse.org/en/4.2/api/). It was initially contributed by [ResearchSpace](www.researchspace.com) in October 2016. ## Building ### Dependencies This project requires Java 8 minimum to compile and run. It is built and tested on Java 11 and Java 17. 
It also uses Spring-web (to provide low-level HTTP request/response parsing.) The Sword client library is included in this project as a jar file, as it is not available in a public maven repository. ### Gradle This project is built using Gradle. You can build straight away without needing to install anything: ./gradlew clean build -x integrationTest which will compile, run unit tests (but not integration tests) and build a jar file. ### Running integration tests Integration tests require a connection to a Dataverse instance. In order to connect to a Dataverse for running tests, the following configuration is set up in `test.properties`. dataverseServerURL=https://demo.dataverse.org dataverseAlias=otter606 As a minimum, you'll need to specify an API key on the command line to run the tests: ./gradlew clean integrationTest -DdataverseApiKey=xxx-<KEY> You can also override the Dataverse server URL and Id with your own settings by setting them on the command line: ./gradlew clean integrationTest -DdataverseServerURL=https://my.dataverse.org -DdataverseApiKey=xxx-xxx-xxx -DdataverseAlias=MY-DEMO-DATAVERSE ### Installing into a Maven repository This project can be added as a Gradle or Maven dependency in your project using [JitPack](https://jitpack.io). If using Maven, add this to your pom.xml file (thanks AleixMT). ``` <repositories> <repository> <id>jitpack.io</id> <url>https://jitpack.io</url> </repository> </repositories> <dependencies> <dependency> <groupId>com.github.IQSS</groupId> <artifactId>dataverse-client-java</artifactId> <version>master-SNAPSHOT</version> </dependency> </dependencies>` ``` Or, you can run: ./gradlew clean install to install into a local repository and generate a pom.xml file for calculating dependencies. ## Usage The best way to explore the bindings currently is by examining integration tests, especially those extending from `AbstractIntegrationTest`. Very briefly.... 
```java DataverseAPI api = new DataverseAPIImpl(); //must set in serverURL and apiKey first. DataverseConfig config = new DataverseConfig(serverURL, apiKey, dataverseAlias); api.configure(config); // now you can call api.getDataverseOperations().getDataverseById(dataverseAlias); ``` Searching uses a builder pattern to build a search query: ```java SearchOperations searchOps = dataverseAPI.getSearchOperations(); SearchConfig cfg = searchOps.builder().q(FILE_SEARCH_TERM) .sortBy(SortBy.date) .sortOrder(SortOrder.asc) .showFacets(true) .showRelevance(true) .start(1) .perPage(3) .build(); DataverseResponse<SearchResults<Item>> results = searchOps.search(cfg); ``` ### Synchronisation and thread-safety There is no explicit synchronisation performed in this library. The Dataverse configuration is stored in the internal state of implementation classes, so new instances of `DataverseAPIImpl` should be used for each request if running in a multi-threaded environment connecting to different Dataverses. ## Github actions Tests and integration tests run on: - pull request - merge to master, - once a month, to detect regressions in API calls to https://demo.dataverse.org. ## Developing This project makes use of [Project Lombok](https://projectlombok.org) which greatly speeds up the development of POJO classes to wrap JSON data structures. There are [instructions](https://projectlombok.org/features/index.html) on how to add it to your IDE. ### Coding standards Please make sure tests pass before committing, and to add new tests for new additions. 
## Progress API | Endpoint | URL | Implemented ?| Notes ------|----------|-----|--------------|------- Native|Dataverses | POST `api/dataverses/$id` | Y| - | - | - | GET `api/dataverses/$id` | Y | - | - | - | GET `api/dataverses/$id/contents` | Y | - | - | - | DELETE `api/dataverses/$id` | Y | - | - | - | POST `api/dataverses/$id/datasets` | Y | - | - | - | POST `api/dataverses/$identifier/actions/:publish` | Y | - Native|Datasets | POST `api/dataverses/$id` | Y| - | - | - | GET `api/datasets/$id` | Y | - | - | - | DELETE `api/datasets/$id` | Y | - | - | - | GET `api/datasets/$id/versions` | Y | - | - | - | GET `PUT api/datasets/$id/versions/:draft?` | Y | - | - | - | POST `PUT api/datasets/$id/actions/:publish?type=$type` | Y | - Native|MetadataBlocks | GET ` api/metadatablocks` | Y| - | - | - | GET ` api/metadatablocks/$identifier` | Y| - Search | - | GET `api/search` | In progress | All query params supported, optional data not returned yet. Sword | Upload file | 'Add files to a dataset with a zip file' | Y | - <file_sep>/src/main/java/com/researchspace/dataverse/entities/facade/ContributorType.java /* * */ package com.researchspace.dataverse.entities.facade; /** * /** <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. </pre> * Listing of Contributor Types for Dataset 'Contributor' Citation field. 
* @author rspace * */ public enum ContributorType { DataCollector("Data Collector"), DataCurator("Data Curator"), DataManager("Data Manager"), Editor("Editor"), Funder("Funder"), HostingInstitution("Hosting Institution"), ProjectLeader("Project Leader"), ProjectManager("Project Manager"), ProjectMember("Project Member"), RelatedPerson("Related Person"), Researcher("Researcher"), ResearchGroup("Research Group"), RightsHolder("Rights Holder"), Sponsor("Sponsor"), Supervisor("Supervisor"), WorkPackageLeader("Work Package Leader"), Other("Other"); private String displayName; private ContributorType (String displayName) { this.displayName = displayName; } public String getDisplayName() { return displayName; } } <file_sep>/src/main/java/com/researchspace/dataverse/api/v1/SearchOperations.java /* * */ package com.researchspace.dataverse.api.v1; import com.researchspace.dataverse.entities.DataverseResponse; import com.researchspace.dataverse.search.entities.DatasetItem; import com.researchspace.dataverse.search.entities.DataverseItem; import com.researchspace.dataverse.search.entities.FileSearchHit; import com.researchspace.dataverse.search.entities.Item; import com.researchspace.dataverse.search.entities.SearchConfig; import com.researchspace.dataverse.search.entities.SearchConfig.SearchConfigBuilder; import com.researchspace.dataverse.search.entities.SearchResults; /** * <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
</pre> Top-level entry point into the Dataverse Level1 Search API * @author rspace * */ public interface SearchOperations { /** * Gets a new instance of a SearchConfigBuilder to configure a search query * @return */ SearchConfigBuilder builder(); /** * Perform a search * @param cfg A {@link SearchConfig} object generated from a {@link SearchConfigBuilder} * @return */ DataverseResponse<SearchResults<Item>> search(SearchConfig cfg); /** * A search restricted to Dataverse files that returns a typed list of {@link FileSearchHit}. * @param A {@link SearchConfig} configured to search by SearchType.file only * @throws IllegalArgumentException if search config is not set to return files only. */ DataverseResponse<SearchResults<FileSearchHit>> searchFiles(SearchConfig cfg); /** * A search restricted to Dataverses that returns a typed list of {@link DataverseItem}. * @param A {@link SearchConfig} configured to search by SearchType.dataverse only * @throws IllegalArgumentException if search config is not set to return dataverses only. */ DataverseResponse<SearchResults<DataverseItem>> searchDataverses(SearchConfig cfg); /** * A search restricted to Dataverses that returns a typed list of {@link DatasetItem}. * @param A {@link SearchConfig} configured to search by SearchType.dataset only * @throws IllegalArgumentException if search config is not set to return datasets only. 
*/ DataverseResponse<SearchResults<DatasetItem>> searchDatasets(SearchConfig cfg); } <file_sep>/src/test/java/com/researchspace/dataverse/http/SearchOpsMockServerTest.java /* * */ package com.researchspace.dataverse.http; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.springframework.http.MediaType.APPLICATION_JSON; import static org.springframework.test.web.client.match.MockRestRequestMatchers.method; import java.net.MalformedURLException; import java.net.URL; import java.util.EnumSet; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.springframework.http.HttpMethod; import org.springframework.test.web.client.ExpectedCount; import org.springframework.test.web.client.MockRestServiceServer; import org.springframework.test.web.client.match.MockRestRequestMatchers; import org.springframework.test.web.client.response.MockRestResponseCreators; import org.springframework.web.client.RestTemplate; import com.researchspace.dataverse.api.v1.DataverseConfig; import com.researchspace.dataverse.entities.DataverseResponse; import com.researchspace.dataverse.search.entities.DatasetItem; import com.researchspace.dataverse.search.entities.DataverseItem; import com.researchspace.dataverse.search.entities.FileSearchHit; import com.researchspace.dataverse.search.entities.Item; import com.researchspace.dataverse.search.entities.SearchConfig; import com.researchspace.dataverse.search.entities.SearchResults; import com.researchspace.dataverse.search.entities.SearchType; import com.researchspace.dataverse.testutils.TestFileUtils; /** <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. </pre> */ public class SearchOpsMockServerTest { @Before public void setUp() throws Exception { } @After public void tearDown() throws Exception { } @Test public void testSimpleSearch() throws MalformedURLException { SearchConfig scfg = SearchConfig.builder().q("trees").build(); RestTemplate template = new RestTemplate(); SearchOperationsImplV1 tss = setupSearchOps(template); setUpServerResponse(template, "http://anyDataverse.com/api/v1/search?q=trees", getSimpleQueryResult() ); DataverseConfig cfg = new DataverseConfig(new URL("http://anyDataverse.com"), "any", "alias"); tss.configure(cfg); DataverseResponse<SearchResults<Item>> resp = tss.search(scfg); assertNotNull(resp.getData()); assertEquals("trees", resp.getData().getQ()); } private void setUpServerResponse(RestTemplate template, String url, String response) { MockRestServiceServer server = MockRestServiceServer.bindTo(template).build(); server.expect(ExpectedCount.once(), MockRestRequestMatchers.requestTo(url)) .andExpect(method(HttpMethod.GET)) .andRespond(MockRestResponseCreators.withSuccess(response, APPLICATION_JSON)); } @Test public void testComplexSearch() throws MalformedURLException { SearchConfig scfg = SearchConfig.builder().q("trees").build(); RestTemplate template = new RestTemplate(); SearchOperationsImplV1 tss = setupSearchOps(template); setUpServerResponse(template, "http://anyDataverse.com/api/v1/search?q=trees", getComplexQueryResult() ); DataverseConfig cfg = new DataverseConfig(new URL("http://anyDataverse.com"), "any", "alias"); tss.configure(cfg); DataverseResponse<SearchResults<Item>> resp = 
tss.search(scfg); assertNotNull(resp.getData()); assertEquals("trees", resp.getData().getQ()); SearchResults<Item> searches = resp.getData(); assertEquals(3, searches.getTotalCount()); assertEquals(3, searches.getCountInResponse()); assertEquals(3, searches.getItems().size()); assertEquals(1, searches.filterByType(SearchType.dataset).size()); assertEquals(1, searches.filterByType(SearchType.dataverse).size()); assertEquals(1, searches.filterByType(SearchType.file).size()); } @Test public void testFileSearch() throws MalformedURLException { SearchConfig scfg = SearchConfig.builder().q("trees").type(EnumSet.of(SearchType.file)).build(); RestTemplate template = new RestTemplate(); SearchOperationsImplV1 tss = setupSearchOps(template); setUpServerResponse(template, "http://anyDataverse.com/api/v1/search?q=trees&type=file", getFileQueryResult() ); DataverseConfig cfg = new DataverseConfig(new URL("http://anyDataverse.com"), "any", "alias"); tss.configure(cfg); DataverseResponse<SearchResults<FileSearchHit>> resp = tss.searchFiles(scfg); SearchResults<FileSearchHit> searches = resp.getData(); assertBasicSearchResultParsing(searches); assertEquals("text/plain", searches.getItems().get(0).getFileContentType()); } @Test public void testDatasetSearch() throws MalformedURLException { SearchConfig scfg = SearchConfig.builder().q("trees").type(EnumSet.of(SearchType.dataset)).build(); SearchOperationsImplV1 tss = setUpSearch(scfg, "http://anyDataverse.com/api/v1/search?q=trees&type=dataset", ()->getDatasetQueryResult() ); DataverseResponse<SearchResults<DatasetItem>> resp = tss.searchDatasets(scfg); SearchResults<DatasetItem> searches = resp.getData(); assertBasicSearchResultParsing(searches); assertEquals("doi:10.5072/FK2/1FUEXN", searches.getItems().get(0).getGlobalId()); } @Test public void testDataverseSearch() throws MalformedURLException { SearchConfig scfg = SearchConfig.builder().q("trees").type(EnumSet.of(SearchType.dataverse)).build(); SearchOperationsImplV1 tss = 
setUpSearch(scfg, "http://anyDataverse.com/api/v1/search?q=trees&type=dataverse", ()->getDataverseQueryResult() ); DataverseResponse<SearchResults<DataverseItem>> resp = tss.searchDataverses(scfg); SearchResults<DataverseItem> searches = resp.getData(); assertBasicSearchResultParsing(searches); assertEquals("https://demo.dataverse.org/dataverse/trunctest", searches.getItems().get(0).getUrl()); } SearchOperationsImplV1 setUpSearch (SearchConfig srchCfg, String url, GetJson expectedJsonGetter) throws MalformedURLException { RestTemplate template = new RestTemplate(); SearchOperationsImplV1 tss = setupSearchOps(template); setUpServerResponse(template, url, expectedJsonGetter.getJson() ); DataverseConfig cfg = new DataverseConfig(new URL("http://anyDataverse.com"), "any", "alias"); tss.configure(cfg); return tss; } void assertBasicSearchResultParsing (SearchResults<? extends Item> searches) { assertNotNull(searches); assertEquals("trees", searches.getQ()); assertEquals(1, searches.getTotalCount()); assertEquals(1, searches.getCountInResponse()); assertEquals(1, searches.getItems().size()); } @Test(expected=IllegalArgumentException.class) public void testFileSearchThrowsIAEIfTypeNotFile() { SearchConfig scfg = SearchConfig.builder().q("trees").type(EnumSet.of(SearchType.dataset)).build(); RestTemplate template = new RestTemplate(); SearchOperationsImplV1 tss = setupSearchOps(template); tss.searchFiles(scfg); } private SearchOperationsImplV1 setupSearchOps(RestTemplate template) { SearchOperationsImplV1 tss = new SearchOperationsImplV1(); tss.setTemplate(template); return tss; } @FunctionalInterface static interface GetJson { String getJson (); } private String getSimpleQueryResult() { return TestFileUtils.getJsonFromFile("simpleQuery.json"); } //gets 3 results: file, dataset, dataverse private String getComplexQueryResult() { return TestFileUtils.getJsonFromFile("multiTypeSearch.json"); } private String getFileQueryResult() { return 
TestFileUtils.getJsonFromFile("fileSearch.json"); } private String getDatasetQueryResult() { return TestFileUtils.getJsonFromFile("datasetSearch.json"); } private String getDataverseQueryResult() { return TestFileUtils.getJsonFromFile("dataverseSearch.json"); } } <file_sep>/src/main/java/com/researchspace/dataverse/search/entities/SearchConfig.java /* * */ package com.researchspace.dataverse.search.entities; import java.util.EnumSet; import org.apache.commons.lang3.Validate; import lombok.Builder; import lombok.NonNull; import lombok.Value; /** * /** <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. </pre> * Read-only search configuration object. <br/> * Use the <code>builder()</code> method to return a new SearchConfigBuilder to * build a search configuration in a Fluent style. * * @author rspace * */ @Builder(toBuilder = true) @Value public class SearchConfig { /** * Builder for configuring search via fluent API * * @author rspace * */ public static class SearchConfigBuilder { /** * Sets results per page. Maximum is 1000 * * @param perPage * if &gt 1000, will set to 1000 * @return * @throws IllegalArgumentException * if <code>perPage</code> &lt= 0 */ public SearchConfigBuilder perPage(int perPage) { Validate.isTrue(perPage > 0, "Cannot have negative results per page"); if (perPage > MAX_RESULTS_PER_PAGE) { perPage = MAX_RESULTS_PER_PAGE; } this.perPage = perPage; return this; } /** * Sets results per page. 
Maximum is 1000 * * @param perPage * if &gt 1000, will set to 1000 * @return * @throws IllegalArgumentException * if <code>perPage</code> &lt= 0 */ public SearchConfigBuilder start(int start) { Validate.isTrue(start > 0, "Cannot have negative starting point"); this.start = start; return this; } } public static final int MAX_RESULTS_PER_PAGE = 1000; private EnumSet<SearchType> type; private @NonNull String q; private String subtree, filterQuery; private SortBy sortBy; private SortOrder sortOrder; private int perPage, start; private boolean showRelevance, showFacets; } <file_sep>/src/main/java/com/researchspace/dataverse/http/SearchOperationsImplV1.java /* * */ package com.researchspace.dataverse.http; import org.springframework.core.ParameterizedTypeReference; import org.springframework.http.HttpEntity; import org.springframework.http.HttpHeaders; import org.springframework.http.HttpMethod; import org.springframework.http.ResponseEntity; import com.researchspace.dataverse.api.v1.SearchOperations; import com.researchspace.dataverse.entities.DataverseResponse; import com.researchspace.dataverse.search.entities.DatasetItem; import com.researchspace.dataverse.search.entities.DataverseItem; import com.researchspace.dataverse.search.entities.FileSearchHit; import com.researchspace.dataverse.search.entities.Item; import com.researchspace.dataverse.search.entities.SearchConfig; import com.researchspace.dataverse.search.entities.SearchConfig.SearchConfigBuilder; import com.researchspace.dataverse.search.entities.SearchResults; import com.researchspace.dataverse.search.entities.SearchType; import lombok.extern.slf4j.Slf4j; /** <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. </pre> */ @Slf4j public class SearchOperationsImplV1 extends AbstractOpsImplV1 implements SearchOperations { private SearchURLBuilder urlBuilder = new SearchURLBuilder(); @Override public SearchConfigBuilder builder() { return SearchConfig.builder(); } @Override public DataverseResponse<SearchResults<Item>> search(SearchConfig cfg) { String url = createV1Url("search"); url = urlBuilder.buildSearchUrl(url, cfg); HttpHeaders headers = addAPIKeyToHeader(); ParameterizedTypeReference<DataverseResponse<SearchResults<Item>>> type = new ParameterizedTypeReference<DataverseResponse<SearchResults<Item>>>() { }; ResponseEntity<DataverseResponse<SearchResults<Item>>> resp = template.exchange(url, HttpMethod.GET, createHttpEntity(headers), type); log.debug(resp.getBody().getData().toString()); return resp.getBody(); } private HttpEntity<String> createHttpEntity(HttpHeaders headers) { HttpEntity<String> entity = new HttpEntity<String>("", headers); return entity; } @Override public DataverseResponse<SearchResults<FileSearchHit>> searchFiles(SearchConfig cfg) { validateSrchConfig(cfg, SearchType.file); String url = createV1Url("search"); url = urlBuilder.buildSearchUrl(url, cfg); HttpHeaders headers = addAPIKeyToHeader(); ParameterizedTypeReference<DataverseResponse<SearchResults<FileSearchHit>>> type = new ParameterizedTypeReference<DataverseResponse<SearchResults<FileSearchHit>>>() { }; ResponseEntity<DataverseResponse<SearchResults<FileSearchHit>>> resp = template.exchange(url, HttpMethod.GET, createHttpEntity(headers), type); log.debug(resp.getBody().getData().toString()); return 
resp.getBody(); } @Override public DataverseResponse<SearchResults<DatasetItem>> searchDatasets(SearchConfig cfg) { validateSrchConfig(cfg, SearchType.dataset); String url = createV1Url("search"); url = urlBuilder.buildSearchUrl(url, cfg); HttpHeaders headers = addAPIKeyToHeader(); ParameterizedTypeReference<DataverseResponse<SearchResults<DatasetItem>>> type = new ParameterizedTypeReference<DataverseResponse<SearchResults<DatasetItem>>>() { }; ResponseEntity<DataverseResponse<SearchResults<DatasetItem>>> resp = template.exchange(url, HttpMethod.GET, createHttpEntity(headers), type); log.debug(resp.getBody().getData().toString()); return resp.getBody(); } @Override public DataverseResponse<SearchResults<DataverseItem>> searchDataverses(SearchConfig cfg) { validateSrchConfig(cfg, SearchType.dataverse); String url = createV1Url("search"); url = urlBuilder.buildSearchUrl(url, cfg); HttpHeaders headers = addAPIKeyToHeader(); ParameterizedTypeReference<DataverseResponse<SearchResults<DataverseItem>>> type = new ParameterizedTypeReference<DataverseResponse<SearchResults<DataverseItem>>>() { }; ResponseEntity<DataverseResponse<SearchResults<DataverseItem>>> resp = template.exchange(url, HttpMethod.GET, createHttpEntity(headers), type); log.debug(resp.getBody().getData().toString()); return resp.getBody(); } private void validateSrchConfig(SearchConfig cfg, SearchType expected) { if(cfg.getType().size() != 1 || !cfg.getType().contains(expected)) { throw new IllegalArgumentException(String.format("Search must be configured to search only %ss", expected.name())); } } } <file_sep>/src/integration-test/java/com/researchspace/dataverse/http/DataverseOperationsTest.java /* * */ package com.researchspace.dataverse.http; import com.researchspace.dataverse.entities.*; import org.apache.commons.lang.RandomStringUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import java.util.Arrays; import static org.junit.Assert.*; /** <pre> 
Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. </pre> */ public class DataverseOperationsTest extends AbstractIntegrationTest { @Before public void setup() throws Exception { super.setUp(); } @After public void tearDown() throws Exception { } @Test public void createPublishAndDeleteNewDataverse(){ String dvName = RandomStringUtils.randomAlphabetic(10); DataversePost dv = createADataverse(dvName); DataverseResponse<DataversePost> success = dataverseOps.createNewDataverse(dataverseAlias, dv); assertNotNull(success.getData()); assertNotNull(success.getData().getId()); dataverseOps.publishDataverse(dvName); DataverseResponse<DvMessage> deleted = dataverseOps.deleteDataverse(dvName); assertTrue(deleted.getStatus().equals("OK")); assertNotNull(deleted.getData()); } static DataversePost createADataverse(String dvName) { DataversePost dv = new DataversePost(); dv.setAlias(dvName); dv.setName("Test Instance " + dvName); dv.setDataverseContacts(Arrays.asList(new DataverseContacts("<EMAIL>"))); return dv; } @Test public void deleteUnknownDataverseHandled () { DataverseResponse<DvMessage> deleted = dataverseOps.deleteDataverse("ra"); assertTrue(deleted.getStatus().equals(ERROR_MSG)); assertNull(deleted.getData()); } @Test(expected=IllegalArgumentException.class) public void createDataverseValidation () { String dvName = RandomStringUtils.randomAlphabetic(10); DataversePost dv = createADataverse(dvName); dv.setAlias(""); dataverseOps.createNewDataverse("rspace", dv); } @Test public void 
createDataverseValidationContactRequired () { String dvName = RandomStringUtils.randomAlphabetic(10); DataversePost dv = createADataverse(dvName); dv.setDataverseContacts(null); Assert.assertThrows(NullPointerException.class, ()->dataverseOps.createNewDataverse("rspace", dv)); } @Test public void testGetDataverseById() { DataverseGet dv = dataverseOps.getDataverseById(dataverseAlias); assertNotNull(dv.getId()); assertTrue(dv.getContactEmails().size() > 0); } } <file_sep>/src/test/java/com/researchspace/dataverse/entities/facade/DatasetBuilderTest.java /* * */ package com.researchspace.dataverse.entities.facade; import static com.researchspace.dataverse.entities.facade.DatasetTestFactory.*; import java.net.MalformedURLException; import java.net.URISyntaxException; import org.junit.After; import org.junit.Before; import org.junit.Test; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; import com.researchspace.dataverse.entities.Dataset; import com.researchspace.dataverse.entities.facade.DatasetBuilder; import com.researchspace.dataverse.entities.facade.DatasetFacade; /** <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
</pre> */ public class DatasetBuilderTest { DatasetBuilder builder; @Before public void setUp() throws Exception { builder = new DatasetBuilder(); } @After public void tearDown() throws Exception { } @Test public void test() throws JsonProcessingException, MalformedURLException, URISyntaxException { DatasetFacade facade = createFacade(); ObjectWriter mapper = new ObjectMapper().writerWithDefaultPrettyPrinter(); Dataset dversion = builder.build(facade); String json = mapper.writeValueAsString(dversion); System.out.println(json); } } <file_sep>/src/main/java/com/researchspace/dataverse/api/v1/InfoOperations.java /* * */ package com.researchspace.dataverse.api.v1; import com.researchspace.dataverse.entities.DataverseResponse; import com.researchspace.dataverse.entities.DvMessage; /** * <pre> * Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
</pre> <p/> * *Wrapper for InfoOperations: *<pre> *GET http://$SERVER/api/info/settings/:DatasetPublishPopupCustomText *and *url -X PUT -d "publish" https://demo.dataverse.org/api/admin/settings/:DatasetPublishPopupCustomText *</pre> *and * */ public interface InfoOperations { DvMessage getDatasetPublishPopupCustomText () ; /** * Deprecated, does not work for client calls from non-Localhost URLs from Dataverse 4.8 onwards */ DataverseResponse<Object> setDatasetPublishPopupCustomText (String text) ; } <file_sep>/src/main/java/com/researchspace/dataverse/http/FileUploadMetadata.java package com.researchspace.dataverse.http; import lombok.Builder; import lombok.Data; import java.util.List; /** * Request object for metadata included with native file upload to an existing dataset */ @Data @Builder public class FileUploadMetadata { private String description; private String directoryLabel; private List<String> categories; private boolean restrict; private boolean tabIngest; } <file_sep>/src/main/java/com/researchspace/dataverse/http/AbstractOpsImplV1.java /* * */ package com.researchspace.dataverse.http; import com.researchspace.dataverse.api.v1.DataverseConfig; import com.researchspace.dataverse.entities.DataverseResponse; import com.researchspace.springrest.ext.LoggingResponseErrorHandler; import com.researchspace.springrest.ext.RestUtil; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang.StringUtils; import org.springframework.http.HttpHeaders; import org.springframework.http.MediaType; import org.springframework.http.ResponseEntity; import org.springframework.web.client.RestClientException; import org.springframework.web.client.RestTemplate; import java.util.Arrays; /** <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. </pre> */ @Slf4j public abstract class AbstractOpsImplV1 { String apiKey = ""; String serverURL = ""; String serverAPIURL = serverURL +"/api"; String serverAPIv1URL = serverAPIURL +"/v1"; protected RestTemplate template; public AbstractOpsImplV1() { super(); this.template = createTemplate(); } protected void setTemplate(RestTemplate template) { this.template = template; } public final static String apiHeader = "X-Dataverse-key"; public void setApiKey(String apiKey) { this.apiKey = apiKey; } public void setServerURL(String serverURL) { this.serverURL = serverURL; this.serverAPIURL = serverURL + "/api"; this.serverAPIv1URL = this.serverAPIURL +"/v1"; } public void configure(DataverseConfig config) { setApiKey(config.getApiKey()); setServerURL(config.getServerURL().toString()); } <T> void handleError(ResponseEntity<DataverseResponse<T>> resp) { log.debug("{}", resp.getBody()); if (RestUtil.isError(resp.getStatusCode())) { String msg = String.format("Error code returned %d with message [%s]", resp.getStatusCodeValue(), resp.getBody().getMessage()); log.error(msg); throw new RestClientException(msg); } } RestTemplate createTemplate() { RestTemplate template = new RestTemplate(); template.setErrorHandler(new LoggingResponseErrorHandler()); return template; } String createV1Url(String ... pathComponents) { String url = serverAPIv1URL + "/" + StringUtils.join(pathComponents, "/") ; log.info("URL is {}", url); return url; } String createAdminUrl(String ... 
pathComponents) { String url = serverAPIURL + "/" + StringUtils.join(pathComponents, "/") ; log.info("URL is {}", url); return url; } HttpHeaders addAPIKeyToHeader() { HttpHeaders headers = new HttpHeaders(); headers.setContentType(MediaType.APPLICATION_JSON); headers.setAccept(Arrays.asList(MediaType.APPLICATION_JSON)); addApiKey(headers); return headers; } void addApiKey(HttpHeaders headers) { headers.add(apiHeader, apiKey); } } <file_sep>/src/integration-test/resources/test.properties dataverseServerURL=https://demo.dataverse.org dataverseAlias=otter606<file_sep>/Contributors.md ## Contributors Initial code contribution: <NAME> (otter606). PRs and contributions: <NAME> (AleixMT) <file_sep>/src/test/java/com/researchspace/dataverse/http/SearchURLBuilderTest.java /* * */ package com.researchspace.dataverse.http; import static org.junit.Assert.assertTrue; import java.util.EnumSet; import org.junit.After; import org.junit.Before; import org.junit.Test; import com.researchspace.dataverse.search.entities.SearchConfig; import com.researchspace.dataverse.search.entities.SearchType; import com.researchspace.dataverse.search.entities.SortBy; import com.researchspace.dataverse.search.entities.SortOrder; /** <pre> Copyright 2016 ResearchSpace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
</pre> */ public class SearchURLBuilderTest { SearchURLBuilder builder; @Before public void setUp() throws Exception { builder = new SearchURLBuilder(); } @After public void tearDown() throws Exception { } @Test public void testBuildSearchUrlTypes() { SearchConfig cfg = SearchConfig.builder() .q("any") .type(EnumSet.of(SearchType.dataset, SearchType.file)) .build(); String url = builder.buildSearchUrl("/any", cfg); assertTrue(url, url.contains("type=dataset&type=file")); } @Test public void testBuildSearchUrlSort() { SearchConfig cfg = SearchConfig.builder() .q("any") .sortBy(SortBy.name) .build(); String url = builder.buildSearchUrl("/any", cfg); assertTrue(url, url.contains("sort=name")); } @Test public void testBuildSearchUrlOrder() { SearchConfig cfg = SearchConfig.builder() .q("any") .sortOrder(SortOrder.asc) .build(); String url = builder.buildSearchUrl("/any", cfg); assertTrue(url, url.contains("order=asc")); } @Test public void testBuildSearchUrlStart() { SearchConfig cfg = SearchConfig.builder() .q("any") .start(10) .build(); String url = builder.buildSearchUrl("/any", cfg); assertTrue(url, url.contains("start=10")); } @Test public void testBuildSearchUrlPerPage() { SearchConfig cfg = SearchConfig.builder() .q("any") .perPage(5) .build(); String url = builder.buildSearchUrl("/any", cfg); assertTrue(url, url.contains("per_page=5")); } }
e28cf849d9cace94186e8fc4cf7b1f1e38a90880
[ "Markdown", "Java", "INI", "Gradle" ]
22
Java
IQSS/dataverse-client-java
22da346d8263c5feb768b0bc7571de9b72e4dbca
3ce34db5c71d2f45d547b9203b86dc1b7b13a3fa
refs/heads/master
<file_sep>// // BaseResponse.swift // NotificiationList // // Created by <NAME> on 4.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import Foundation // YOUR JSON FILE VALUES class BaseResponse : Codable { let value : [Base] let message : String let code : Int let error : Bool } <file_sep>// // RoundedImageView.swift // NotificiationList // // Created by <NAME> on 1.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import UIKit class RoundedImageView: UIImageView { override func awakeFromNib() { super.awakeFromNib() self.clipsToBounds = true self.layer.cornerRadius = self.layer.bounds.height / 2 } } <file_sep>// // EventMedia.swift // NotificiationList // // Created by <NAME> on 5.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import Foundation // YOUR JSON FILE VALUES class EventMedia: Codable { let url : String } <file_sep>// // NotificationResponse.swift // NotificiationList // // Created by <NAME> on 1.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import Foundation // YOUR JSON FILE VALUES struct NotificationResponse: Decodable { let message: String let code: Int let error: Bool let notifications: [Notification] enum CodingKeys: String, CodingKey { case message case code case error case value case notifications = "notification" } init(from decoder: Decoder) throws { let container = try decoder.container(keyedBy: CodingKeys.self) message = try container.decode(String.self, forKey: .message) code = try container.decode(Int.self, forKey: .code) error = try container.decode(Bool.self, forKey: .error) let value = try container.nestedContainer(keyedBy: CodingKeys.self, forKey: .value) notifications = try value.decode([Notification].self, forKey: .notifications) } } <file_sep>// // File.swift // NotificiationList // // Created by <NAME> on 1.03.2019. // Copyright © 2019 <NAME>. All rights reserved. 
// import Foundation <file_sep># YOUR_SOCIAL_MEDIA_WITH_YOUR_API #Screenshot ![2](https://user-images.githubusercontent.com/24847947/54753213-8c074900-4bf1-11e9-9090-18ed22e79c38.png) ![3](https://user-images.githubusercontent.com/24847947/54753214-8c9fdf80-4bf1-11e9-919f-0f8323bc0c5f.png) ![1](https://user-images.githubusercontent.com/24847947/54753215-8c9fdf80-4bf1-11e9-8fc5-2f00ed7f799d.png) <file_sep>// // Notificiation.swift // NotificiationList // // Created by <NAME> on 2.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import Foundation // YOUR JSON FILE VALUES struct Notification : Decodable { let user : Users let message : String let transactionTime : String let serverTime : String let isSeen : Bool } <file_sep>// // MainPageTableViewCell.swift // NotificiationList // // Created by <NAME> on 4.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import UIKit import Kingfisher class MainPageTableViewCell: UITableViewCell { @IBOutlet weak var userImageView: RoundedImageView! @IBOutlet weak var activityImageView: UIImageView! @IBOutlet weak var userNameLabel: UILabel! @IBOutlet weak var dateLabel: UILabel! @IBOutlet weak var eventLabel: UILabel! var mainResponse : Base? { didSet{ guard let mainResponse = mainResponse else { return } self.activityImageView.kf.setImage(with: URL(string: mainResponse.imageUrl)!) self.userNameLabel.text = mainResponse.userName self.userImageView.kf.setImage(with: URL(string: mainResponse.userAvatarUrl)!) } } } <file_sep>// // EventDetailViewController.swift // NotificiationList // // Created by <NAME> on 5.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import UIKit import Kingfisher class EventDetailViewController: UIViewController { @IBOutlet weak var eventImageView: UIImageView! @IBOutlet weak var eventTitle: UILabel! @IBOutlet weak var eventDescriptionField: UITextView! 
private var eventResponse : EventResponse?{ didSet{ eventTitle.text = eventResponse?.value.title eventDescriptionField.text = eventResponse?.value.eventDescription eventImageView.kf.setImage(with: URL(string: (eventResponse?.value.media[0].url)!)) } } override func viewDidLoad() { super.viewDidLoad() ApiManager.shared.callEventDetail(class: EventResponse.self, success: { (responseObject) in if !responseObject.error{ print(responseObject.message) self.eventResponse = responseObject return } }) { (error) in print(error.localizedDescription) } } } <file_sep>// // ViewController.swift // NotificiationList // // Created by <NAME> on 1.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import UIKit class ViewController: UIViewController { @IBOutlet weak var tableView: UITableView! private var notifications: [Notification]? { didSet { self.tableView.reloadData() } } override func viewDidLoad() { super.viewDidLoad() getAllNotification() tableView.delegate = self tableView.dataSource = self self.tableView.register(UINib(nibName: "NotificationTableViewCell", bundle: nil), forCellReuseIdentifier: "notificationCell") } private func getAllNotification(){ ApiManager.shared.call(class: NotificationResponse.self, success: { (responseObject) in if !responseObject.error { print(responseObject.message) self.notifications = responseObject.notifications return } }) { (error) in print(error.localizedDescription) } } } extension ViewController: UITableViewDelegate, UITableViewDataSource { func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int { return self.notifications?.count ?? 0 } func tableView(_ tableView: UITableView, willDisplay cell: UITableViewCell, forRowAt indexPath: IndexPath) { guard let cell = cell as? 
NotificationTableViewCell, let notification = self.notifications?[indexPath.row] else { return } cell.notification = notification } func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell { guard let cell = tableView.dequeueReusableCell(withIdentifier: "notificationCell", for: indexPath) as? NotificationTableViewCell else { return UITableViewCell() } return cell } } <file_sep># Uncomment the next line to define a global platform for your project # platform :ios, '9.0' use_frameworks! target 'NotificiationList' do pod 'Alamofire' pod 'SwiftyJSON' pod 'Kingfisher' end <file_sep>// // MainPageViewController.swift // NotificiationList // // Created by <NAME> on 4.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import UIKit class MainPageViewController: UIViewController { @IBOutlet weak var tableView: UITableView! private var mainResponse : BaseResponse? { didSet{ self.tableView.reloadData() } } override func viewDidLoad() { super.viewDidLoad() getMainPageData() tableView.delegate = self tableView.dataSource = self self.tableView.register(UINib(nibName: "MainPageTableViewCell", bundle: nil), forCellReuseIdentifier: "mainPageCell") } private func getMainPageData(){ ApiManager.shared.callHomeFeed(class: BaseResponse.self, success: { (responseObject) in if !responseObject.error{ print(responseObject.message) self.mainResponse = responseObject return } }) { (error) in print(error.localizedDescription) } } } extension MainPageViewController : UITableViewDataSource, UITableViewDelegate{ func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int { return self.mainResponse?.value.count ?? 0 } func tableView(_ tableView: UITableView, willDisplay cell: UITableViewCell, forRowAt indexPath: IndexPath) { guard let cell = cell as? 
MainPageTableViewCell, let response = self.mainResponse?.value[indexPath.row] else { return } cell.mainResponse = response } func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell { guard let cell = tableView.dequeueReusableCell(withIdentifier: "mainPageCell", for: indexPath) as? MainPageTableViewCell else { return UITableViewCell() } return cell } func tableView(_ tableView: UITableView, heightForRowAt indexPath: IndexPath) -> CGFloat { return 260 } } <file_sep>// // ApiManager.swift // NotificiationList // // Created by <NAME> on 2.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import Foundation import Alamofire final class ApiManager { static let shared = ApiManager() private let parameters : Parameters = [ "//PARAMETERS_HERE" : "VALUE_HERE" ] private let parametersHomeFeed : Parameters = [ "//PARAMETERS_HERE" : "VALUE_HERE" ] private let parametersEventDetail : Parameters = [ "//PARAMETERS_ERE" : "VALUE_HERE" ] struct Api { static let baseUrl = "BASE_URL_HERE" } private let header : HTTPHeaders = ["Authorization" : "API_KEY_HERE" , "Content-Type" : "CONTENT_TYPE_HERE"] func call <T:Decodable> (class model: T.Type, success : @escaping (T) -> Void, error : @escaping (Error) -> Void) { Alamofire.request(Api.baseUrl+"/account/UserNotifications", method: .post, parameters: parameters, encoding: URLEncoding.default, headers: header).response { (response) in guard let data = response.data else { fatalError("Data value is nil") } do{ let decodingObject = try JSONDecoder().decode(model.self, from: data) success(decodingObject) }catch let err { error(err) } } } func callHomeFeed <T:Decodable> (class model: T.Type, success : @escaping (T) -> Void, error : @escaping (Error) -> Void) { Alamofire.request(Api.baseUrl+"/Event/HomeFeed", method: .post, parameters: parametersHomeFeed, encoding: URLEncoding.default, headers: header).response { (response) in guard let data = response.data else { fatalError("Data value is nil") 
} do{ let decodingObject = try JSONDecoder().decode(model.self, from: data) success(decodingObject) }catch let err { error(err) } } } func callEventDetail <T:Decodable> (class model: T.Type, success : @escaping (T) -> Void, error : @escaping (Error) -> Void) { Alamofire.request(Api.baseUrl+"/Event/Detail", method: .post, parameters: parametersEventDetail, encoding: URLEncoding.default, headers: header).response { (response) in guard let data = response.data else { fatalError("Data value is nil") } do{ let decodingObject = try JSONDecoder().decode(model.self, from: data) success(decodingObject) }catch let err{ error(err) } } } } <file_sep>// // Users.swift // NotificiationList // // Created by <NAME> on 1.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import Foundation // YOUR JSON FILE VALUES struct Users : Codable { let userName : String let userID : Int let imageUrl : String } <file_sep>// // EventResponse.swift // NotificiationList // // Created by <NAME> on 5.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import Foundation // YOUR JSON FILE VALUES class EventResponse : Codable { let value : EventDetail let error : Bool let code : Int let message : String } <file_sep>// // NotificationTableViewCell.swift // NotificiationList // // Created by <NAME> on 1.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import UIKit import Kingfisher class NotificationTableViewCell: UITableViewCell { @IBOutlet weak var userImageView: RoundedImageView! @IBOutlet weak var descriptionLabel: UILabel! @IBOutlet weak var dateLabel: UILabel! var notification: Notification? { didSet { guard let notification = notification else { return } self.userImageView.kf.setImage(with: URL(string: notification.user.imageUrl)!) 
self.descriptionLabel.text = notification.message // let dateFormatter = DateFormatter() // dateFormatter.dateFormat = "yyyy-MM-dd" // let date = dateFormatter.date(from: notification.serverTime) // GET DATE OBJECT WITH THIS SITE // https://www.nsdateformatter.com date nasıl çevirilir? // self.dateLabel.text = notification. } } } <file_sep>// // Base.swift // NotificiationList // // Created by <NAME> on 4.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import Foundation // YOUR JSON FILE VALUES class Base : Codable { let userName : String let imageUrl : String let userAvatarUrl : String let eventTitle : String } <file_sep>// // EventDetail.swift // NotificiationList // // Created by <NAME> on 5.03.2019. // Copyright © 2019 <NAME>. All rights reserved. // import Foundation // YOUR JSON FILE VALUES class EventDetail : Codable { let title : String let eventDescription : String let media : [EventMedia] }
7061e234554d640629e62a3bc7caea1581096f72
[ "Swift", "Ruby", "Markdown" ]
18
Swift
frknbyhn/YOUR_SOCIAL_MEDIA_WITH_YOUR_API
b37ec596842e9ee0985754fafb3eaaae9f39d906
d90dfc49ebd1a1e32c8707dc97e79de5a54adf36
refs/heads/master
<repo_name>symptommap/rshinytemperaturemap<file_sep>/app.R # # This is a Shiny web application. You can run the application by clicking # the 'Run App' button above. # # Find out more about building applications with Shiny here: # # http://shiny.rstudio.com/ # library(shiny) library(httr) library(jsonlite) library(leaflet) library(DBI) library(pool) library(ggplot2) library(ggpubr) library("RPostgres") library(plotly) library(data.table) columns <- c('time','exact') # Define UI for application that draws a histogram ui <- fluidPage( # Application title titlePanel("Crowd sourced temperature data map."), tags$head(tags$script(src = "message-handler.js")), tags$script( ' $(document).ready(function () { navigator.geolocation.getCurrentPosition(onSuccess, onError); $.getJSON(\'http://ip-api.com/json?callback=?\', function(data) { Shiny.setInputValue("ipaddress", data.query); }); function onError (err) { Shiny.onInputChange("geolocation", false); } function onSuccess (position) { setTimeout(function () { var coords = position.coords; console.log(coords.latitude + ", " + coords.longitude); Shiny.setInputValue("geolocation", true); Shiny.setInputValue("lat", coords.latitude); Shiny.setInputValue("long", coords.longitude); }, 1100) } }); ' ), # Sidebar with a slider input for number of bins sidebarLayout( sidebarPanel( # Show a plot of the generated distribution verbatimTextOutput("lat"), verbatimTextOutput("long"), verbatimTextOutput("geolocation"), verbatimTextOutput("ipaddress"), textInput(label="temperature", inputId = "temperature"), actionButton("dosubmit", "Submit temperature"), actionButton("domap", "Update map") #DT::dataTableOutput("readings") ), mainPanel(leafletOutput("readingsmap", height = "800")) ) ) updatemap <- function(input, output) { urltemperature <- "http://127.0.0.1:3000/temperature" r <- GET(url=urltemperature, encode = "json", verbose()) rtext <-content(r,as="text") output$readingsmap <- renderLeaflet({ leaflet() %>% addTiles() %>% 
setView(input$long, input$lat, zoom = 10) %>% addMarkers( lng = ~ longditude, lat = ~ latitude, data = fromJSON(rtext), clusterOptions = markerClusterOptions(freezeAtZoom = 10) ) }) } # Define server logic required to draw a histogram server <- function(input, output) { output$lat <- renderPrint({ input$lat }) output$long <- renderPrint({ input$long }) output$geolocation <- renderPrint({ input$geolocation }) output$ipaddress <- renderPrint({ input$ipaddress }) updatemap(input,output) observeEvent(input$domap, { updatemap(input,output) }) observeEvent(input$dosubmit, { urltemperature <- "http://127.0.0.1:3000/temperature" temperaturebody <- list(exact=input$temperature, latitude=input$lat, longditude=input$long, time=Sys.time(), ipaddress=input$ipaddress) r <- POST(urltemperature, body = temperaturebody, encode = "json", verbose()) }) } # Run the application shinyApp(ui = ui, server = server)
00e85758d75b6dc549ae66a2d4163cd2a186647c
[ "R" ]
1
R
symptommap/rshinytemperaturemap
ae5d18c21f69d22566db23657c93f5c47bce1dd5
e0478d285eb3b27512949534c50f6cefe3b2e905
refs/heads/master
<repo_name>venkateshwaralu-Testing/GitDemo1<file_sep>/testA8Flow/test_e2eA8.py import time from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.support import expected_conditions as EC driver = webdriver.Chrome(executable_path="C://Users//Venkateshwaranlu//Documents//chromedriver.exe") driver.get("https://dev-flow.autonom8.com/#/signin") driver.maximize_window() driver.find_element(By.ID, 'username').send_keys("superAdmin") driver.find_element(By.ID, "password").send_keys("<PASSWORD>!") driver.find_element(By.ID,"loginBtn").click() # time.sleep(5) wait = WebDriverWait(driver,10) wait.until(EC.presence_of_element_located((By.CLASS_NAME, "card-grid-assignee"))) cards = driver.find_elements(By.XPATH,"//div[@class='card-grid-footer']") for card in cards: CardName = card.find_element(By.XPATH, "div/div/span[2]").text print(CardName) if CardName == "DemoHideShow": card.find_element(By.XPATH, "div/div/span[2]").click() driver.switch_to.frame("iframe") CardNames = driver.find_elements(By.XPATH,'//div[@class="sections_sectionheader__2RYkq"]') for CardName in CardNames: FindCardName = CardName.find_element(By.XPATH,'div').text print(FindCardName) if FindCardName == "section1": CardName.find_element(By.XPATH,'div').click()
a2f265ebc4a7babbc4de8b91582374e2102e6438
[ "Python" ]
1
Python
venkateshwaralu-Testing/GitDemo1
9ee8ebce9cd0ddd7946e9ab37c06dcdfca15278a
03d79c347ad027fee7e80c8519c80041eb0ad88a
refs/heads/master
<file_sep>namespace ExCSS { internal sealed class JustifyContentProperty : Property { private static readonly IValueConverter StyleConverter = Converters.JustifyContentConverter; public JustifyContentProperty() : base(PropertyNames.JustifyContent) { } internal override IValueConverter Converter => StyleConverter; } }<file_sep>using System; using System.Collections.Generic; namespace ExCSS { public sealed class PseudoClassSelectorFactory { private static readonly Lazy<PseudoClassSelectorFactory> Lazy = new(() => { var factory = new PseudoClassSelectorFactory(); Selectors.Add(PseudoElementNames.Before, PseudoElementSelectorFactory.Instance.Create(PseudoElementNames.Before)); Selectors.Add(PseudoElementNames.After, PseudoElementSelectorFactory.Instance.Create(PseudoElementNames.After)); Selectors.Add(PseudoElementNames.FirstLine, PseudoElementSelectorFactory.Instance.Create(PseudoElementNames.FirstLine)); Selectors.Add(PseudoElementNames.FirstLetter, PseudoElementSelectorFactory.Instance.Create(PseudoElementNames.FirstLetter)); return factory; } ); #region Selectors private static readonly Dictionary<string, ISelector> Selectors = new(StringComparer.OrdinalIgnoreCase) { { PseudoClassNames.Root, SimpleSelector.PseudoClass(PseudoClassNames.Root) }, { PseudoClassNames.Scope, SimpleSelector.PseudoClass(PseudoClassNames.Scope) }, { PseudoClassNames.OnlyType, SimpleSelector.PseudoClass(PseudoClassNames.OnlyType) }, { PseudoClassNames.FirstOfType, SimpleSelector.PseudoClass(PseudoClassNames.FirstOfType) }, { PseudoClassNames.LastOfType, SimpleSelector.PseudoClass(PseudoClassNames.LastOfType) }, { PseudoClassNames.OnlyChild, SimpleSelector.PseudoClass(PseudoClassNames.OnlyChild) }, { PseudoClassNames.FirstChild, SimpleSelector.PseudoClass(PseudoClassNames.FirstChild) }, { PseudoClassNames.LastChild, SimpleSelector.PseudoClass(PseudoClassNames.LastChild) }, { PseudoClassNames.Empty, SimpleSelector.PseudoClass(PseudoClassNames.Empty) }, { PseudoClassNames.AnyLink, 
SimpleSelector.PseudoClass(PseudoClassNames.AnyLink) }, { PseudoClassNames.Link, SimpleSelector.PseudoClass(PseudoClassNames.Link) }, { PseudoClassNames.Visited, SimpleSelector.PseudoClass(PseudoClassNames.Visited) }, { PseudoClassNames.Active, SimpleSelector.PseudoClass(PseudoClassNames.Active) }, { PseudoClassNames.Hover, SimpleSelector.PseudoClass(PseudoClassNames.Hover) }, { PseudoClassNames.Focus, SimpleSelector.PseudoClass(PseudoClassNames.Focus) }, { PseudoClassNames.FocusVisible, SimpleSelector.PseudoClass(PseudoClassNames.FocusVisible) }, { PseudoClassNames.FocusWithin, SimpleSelector.PseudoClass(PseudoClassNames.FocusWithin) }, { PseudoClassNames.Target, SimpleSelector.PseudoClass(PseudoClassNames.Target) }, { PseudoClassNames.Enabled, SimpleSelector.PseudoClass(PseudoClassNames.Enabled) }, { PseudoClassNames.Disabled, SimpleSelector.PseudoClass(PseudoClassNames.Disabled) }, { PseudoClassNames.Default, SimpleSelector.PseudoClass(PseudoClassNames.Default) }, { PseudoClassNames.Checked, SimpleSelector.PseudoClass(PseudoClassNames.Checked) }, { PseudoClassNames.Indeterminate, SimpleSelector.PseudoClass(PseudoClassNames.Indeterminate) }, { PseudoClassNames.PlaceholderShown, SimpleSelector.PseudoClass(PseudoClassNames.PlaceholderShown) }, { PseudoClassNames.Unchecked, SimpleSelector.PseudoClass(PseudoClassNames.Unchecked) }, { PseudoClassNames.Valid, SimpleSelector.PseudoClass(PseudoClassNames.Valid) }, { PseudoClassNames.Invalid, SimpleSelector.PseudoClass(PseudoClassNames.Invalid) }, { PseudoClassNames.Required, SimpleSelector.PseudoClass(PseudoClassNames.Required) }, { PseudoClassNames.ReadOnly, SimpleSelector.PseudoClass(PseudoClassNames.ReadOnly) }, { PseudoClassNames.ReadWrite, SimpleSelector.PseudoClass(PseudoClassNames.ReadWrite) }, { PseudoClassNames.InRange, SimpleSelector.PseudoClass(PseudoClassNames.InRange) }, { PseudoClassNames.OutOfRange, SimpleSelector.PseudoClass(PseudoClassNames.OutOfRange) }, { PseudoClassNames.Optional, 
SimpleSelector.PseudoClass(PseudoClassNames.Optional) }, { PseudoClassNames.Shadow, SimpleSelector.PseudoClass(PseudoClassNames.Shadow) } }; #endregion internal static PseudoClassSelectorFactory Instance => Lazy.Value; public ISelector Create(string name) { return Selectors.TryGetValue(name, out var selector) ? selector : null; } } }<file_sep>namespace ExCSS { public enum JustifyContent : byte { Start, Center, SpaceBetween, SpaceAround, SpaceEvenly } }<file_sep>using System.Linq; namespace ExCSS.Tests { using ExCSS; using Xunit; public class JustifyContentPropertyTests : CssConstructionFunctions { [Fact] public void JustifyContentCenter() { var snippet = "justify-content: center"; var property = ParseDeclaration(snippet); Assert.Equal("justify-content", property.Name); Assert.False(property.IsImportant); Assert.IsType<JustifyContentProperty>(property); var concrete = (JustifyContentProperty)property; Assert.False(concrete.IsInherited); Assert.True(concrete.HasValue); Assert.Equal("center", concrete.Value); } [Fact] public void JustifyContentStart() { var snippet = "justify-content: start"; var property = ParseDeclaration(snippet); Assert.Equal("justify-content", property.Name); Assert.False(property.IsImportant); Assert.IsType<JustifyContentProperty>(property); var concrete = (JustifyContentProperty)property; Assert.False(concrete.IsInherited); Assert.True(concrete.HasValue); Assert.Equal("start", concrete.Value); } [Fact] public void JustifyContentSpaceAround() { var snippet = "justify-content: space-around"; var property = ParseDeclaration(snippet); Assert.Equal("justify-content", property.Name); Assert.False(property.IsImportant); Assert.IsType<JustifyContentProperty>(property); var concrete = (JustifyContentProperty)property; Assert.False(concrete.IsInherited); Assert.True(concrete.HasValue); Assert.Equal("space-around", concrete.Value); } [Fact] public void JustifyContentSpaceBetween() { var snippet = "justify-content: space-between"; var property = 
ParseDeclaration(snippet); Assert.Equal("justify-content", property.Name); Assert.False(property.IsImportant); Assert.IsType<JustifyContentProperty>(property); var concrete = (JustifyContentProperty)property; Assert.False(concrete.IsInherited); Assert.True(concrete.HasValue); Assert.Equal("space-between", concrete.Value); } [Fact] public void JustifyContentSpaceEvenly() { var snippet = "justify-content: space-evenly"; var property = ParseDeclaration(snippet); Assert.Equal("justify-content", property.Name); Assert.False(property.IsImportant); Assert.IsType<JustifyContentProperty>(property); var concrete = (JustifyContentProperty)property; Assert.False(concrete.IsInherited); Assert.True(concrete.HasValue); Assert.Equal("space-evenly", concrete.Value); } [Fact] public void ParseJustifyContent() { var parser = new StylesheetParser(); var sheet = parser.Parse("a {justify-content: space-evenly; }"); var style = ((StyleRule)sheet.StyleRules.First()).Style; Assert.Equal("space-evenly", style.JustifyContent); } } }
d730e2fb144d661de319fc2c1304a9fd744e4894
[ "C#" ]
4
C#
ManuelHaas/ExCSS
9916ad14860a6044009f5be33a13114c106cbdfc
030f3007257dd6b74412de00e48b6d27ae6f8219
refs/heads/master
<repo_name>mrpiotr-dev/dotnetmvc-hot-simple-example<file_sep>/README.md ## Prerequisites - install `dotnet` SDK - https://dotnet.microsoft.com/learn/aspnet/hello-world-tutorial/install ## Description I based this simple implementation on the official tutorial: " ASP.NET Tutorial - Hello World in 10 minutes" https://dotnet.microsoft.com/learn/aspnet/hello-world-tutorial//intro. I want to show how you can add and use Handsontable in your `dotnet mvc` project. I used the newest Handsontable and Axios libraries. Axios provides a powerful API to communicate with the server. I showed two simple actions - how to load data after page load and how to collect changes on the server-side. Any other functionalities like: - server-side search, - inserting new rows, - removing rows, might work analogously. <file_sep>/Models/User.cs using System; namespace DotnetHandsontableExample.Models { public class User { private int id; private string firstName; private string lastName; private string email; private bool active; public User(int id, string firstName, string lastName, string email, bool active) { this.id = id; this.firstName = firstName; this.lastName = lastName; this.email = email; this.active = active; } public int Id { get { return id; } set { id = value; } } public string FirstName { get { return firstName; } set { firstName = value; } } public string LastName { get { return lastName; } set { lastName = value; } } public string Email { get { return email; } set { email = value; } } public bool Active { get { return active; } set { active = value; } } } } <file_sep>/Controllers/HomeController.cs using System; using System.Collections.Generic; using System.Diagnostics; using System.Linq; using System.Threading.Tasks; using Microsoft.AspNetCore.Mvc; using Microsoft.Extensions.Logging; using DotnetHandsontableExample.Models; namespace DotnetHandsontableExample.Controllers { public class HomeController : Controller { private readonly ILogger<HomeController> _logger; public 
HomeController(ILogger<HomeController> logger) { _logger = logger; } public IActionResult Index() { return View(); } public JsonResult GetUsers() { return Json(new Object[] { new User(1, "AAAA", "zzzz", "<EMAIL>", true), new User(2, "BBBB", "yyyy", "<EMAIL>", true), new User(3, "CCCC", "xxxx", "<EMAIL>", false), new User(4, "DDDD", "wwww", "<EMAIL>", true), new User(5, "EEEE", "vvvv", "<EMAIL>", true), }); } [HttpPut] public JsonResult UpdateUser(int id, [FromBody] object user) { return Json(new Object[] { id, user }); } [ResponseCache(Duration = 0, Location = ResponseCacheLocation.None, NoStore = true)] public IActionResult Error() { return View(new ErrorViewModel { RequestId = Activity.Current?.Id ?? HttpContext.TraceIdentifier }); } } }
743a622831396194d69f5ffda4a8971cbbfce37e
[ "Markdown", "C#" ]
3
Markdown
mrpiotr-dev/dotnetmvc-hot-simple-example
642e93514b8ee185a50ad181e67ec8bd2280d5fc
69459c443723195807fd3db105c7cdca0804d0fb
refs/heads/master
<repo_name>kennycaiguo/studygo-4<file_sep>/project/config/main.go package main import ( "fmt" goconf "github.com/Unknwon/goconfig" ) var ( ConfigFile = "config.ini" ) func main() { Conf, err := goconf.LoadConfigFile(ConfigFile) if err != nil { fmt.Println("can't parse config file:" + ConfigFile) } v, err := Conf.GetValue("Demo", "key2") if err != nil { fmt.Println("can't get config value {key2}:" + v) } else { fmt.Println("Demo.key2 = " + v) } }<file_sep>/snippet/httpClient.go package main import ( "compress/gzip" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "os" "reflect" ) var ( Url = "http://ip.taobao.com/service/getIpInfo.php?ip=myip" Header = map[string]string{ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Connection": "keep-alive", "Accept-Encoding": "gzip", "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0", } ) type IpLocation struct { country, country_id, area, area_id, region, region_id, city, city_id, county_id, isp, isp_id, ip string } func main() { bodyByte, err := curl(Url, Header) if err != nil { fmt.Println("curl error:" + err.Error()) os.Exit(1) } dataJson, err := jsonDecode(bodyByte) if err != nil { fmt.Println("jsonDecode error:" + err.Error()) os.Exit(1) } data := dataJson["data"] fmt.Printf("%#v\n\n", data) dataMap := (data).(map[string]interface{}) fmt.Printf("IpLocation: %s: %s%s\n\n", dataMap["ip"], dataMap["country"], dataMap["region"]) for index, element := range data.(map[string]interface{}) { switch value := element.(type) { case int: fmt.Printf("list[%d]\t\t\t\t,value is %d\n", index, value) default: fmt.Printf("list[%s]\t\t\t\t,value is %s\n", index, value) } } ip, _ := getValue(data, "ip") country, _ := getValue(data, "country") fmt.Printf("IP: %s, location: %s", ip, country) } func curl(url string, Header map[string]string) ([]byte, error) { client := &http.Client{} reqest, err := http.NewRequest(http.MethodPost, url, nil) if err != nil { 
fmt.Println("http.NewRequest error: ", err.Error()) os.Exit(0) } for k, v := range Header { reqest.Header.Add(k, v) } response, err := client.Do(reqest) defer response.Body.Close() if err != nil { fmt.Println("http.Client.do error ", err.Error()) } if response.StatusCode >= 400 { return nil, errors.New("http.StatusCode: " + response.Status) } //需要在 switch 外面声明 bodyByte , switch 中声明的 bodyByte 为局部变量 var bodyByte []byte switch response.Header.Get("Content-Encoding") { case "gzip": reader, _ := gzip.NewReader(response.Body) defer reader.Close() bodyByte, _ = ioutil.ReadAll(reader) default: bodyByte, _ = ioutil.ReadAll(response.Body) } return bodyByte, nil } func jsonDecode(bodyByte []byte) (map[string]interface{}, error) { body := make(map[string]interface{}) err := json.Unmarshal(bodyByte, &body) if err != nil { return nil, err } return body, nil } func getValue(d interface{}, label string) (interface{}, bool) { switch reflect.TypeOf(d).Kind() { case reflect.Struct: v := reflect.ValueOf(d).FieldByName(label) return v.Interface(), true case reflect.Map: inter := reflect.ValueOf(d).Interface() dict := inter.(map[string]interface{}) if val, ok := dict[label]; ok { return val, ok } return nil, false } return nil, false } <file_sep>/snippet/interface.02.go package main import "fmt" func main() { var t interface{} t = 1 t0 := (t).(int) fmt.Printf("%v\t%v\n", t0, &t0) t = "ok" t1 := (t).(string) fmt.Printf("%v\t%v\n", t1, &t1) }<file_sep>/snippet/json-gjson.go package main import ( "fmt" "github.com/tidwall/gjson" ) var TestJson = 
`{"took":200,"timed_out":false,"_shards":{"total":11,"successful":11,"failed":0},"hits":{"total":648875,"max_score":0.0,"hits":[]},"aggregations":{"day":{"doc_count_error_upper_bound":0,"sum_other_doc_count":0,"buckets":[{"key":"20170417","doc_count":648875,"channel":{"doc_count_error_upper_bound":0,"sum_other_doc_count":0,"buckets":[{"key":"kingpin_09","doc_count":648875,"acttype":{"doc_count_error_upper_bound":0,"sum_other_doc_count":0,"buckets":[{"key":"show","doc_count":406595,"ct":{"value":139409}},{"key":"click","doc_count":242280,"ct":{"value":111973}}]}}]}}]}}}` func main() { result := gjson.Get(TestJson, "aggregations") iter(result) } func iter(value gjson.Result) []string { k := []string{} if value.Type.String() == "JSON" { value.ForEach(func(key, value gjson.Result) bool { a := gjson.Get(value.Raw, "value") if a.Exists() { fmt.Println(key, "'s value = ", a.String()) } b := gjson.Get(value.Raw, "buckets") if b.Exists() { iterArray(b) } fmt.Println(key) return true }) } return k } func iterArray(value gjson.Result) { value.ForEach(func(key, value gjson.Result) bool { iter(value) return true }) } <file_sep>/snippet/jsonWithUnknownFields.go package main import ( "encoding/json" "fmt" ) type Foo struct { A int `json:"a"` B int `json:"b"` X map[string]interface{} `json:"-"` // Rest of the fields should go here. 
} func main() { s := `{"a":1, "b":2, "x":1, "y":1}` f := Foo{} if err := json.Unmarshal([]byte(s), &f.X); err != nil { panic(err) } fmt.Printf("%+v\n\n", f) if n, ok := f.X["a"].(float64); ok { f.A = int(n) } if n, ok := f.X["b"].(float64); ok { f.B = int(n) } delete(f.X, "a") delete(f.X, "b") fmt.Printf("%+v", f) } <file_sep>/snippet/channel.go package main import ( "fmt" "time" ) var c chan int func ready(w string, sec int) { time.Sleep(time.Millisecond * time.Duration(sec)) fmt.Println(w, "is ready") c <- sec } func main() { c = make(chan int) go ready("Tee", 2) go ready("Coffee", 1) fmt.Println("I'm waiting") x, y := <-c, <-c fmt.Printf("x=%s, y=%s", x, y) } <file_sep>/tests/php_test.go package tests import ( "fmt" "github.com/vus520/studygo/utils" "runtime" "testing" "time" ) func Test_FileGetContents(t *testing.T) { if runtime.GOOS == "linux" || runtime.GOOS == "darwin" { if !utils.IsFile("/etc/passwd") { t.Error("/etc/password not found on Linux system") t.Fail() } if utils.IsFile("/youmusthavethisfileondisk-_-") { t.Error("file exists is NOT-reasonable.") t.Fail() } } else { t.Skip("not support system") } } func Test_FilePutContents(t *testing.T) { tmpfile := "/tmp/gotestputcontents.tmp" if runtime.GOOS == "linux" || runtime.GOOS == "darwin" { timestamp := fmt.Sprintf("randstr: %d", time.Now().Unix()) utils.FilePutContents(tmpfile, timestamp) data, _ := utils.FileGetContents(tmpfile) utils.Unlink(tmpfile) if timestamp != data { t.Error("file put contents not match read contents.") t.Fail() } } else { t.Skip("not support system") } } <file_sep>/snippet/array.go package main import "fmt" type User struct { Id int Name string } func main() { a := [...]User{ {0, "User0"}, {8, "User8"}, } b := [...]User{ {0, "User0"}, {8, "User8"}, } c := [...]*User{ {0, "User0"}, {8, "User8"}, } d := [...]*User{ {0, "User0"}, {8, "User8"}, } fmt.Println(a, len(a)) fmt.Println(b, len(b)) fmt.Println(c, len(c)) fmt.Println(d, len(d)) } <file_sep>/snippet/json.go package main 
import ( "fmt" "github.com/json-iterator/go" "github.com/vus520/studygo/utils" ) var ( IpApiUrl = "http://ip.taobao.com/service/getIpInfo2.php?ip=8.8.8.8" ) func test0(data string) { iter := jsoniter.ParseString(data) r := iter.Read() fmt.Println(r) } func test1(data string) { iter := jsoniter.ParseString(data) r := iter.Read() fmt.Println(r) } func main() { data, err := utils.FileGetContents(IpApiUrl) if err != nil { panic("FileGetContents returns " + err.Error()) return } test0(data) test1(data) } <file_sep>/snippet/pointers.02.go package main import ( "fmt" "strings" ) type User struct { Id int Name string } func test0() { up := User{0, "nobody"} up.Id = 1 up.Name = "Jack" fmt.Println("第一次值", up) u2 := up u2.Name = "Tom" fmt.Println("修改后原来的值", up) fmt.Println("修改的值", u2) } func test1() { up := &User{0, "nobody"} up.Id = 1 up.Name = "Jack" fmt.Println("第一次值", up) u2 := *up u2.Name = "Tom" fmt.Println("修改后原来的值", up) fmt.Println("修改的值", u2) } func test2() { up := User{0, "nobody"} up.Id = 1 up.Name = "Jack" fmt.Println("第一次值", up) u2 := &up u2.Name = "Tom" fmt.Println("修改后原来的值", up) fmt.Println("修改的值", u2) } /** -------------------------------------------------- 第一次值 {1 Jack} 修改后原来的值 {1 Jack} 修改的值 {1 Tom} -------------------------------------------------- 第一次值 &{1 Jack} 修改后原来的值 &{1 Jack} 修改的值 {1 Tom} -------------------------------------------------- 第一次值 {1 Jack} 修改后原来的值 {1 Tom} 修改的值 &{1 Tom} **/ func main() { fmt.Println(strings.Repeat("-", 50)) test0() fmt.Println(strings.Repeat("-", 50)) test1() fmt.Println(strings.Repeat("-", 50)) test2() }<file_sep>/lovebizhi.go package main import ( "fmt" "github.com/bitly/go-simplejson" "github.com/vus520/studygo/utils" "os" "path/filepath" "runtime" "strconv" "strings" "sync" "time" ) var ( DataRoot = "./tmp/lovebizhi/" PageUrl = 
"http://api.lovebizhi.com/macos_v4.php?a=category&spdy=1&tid=3&order=hot&device=105&uuid=436e4ddc389027ba3aef863a27f6e6f9&mode=0&retina=0&client_id=1008&device_id=31547324&model_id=105&size_id=0&channel_id=70001&screen_width=1920&screen_height=1200&bizhi_width=1920&bizhi_height=1200&version_code=19&language=zh-Hans&jailbreak=0&mac=&p={pid}" w sync.WaitGroup ) // 壁纸类型,有编号,长宽和URL type Wallpaper struct { Pid int Url string Width int Height int } // 将图片下载并保存到本地 func SaveImage(paper *Wallpaper) { //按分辨率目录保存图片 Dirname := DataRoot + strconv.Itoa(paper.Width) + "x" + strconv.Itoa(paper.Height) + "/" if !utils.IsDir(Dirname) { os.MkdirAll(Dirname, 0755) } //根据URL文件名创建文件 filename := Dirname + filepath.Base(paper.Url) if utils.IsFile(filename) { return } w.Add(1) timeStart := time.Now().Unix() Body, err := utils.FileGetContents(paper.Url) if err == nil { utils.FilePutContents(filename, Body) } timeEnd := time.Now().Unix() fmt.Printf("%d: %s, 用时: %d (%d-%d) 秒, err:%s\n", paper.Pid, paper.Url, timeEnd-timeStart, timeStart, timeEnd, err) w.Done() } func main() { runtime.GOMAXPROCS(12) for i := 1; i < 10; i++ { url := strings.Replace(PageUrl, "{pid}", strconv.Itoa(i), -1) fmt.Printf("Page %d: %s\n", i, url) body, err := utils.FileGetContents(url) if err != nil { fmt.Println(err) continue } js, err := simplejson.NewJson([]byte(body)) //遍历data下的所有数据 data := js.Get("data").MustArray() for _, v := range data { v := v.(map[string]interface{}) for kk, vv := range v { if kk == "file_id" { //这里 vv 是一个[]interface{} json.Number,不知道怎么取出值,这里用了比较傻的Sprintf vv := fmt.Sprintf("%s", vv) imgid, _ := strconv.Atoi(vv) url := fmt.Sprintf("http://s.qdcdn.com/c/%d,1920,1200.jpg", imgid) paper := &Wallpaper{imgid, url, 1920, 1200} go SaveImage(paper) } } } } w.Wait() fmt.Println("oh yes, all job done.") } <file_sep>/README.md # studygo studygo, gogogo run ==================== ``` go run kanmeizi.go go run lovebizhi.go ``` test ==================== ``` cd tests go test -v 
```<file_sep>/snippet/interface.go package main import ( "fmt" "reflect" "strconv" ) func main() { //interface类型 //interface类型定义了一组方法,如果某个对象实现了某个接口的"所有方法",则此对象就实现了此接口 //interface可以被任意的对象实现,一个对象可以实现任意多个interface //任意的类型都实现了空interface(我们这样定义:interface{}),也就是包含0个method的interface。 //interface的值 /* mike := student{Human{"mike", 25}, "110"} paul := student{Human{"paul", 26}, "120"} lucy := employee{Human{"lucy", 18}, "001"} lily := employee{Human{"lily", 20}, "002"} //定义common类型的接口变量co var co common //co能够存储mike co = mike co.sayHi() co.sing() //co能够存储paul co = paul co.sayHi() co.sing() //co能够存储lucy co = lucy co.sayHi() co.sing() //co能够存储lily co = lily co.sayHi() co.sing() */ //空interface //空interface(interface{})不包含任何的method,正因为如此,所有的类型都实现了空interface。 //空interface在我们需要存储任意类型的数值的时候相当有用,因为它可以存储任意类型的数值。 /* var nullInterface interface{} var i int = 5 var str string str = "Hello world" Jim := student{Human{"Jim", 27}, "101"} nullInterface = i nullInterface = str nullInterface = Jim //一个函数把interface{}作为参数,那么他可以接受任意类型的值作为参数, //如果一个函数返回interface{},那么也就可以返回任意类型的值。 userInterfaceParam(nullInterface) fmt.Println("...") */ //interface函数参数 //任何实现了String方法的类型都能作为参数被fmt.Println调用 //实现了error接口的对象(即实现了Error() string的对象), //使用fmt输出时,会调用Error()方法,因此不必再定义String()方法了 //interface变量存储的类型 //知道interface的变量里面可以存储任意类型的数值(该类型实现了interface)。 //怎么反向知道这个变量里面实际保存了的是哪个类型的对象 //Comma-ok断言 //Go语言里面有一个语法,可以直接判断是否是该类型的变量: value, ok = element.(T), //这里value就是变量的值,ok是一个bool类型,element是interface变量,T是断言的类型。 //如果element里面确实存储了T类型的数值,那么ok返回true,否则返回false。 //示例 type Element interface{} type List []Element list := make(List, 3) list[0] = 1 list[1] = "HelloWorld" list[2] = Human{"yang", 27} for index, element := range list { switch value := element.(type) { case int: fmt.Printf("list[%d] ,value is %d\n", index, value) case string: fmt.Printf("list[%d] ,value is %s\n", index, value) case Human: fmt.Printf("list[%d] ,value is %s\n ", index, value) default: fmt.Printf("list[%d] ,value is \n", index) } } //嵌入interface 
//如果一个interface1作为interface2的一个嵌入字段,那么interface2隐式的包含了interface1里面的method。 //反射 //1:反射成reflect对象-->2:对reflect对象进行操作,比如获取它的值,或修改它的值 //1:反射成reflect对象 //t := reflect.TypeOf(i) //得到类型的元数据,通过t我们能获取类型定义里面的所有元素 //v := reflect.ValueOf(i) //得到实际的值,通过v我们获取存储在里面的值,还可以去改变值 //2:对reflect对象进行操作,引入reflect包 //tag := t.Elem().Field(0).Tag //获取定义在struct里面的标签 //name := v.Elem().Field(0).String() //获取存储在第一个字段里面的值 //示例 //获取值和类型 var x float64 = 3.4 v := reflect.ValueOf(x) fmt.Println("type:", v.Type()) fmt.Println("kind is float64:", v.Kind() == reflect.Float64) fmt.Println("value:", v.Float()) //修改值 要使用引用 var f float32 = 2.9 ff := reflect.ValueOf(&f) ff.Elem().SetFloat(3.8) fmt.Println(f) //这们会出错 //ff := reflect.ValueOf(f) //ff.SetFloat(3.8) } type Human struct { name string age int } type student struct { Human schoolNumber string } type employee struct { Human employeeNumber string } func (h Human) sayHi() { fmt.Println("Hi!") } func (h Human) sing() { fmt.Println("la la la ~~") } func (s student) readBook() { fmt.Println(" reading book") } func (e employee) work() { fmt.Println("I'm working") } //Human、student、employee都实现了这个接口 type common interface { sayHi() sing() } //student实现了这个接口 type stuInterface interface { sayHi() sing() readBook() } //employee实现了这个接口 type empInterface interface { sayHi() sing() work() } //接收和返回interface类型,如果interface{}为空,那么它可以接收和返回任意类型的参数和值 func userInterfaceParam(i interface{}) interface{} { return i } func (h Human) String() string { return "(name: " + h.name + " - age: " + strconv.Itoa(h.age) + " years)" } <file_sep>/project/zip/zip.go package main import ( "archive/zip" "fmt" "io" "log" "os" "path/filepath" "strings" ) const MODE_ZIP string = "zip" //只备份到目标文件夹中对应的 const MODE_UNZIP string = "unzip" //只将源文件夹拷贝到对应目录,不进行备份 //TODO 不支持仅拷贝,太危险了 //传递两个参数进来 func main() { handleArgs() } func handleArgs() { args := os.Args length := len(args) if length < 4 { log.Println("args not enough") return } mode := os.Args[1] if !modeMatchs(mode) { panic("oh, bad mode !") } 
if strings.EqualFold(mode, MODE_ZIP) { sourceDirs := args[2 : length-1] //默认在当前目录创建压缩包 targetZipFile := args[length-1] doZip(sourceDirs, targetZipFile) } else if strings.EqualFold(mode, MODE_UNZIP) { sourceZipFile := args[2] targetUnzipDir := args[3] doUnzip(sourceZipFile, targetUnzipDir) } } func doUnzip(sourceZipFile, targetUnzipDir string) { if !strings.HasSuffix(sourceZipFile, ".zip") { panic("unzip source file must be zip format") } if err := makeParent(targetUnzipDir); err != nil { fmt.Printf("make target parnet err: %s\n", err) panic("make target parent dir failed") } log.Println("doUnzip --- sourceZipFile: " + sourceZipFile) log.Println("doUnzip --- targetUnzipDir: " + targetUnzipDir) DeCompress(sourceZipFile, targetUnzipDir) } func doZip(sourceDirs []string, targetZipFile string) { log.Println("targetZipFile: " + targetZipFile) if !strings.HasSuffix(targetZipFile, ".zip") { panic("target file must be zip format") } if err := makeParent(targetZipFile); err != nil { fmt.Printf("make parnet err: %s\n", err) panic("make target parent dir failed") } var files []*os.File for _, f := range sourceDirs { if dir, err := os.Open(f); err != nil { fmt.Printf("sourceFile open failed: %s\n", err) panic("sourceFile open failed") } else { files = append(files, dir) } } Compress(files, targetZipFile) } func makeParent(dir string) error { parent := filepath.Dir(dir) if exists, _ := PathExists(parent); exists { return nil } fmt.Println("make parent: " + parent) return makeSureDir(parent) } func makeSureDir(dir string) error { if exists, _ := PathExists(dir); !exists { // err := os.Mkdir(bkDir, os.ModePerm) fmt.Println("makeing dir: " + dir) err := os.MkdirAll(dir, os.ModePerm) if err != nil { fmt.Printf("mkdir failed![%v]\n", err) return err } } return nil } // 判断文件夹是否存在 func PathExists(path string) (bool, error) { _, err := os.Stat(path) if err == nil { return true, nil } if os.IsNotExist(err) { return false, nil } return false, err } //压缩文件 //files 文件数组,可以是不同dir下的文件或者文件夹 
//dest 压缩文件存放地址 func Compress(files []*os.File, dest string) error { d, _ := os.Create(dest) defer d.Close() w := zip.NewWriter(d) defer w.Close() for _, file := range files { err := compress(file, "", w) if err != nil { return err } } return nil } func compress(file *os.File, prefix string, zw *zip.Writer) error { info, err := file.Stat() if err != nil { return err } if info.IsDir() { prefix = prefix + "/" + info.Name() fileInfos, err := file.Readdir(-1) if err != nil { return err } for _, fi := range fileInfos { f, err := os.Open(file.Name() + "/" + fi.Name()) if err != nil { return err } err = compress(f, prefix, zw) if err != nil { return err } } } else { header, err := zip.FileInfoHeader(info) header.Name = prefix + "/" + header.Name if err != nil { return err } writer, err := zw.CreateHeader(header) if err != nil { return err } _, err = io.Copy(writer, file) file.Close() if err != nil { return err } } return nil } //解压 func DeCompress(zipFile, dest string) error { dest = dest + "/"; dest = strings.Replace(dest, "\\", "/", -1) dest = strings.Replace(dest, "//", "/", -1) log.Printf("unzip %s to %s", zipFile, dest) reader, err := zip.OpenReader(zipFile) if err != nil { return err } defer reader.Close() for _, file := range reader.File { rc, err := file.Open() if err != nil { return err } defer rc.Close() filename := dest + file.Name log.Println(filename) err = os.MkdirAll(getDir(filename), 0755) if err != nil { return err } w, err := os.Create(filename) if err != nil { return err } defer w.Close() _, err = io.Copy(w, rc) if err != nil { return err } w.Close() rc.Close() } return nil } func getDir(path string) string { return subString(path, 0, strings.LastIndex(path, "/")) } func subString(str string, start, end int) string { rs := []rune(str) length := len(rs) if start < 0 || start > length { panic("start is wrong") } if end < start || end > length { panic("end is wrong") } return string(rs[start:end]) } //测试阶段,只支持bk和bkc func modeMatchs(mode string) bool { // 
if strings.EqualFold(mode, MODE_BK) || strings.EqualFold(mode, MODE_BKCOPY) || strings.EqualFold(mode, MODE_COPY) { if strings.EqualFold(mode, MODE_ZIP) || strings.EqualFold(mode, MODE_UNZIP) { return true } return false } <file_sep>/snippet/pointers.go package main import "fmt" func main() { s := "this first str" ss := s; fmt.Println("ss:" + ss) s = "this secend str" fmt.Println("after modify ss:" + ss) aa := &s s = "this third str" fmt.Println("aa = &s; *ss:" + *aa) i := 1 fmt.Println("initial:", i) zeroval(i) fmt.Println("zeroval:", i) zeroptr(&i) fmt.Println("zeroptr:", i) zeroval(i) fmt.Println("zeroval:", i) fmt.Println("pointer:", &i) } func zeroval(ival int) { ival = 0 } func zeroptr(iptr *int) { *iptr = 0 }<file_sep>/snippet/interface.switch.go package main import "fmt" func main() { var a interface{} = "i'm string" switch a.(type) { case string: fmt.Println("interface a's type is string") } } <file_sep>/kanmeizi.go /* 抓点妹子图 技术点 goroutine WaitGroup http get regexp group file dir read, write variable loop printf import get */ package main import ( "crypto/md5" "fmt" "github.com/vus520/studygo/utils" "os" "regexp" "runtime" "sync" "time" ) var imgList = make([]interface{}, 0, 1) var pageList = make([]interface{}, 0, 1) var timeStart = time.Now().Unix() var w sync.WaitGroup func main() { fmt.Println("生成任务,开始抓取") runtime.GOMAXPROCS(8) os.Mkdir("tmp", 0777) for i := 1; i < 2; i++ { //任务计数器增加 w.Add(1) go func(i int) { url := fmt.Sprintf("http://www.kanmeizi.cn/tag_%d_1_16.html", i) fmt.Printf("Job: %s\n", url) body, _ := utils.FileGetContents(url) format(body) //任务计数器完成 w.Done() }(i) } //等待任务计数器完成并清空,退出进程 w.Wait() for i := range pageList { w.Add(1) go func(i int) { url := fmt.Sprintf("http://www.kanmeizi.cn%s", pageList[i]) fmt.Printf("Job: %s\n", url) body, _ := utils.FileGetContents(url) format(body) //任务计数器完成 w.Done() }(i) } w.Wait() timeEnd := time.Now().Unix() fmt.Printf("页面抓取完成,获取图片: %d 张, 用时: %d 秒\n", len(imgList), timeEnd-timeStart) imgList = 
utils.Slice_unique(imgList) for i := range imgList { url := fmt.Sprintf("%s", imgList[i]) img, _ := utils.FileGetContents(url) file := fmt.Sprintf("./tmp/%x.png", md5.Sum([]byte(img))) utils.FilePutContents(string(file), img) } fmt.Printf("下载图片: %d, 用时: %d s\n", len(imgList), time.Now().Unix()-timeEnd) } //格式化页面,读取图片地址,分图片地址,存入全局变量 func format(body string) { r, _ := regexp.Compile(`<img class="height_min"[^>]+src="(?P<src>.*?)"`) // Compile vs MustCompile // FindAllStringSubmatch vs FindAllString vs FindStringSubmatch img := r.FindAllStringSubmatch(body, -1) for i := range img { imgList = append(imgList, img[i][1]) } r, _ = regexp.Compile(`<a href="([^"]+)" data-page="\d+">`) // Compile vs MustCompile // FindAllStringSubmatch vs FindAllString vs FindStringSubmatch page := r.FindAllStringSubmatch(body, -1) for i := range page { pageList = append(pageList, page[i][1]) } }
f1380b9c866dd912aed5e7edc855ac6078fb3b38
[ "Markdown", "Go" ]
17
Go
kennycaiguo/studygo-4
d39616915d73979e789f5a2ec7eafbaa846fd95a
159f2a71257a71d063e173170e17ed10cf7d4659
refs/heads/master
<repo_name>fjriosp/tdsayuno<file_sep>/upgrade #! /bin/bash pushd $(dirname $0) > /dev/null error() { echo "Error: ${1}." exit 1 } # Create if not exists if [ ! -e tdsayuno.db ] then echo "Creating database..." sqlite3 tdsayuno.db < sql/create.sql || error "creating database." fi # Check Backup if [ -e tdsayuno.db.bak ] then echo "Backup file tdsayuno.db.bak exists!" echo "If you really want to upgrade, delete it." echo " rm tdsayuno.db.bak" exit 1 fi # Backup database cp tdsayuno.db tdsayuno.db.bak # Upgrade VERSION=$(sqlite3 tdsayuno.db 'SELECT VERSION FROM VERSION') while [ -e sql/upgrade_${VERSION}.sql ] do echo "Upgrading database (${VERSION})..." sqlite3 tdsayuno.db < sql/upgrade_${VERSION}.sql || error "upgrading database." VERSION=$(sqlite3 tdsayuno.db 'SELECT VERSION FROM VERSION') done echo "Database upgraded to ${VERSION}" # Remove backup rm tdsayuno.db.bak popd > /dev/null <file_sep>/sql/upgrade_0.1.sql .bail ON ALTER TABLE "user" ADD COLUMN pref_food INTEGER; ALTER TABLE "user" ADD COLUMN pref_drink INTEGER; CREATE TABLE food ( id INTEGER NOT NULL, name VARCHAR, price FLOAT, PRIMARY KEY (id) ); CREATE TABLE drink ( id INTEGER NOT NULL, name VARCHAR, price FLOAT, PRIMARY KEY (id) ); UPDATE "version" SET "version" = '0.2'; <file_sep>/view.py # -*- coding: utf-8 -*- import time from functools import wraps from uuid import uuid4 from hashlib import md5 from flask import Flask, request, redirect, url_for, render_template, make_response app = Flask('tdsayuno',static_url_path='') from model import db,User,Food,Drink # Devuelve el usuario que está autenticado def getUserBySession(): session = request.cookies.get('session') if not session: return None if session: u = User.query.filter_by(session=session).first() return u def auth(f,admin=False): @wraps(f) def auth_f(*args, **kwargs): u = getUserBySession() if not u: return redirect(url_for('index')) if admin and not u.isAdmin: return redirect(url_for('index')) kwargs['user'] = u return f(*args, **kwargs) return 
auth_f @app.route('/') def index(): u = getUserBySession() if not u: return render_template('login.html') return render_template('index.html',u=u) @app.route('/login', methods=['GET','POST']) def login(): if request.method == 'GET': return redirect(url_for('index')) user = request.form['user'] passwd = request.form['passwd'] seed = int(request.form['seed']) print 'Intento de login del usuario "%s".' % (user) print user print passwd print seed # Compruebo que el usuario existe u = User.query.filter_by(user=user).first() if not u: print 'Usuario "%s" no encontrado.' % (user) return render_template('login.html',error='Login error') # Test de la marca de tiempo maxseed = int(time.time() * 1000) + 60000 if u.seed >= seed or seed >= maxseed: print '¡¡Timestamp incorrecto!!' print u.seed print seed print maxseed return render_template('login.html',error='Login error') # Validación de contraseña checkpasswd = md5(u.passwd + str(seed)).hexdigest() if passwd != checkpasswd: print 'Contraseña errónea.' 
return render_template('login.html',error='Login error') # Genero un id de sessión session = str(uuid4()) # Actualizo el usuario en base de datos u.session = session u.seed = seed db.session.commit() resp = make_response(redirect(url_for('index'))) resp.set_cookie('session', u.session) return resp @app.route('/logout') @auth def logout(user): user.session = None db.session.commit() return redirect(url_for('index')) @app.route('/perfil', methods=['GET','POST']) @auth def perfil(user): if request.method == 'GET': return render_template('perfil.html',u=user) name = request.form['name'] passwd = request.form['passwd'] mail = request.form['mail'] user.name = name user.passwd = <PASSWORD> user.mail = mail db.session.commit() return redirect(url_for('index')) @app.route('/comida') @auth def comida(user): id = request.args.get('id') edit = None if id: edit = Food.query.filter_by(id=id).first() food_list = Food.query.all() return render_template('comida.html',productos=food_list,e=edit,u=user) @app.route('/saveComida', methods=['POST']) @auth def saveComida(user): id = None if 'id' in request.form: id = request.form['id'] name = request.form['name'] price = request.form['price'] if id: f = Food.query.filter_by(id=id).first() f.name = name f.price = price else: f = Food(name,price) db.session.add(f) db.session.commit() return redirect(url_for('comida')) @app.route('/deleteComida') @auth def deleteComida(user): id = int(request.args.get('id')) f = Food.query.filter_by(id=id).first() db.session.delete(f) db.session.commit() return redirect(url_for('comida')) @app.route('/bebida') @auth def bebida(user): id = request.args.get('id') edit = None if id: edit = Drink.query.filter_by(id=id).first() drink_list = Drink.query.all() return render_template('bebida.html',productos=drink_list,e=edit,u=user) @app.route('/saveBebida', methods=['POST']) @auth def saveBebida(user): id = None if 'id' in request.form: id = request.form['id'] name = request.form['name'] price = 
request.form['price'] if id: f = Drink.query.filter_by(id=id).first() f.name = name f.price = price else: f = Drink(name,price) db.session.add(f) db.session.commit() return redirect(url_for('bebida')) @app.route('/deleteBebida') @auth def deleteBebida(user): id = int(request.args.get('id')) f = Drink.query.filter_by(id=id).first() db.session.delete(f) db.session.commit() return redirect(url_for('bebida')) def start(debug=False): app.run(host='0.0.0.0',debug=debug) if __name__ == '__main__': start(debug=True) <file_sep>/createdb.py # -*- coding: utf-8 -*- from hashlib import md5 from model import db db.create_all() db.session.commit() <file_sep>/tdsayuno #! /usr/bin/env python # -*- coding: utf-8 -*- from optparse import OptionParser from sys import argv import os VERSION = '0.1' def upgrade(): os.system("./upgrade") def run(debug=False): from view import start start(debug=debug) def main(): parser = OptionParser(usage="USAGE: %prog [options]", version="%prog "+VERSION ) parser.add_option("-r", "--run", action="store_true", dest="run", default=False, help="start the application.") parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False, help="start the application in debug mode.") parser.add_option("-u", "--upgrade", action="store_true", dest="upgrade", default=False, help="upgrade database to the last version.",) (options, args) = parser.parse_args() if len(argv) == 1: options.run = True if options.upgrade: upgrade() if options.run or options.debug: run(options.debug) if __name__ == '__main__': main() <file_sep>/sql/create.sql .bail ON CREATE TABLE user ( id INTEGER NOT NULL, user VARCHAR, name VARCHAR, mail VARCHAR, passwd VARCHAR, session VARCHAR, seed INTEGER, karma INTEGER, food INTEGER, drink INTEGER, PRIMARY KEY (id) ); CREATE TABLE version ( id INTEGER NOT NULL, version VARCHAR, PRIMARY KEY (id) ); INSERT INTO "user" VALUES(1,'admin','Administrador','<EMAIL>','<PASSWORD>',NULL,1425374633722,0,NULL,NULL); INSERT INTO "version" 
VALUES(1,'0.1'); <file_sep>/model.py from flask import Flask from flask.ext.sqlalchemy import SQLAlchemy import time app = Flask('tdsayuno') app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///tdsayuno.db' db = SQLAlchemy(app) class User(db.Model): id = db.Column(db.Integer, primary_key=True) user = db.Column(db.String) name = db.Column(db.String) mail = db.Column(db.String) passwd = db.Column(db.String) session = db.Column(db.String) seed = db.Column(db.Integer) karma = db.Column(db.Integer) food = db.Column(db.Integer) drink = db.Column(db.Integer) pref_food = db.Column(db.Integer) pref_drink = db.Column(db.Integer) def __init__(self, user, name, mail, passwd, karma): self.user = user self.name = name self.mail = mail self.passwd = <PASSWORD> self.session = None self.seed = int(time.time() * 1000) self.karma = karma self.food = None self.drink = None self.pref_food = None self.pref_drink = None def isAdmin(self): return self.user=='admin' def __str__(self): return 'User(%d,"%s","%s","%s","%s",%d,%d,%d,%d,%d,%d)' % (self.id,self.user,self.name,self.mail,self.session,self.seed,self.karma,self.food,self.drink,self.pref_food,self.pref_drink) class Version(db.Model): id = db.Column(db.Integer, primary_key=True) version = db.Column(db.String) def __init__(self, version): self.version = version def __str__(self): return 'Version(%d,"%s")' % (self.id,self.version) class Drink(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String) price = db.Column(db.Float) def __init__(self, name, price): self.name = name self.price = price def __str__(self): return 'Drink(%d,"%s",%f)' % (self.id,self.name,self.price) class Food(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String) price = db.Column(db.Float) def __init__(self, name, price): self.name = name self.price = price def __str__(self): return 'Food(%d,"%s",%f)' % (self.id,self.name,self.price)
9adae97dd6e72e736674ffda8cafe5724d04be37
[ "SQL", "Python", "Shell" ]
7
Shell
fjriosp/tdsayuno
5b99ed0d49ce6ee34ab280457df10df8ae646971
cb0b550574b2f49c568533a154501b21b45e7546
refs/heads/master
<repo_name>dlehrich/kitchensink<file_sep>/README.md # Spotify Kitchen Sink Spotify Kitchen Sink is demo application for the Spotify Apps API. It demonstrates some of the available functionality and is designed to help developers who are getting started. Less time reinventing the wheel, more time reinventing the record player. ## Functionality * Implement Tabs * Handle URI Arguments * Process dropped items (track, user, etc) * Play single items (track, artist, etc) * Play in context (album, playlist, etc) * Control Playback (pause, skip, etc) * Listen for track changes * Get tracks from the user’s library * Share a track using the built-in popup * Scan friends on Spotify * Fetch a user’s top tracks * Create and save temporary playlists * Output list and album views * Send tracks to inbox * Use layouts with Spotify styles * Search Spotify and Local tracks * Use advanced search queries * Fetch metadata in a variety of different methods ## Background The Spotify Apps API is fairly new (as of December 2011), so there’s still a lot of guesswork going on. I was able to a few things out, thanks to the Sample Code and a healthy dose of trial-and-error. Exploring the source code of working applications seemed to be the best way to learn, so when a friend needed help, I whipped together this Kitchen Sink. I’m now putting it up on GitHub in case it helps anyone else. If you have any questions, or would like to contribute, please get in touch. ## Resources Here’s a few resources that helped me out along the way: Docs * Spotify Apps Docs: http://developer.spotify.com/en/spotify-apps-api/overview/ * Building a Spotify App: http://musicmachinery.com/2011/12/02/building-a-spotify-app/ * PasteBin Examples: http://pastebin.com/u/MrSiir * StackOverflow Question: http://stackoverflow.com/questions/8353471/spotify-apps-api-any-more-documentation * My First Spotify Trivia Game: http://onthedll.com/2011/12/07/my-first-spotify-trivia-game/ Read / written something helpful? 
Let me know and I’ll include it! Apps * Tutorial: http://developer.spotify.com/download/spotify-apps-api/tutorial/ * Spotichat: https://github.com/sethmurphy/Spotichat * Mood Knobs: https://github.com/alexmic/mood-knobs * Spartify: https://github.com/blixt/spartify * Repeat-One: https://github.com/fxb/repeat-one * Guess The Intro: https://github.com/chielkunkels/spotify-guess-the-intro * SpotifyEchoNestPlaylistDemo: https://gist.github.com/1438262 Building something cool with the API? Let me know and I’ll add it! ## Disclaimers This is an unofficial application, not sponsored or endorsed by Spotify. If you wish to develop for Spotify Apps, you must sign up for their Developer Program, and abide by their rules. In the manifest, there is an undocumented parameter called “ApiPermissions”. This is was adapted from the “API” sample application. I would have left it out, but it is required for some calls, such as fetching the current user’s friends. I assume that at some point Spotify will introduce a system, where users will give you permission to access their data, similar to Facebook apps. But until then, bear in mind that some of these features might not be available to production apps (delete the permissions from the manifest to avoid this issue). This is a work in progress. There’s plenty that I’ve left out and plenty that I don’t know. If you have any suggestions or would like to add something, I would love to hear from you. This is a hack. It’s also my first open source project. So go easy! ## Contact pwattsmail at gmail dot com // @ptrwtts ## License Distributed under the MIT license. 
Copyright (c) 2011 <NAME><file_sep>/js/social.js function socialArgs(args) { getFriend(args[1]); } var tempPlaylist; var friends = [sp.core.user]; // There is an I in friends var spotifyFriends = sp.social.relations.allSpotifyUsers(); // Returns array of user IDs var allFriends = sp.social.relations.all(); // Returns full user objects $(function(){ // Loop through all friends, and fetch top tracks for any spotify users $.each(allFriends,function(index,friendURI){ if($.inArray(friendURI, spotifyFriends)!=-1) { // Some users aren't on Spotify var friend = sp.social.relations.getUserInfo(index); var local = localStorage.getItem(friend.username); if(local) { processFriend(friend,JSON.parse(local)); } else { var toplist = sp.social.getToplist("track","user",friend.username,{ onSuccess: function(response) { localStorage.setItem(friend.username,JSON.stringify(response)); processFriend(friend,response); }, onFailure: function(error) { // Some users fail (seems to be users who don't share on Facebook) localStorage.setItem(friend.username,JSON.stringify({"error":error})); spotifyFriends.splice(spotifyFriends.indexOf(friendURI), 1); if(spotifyFriends.length==0){ showFriends(); } } }); } } }); $("#artists a").live('click',function(e){ showFriend($(this).attr('href')); e.preventDefault(); }); $("#savePlaylist").live('click',function(e){ sp.core.library.createPlaylist($("#friend h2").text(), tempPlaylist.data.all()); e.preventDefault(); }); $("#rickRoll").live('click',function(e){ sp.social.sendToInbox( $(this).attr("user"), // username "Just testing!", // message "spotify:track:0ac0R0wkioYDhzQDbCFokO", // track / album / artist / playlist { onSuccess: function(response) { // callbacks $("#rickRoll").text("Done!"); console.log(response); } } ); e.preventDefault(); }); }); function processFriend(friend,response) { spotifyFriends.splice(spotifyFriends.indexOf(friend.uri), 1); if(response.tracks) { // Some users have no Top Data friends.push(friend); } if(spotifyFriends.length==0){ 
// All friends have been processed showFriends(); } } function showFriends() { $("#friends").empty(); $.each(friends,function(index,friend){ if(friend) { if(friend.uri!="") { var image = "sp://import/img/placeholders/28-user-buddy.png"; if(friend.icon!="") { image = friend.icon; } $("#friends").append( '<a href="spotify:app:kitchensink:social:'+friend.username+'">'+ '<div class="image" style="background-image:url('+image+')"></div>'+ '<span>'+friend.name+'</span>'+ '</a>'); } } }); } function getFriend(username) { var local = localStorage.getItem(username); if(local) { showFriend(username,JSON.parse(local)); } else { var toplist = sp.social.getToplist("track","user",username,{ onSuccess: function(response) { console.log(response); localStorage.setItem(username,JSON.stringify(response)); showFriend(username,response); }, onFailure: function(response) { console.log(response); } }); } } function showFriend(username,response) { $("#friend-details").empty(); $("#friend-tracks").empty(); tempPlaylist = new m.Playlist(); $.each(response.tracks,function(num,track){ tempPlaylist.add(m.Track.fromURI(track.uri)); }); var playlistArt = new v.Player(); playlistArt.track = tempPlaylist.get(0); playlistArt.context = tempPlaylist; $("#friend-details").append(playlistArt.node); var friendDetails = "<div class='left'>"+ "<h2>"+username+"'s Weekly Top Tracks</h2>"+ "<button id='savePlaylist' class='add-playlist button icon'>Save As Playlist</button><br>"+ "<button id='rickRoll' class='new-button' user='"+username+"'>Rick Roll "+name.split(" ")[0]+"</button> (WARNING - This will actually send a track to their inbox!)"; "</div>"; $("#friend-details").append(friendDetails); $("#friend-details").append('<div class="clear"></div>'); var playlistList = new v.List(tempPlaylist); playlistList.node.classList.add("temporary"); $("#friend-tracks").append(playlistList.node); sp.social.getUserByUsername(username,{ onSuccess: function(response) { console.log(response); $("#friend-details 
h2").text(response.name+"'s Weekly Top Tracks"); } }); }
bb862d7b3236873c9dd438aaf6239e1ff5836bf8
[ "Markdown", "JavaScript" ]
2
Markdown
dlehrich/kitchensink
8eebb36369ac190933a636cbb0edebbd51bd014e
54a43747653650ddb2931120d4faa9f4562bda73
refs/heads/master
<repo_name>AyiAyaYue/The-Safe---Arduino<file_sep>/ass2.ino /* Author: <NAME> Date: 11.10.2020 Assignment 2: The safe */ #include "Display.h" #define BTN_INPUT 9 #define BTN_CONFIRM 8 #define LED_YELLOW 7 #define LED_GREEN 5 #define LED_RED 4 #define BUZZER 3 #define LDR A2 int num = 0; String result = ""; String passcode = "<PASSWORD>"; int failedUnlockAttempts = 0; int lastBtnInputState = HIGH; int lastBtnConfirmState = HIGH; bool hasDoorOpened = false; enum STATE { LOCKED, UNLOCKED }; int previousState = STATE::LOCKED; int currentState = STATE::LOCKED; enum INPUT_STATE { OFF, DIGIT1, DIGIT2, DIGIT3, DIGIT4, CONFIRM }; int currentInputState = INPUT_STATE::OFF; int lastInputState = INPUT_STATE::OFF; void setup() { Serial.begin(9600); pinMode(BTN_INPUT, INPUT_PULLUP); pinMode(BTN_CONFIRM, INPUT_PULLUP); pinMode(LED_GREEN, OUTPUT); pinMode(LED_RED, OUTPUT); pinMode(BUZZER, OUTPUT); Display.clear(); } void loop() { bool isDoorOpen = map(analogRead(LDR), 0, 500, 0, 1); /* check if the door is opened when it is locked, if so the alarm is triggered. 
And door state is detected and changed*/ switch(currentState) { case STATE::LOCKED: { if (isDoorOpen) { if (!hasDoorOpened) { enableAlarm(); } hasDoorOpened = true; return; } else if (hasDoorOpened) { hasDoorOpened = false; } bool btnConfirm = isButtonPressed(BTN_CONFIRM, lastBtnConfirmState); bool btnInput = isButtonPressed(BTN_INPUT, lastBtnInputState); /* setting comfirm button function*/ /*change digit positions*/ if (btnConfirm) { switch(currentInputState) { case INPUT_STATE::OFF: Display.show("----"); delay(1000); break; case INPUT_STATE::DIGIT1: case INPUT_STATE::DIGIT2: case INPUT_STATE::DIGIT3: case INPUT_STATE::DIGIT4: result += num; } int nextInputState = ++currentInputState; /* compare entered code with predefined passcode*/ switch(nextInputState) { case INPUT_STATE::CONFIRM: if (result == passcode) { unlockVault(); currentInputState = INPUT_STATE::OFF; } else { if (++failedUnlockAttempts >= 3) { enableAlarm(); failedUnlockAttempts = 0; } Display.show("----"); currentInputState = INPUT_STATE::DIGIT1; } result = ""; break; } num = 1; } /* setting input button function */ if (btnInput) { if (++num % 5 == 0) { num = 1; } } /* update the display*/ if (btnConfirm || btnInput) { switch(currentInputState) { case INPUT_STATE::DIGIT1: case INPUT_STATE::DIGIT2: case INPUT_STATE::DIGIT3: case INPUT_STATE::DIGIT4: Display.showCharAt(currentInputState - 1, '0'+ num); } } break; } /* when it is unlocked and the door is open, door state is changed. when the door is close, it is locked again. 
only when the door is opened with correct passcode and then close, will it send notification (yellow led) */ case STATE::UNLOCKED: if (isDoorOpen) { hasDoorOpened = true; } else if (hasDoorOpened) { currentState = STATE::LOCKED; } break; default: currentState = STATE::LOCKED; } if (currentState != previousState) { stateChangeNotification(); } previousState = currentState; } bool isButtonPressed(int pinNumber, int &lastButtonState) { int buttonState = digitalRead(pinNumber); if (buttonState != lastButtonState) { lastButtonState = buttonState; if (buttonState == LOW) { return true; } } return false; } void enableAlarm() { Serial.println((String)"ALARM SAFE " + 1337); tone(BUZZER, 1000); for (int i = 0; i < 10; ++i) { digitalWrite(LED_RED, HIGH); delay(250); digitalWrite(LED_RED, LOW); delay(250); } noTone(BUZZER); } void unlockVault() { currentState = STATE::UNLOCKED; Display.show("OPEN"); digitalWrite(LED_GREEN, HIGH); delay(5000); digitalWrite(LED_GREEN, LOW); Display.clear(); } void stateChangeNotification() { tone(BUZZER, 1000); digitalWrite(LED_YELLOW, HIGH); delay(200); digitalWrite(LED_YELLOW, LOW); noTone(BUZZER); } <file_sep>/README.md # The-Safe---Arduino Second school assignment for Emebeded system
2e808b652ee63fadcbb28c0d0213416c9007cbcb
[ "Markdown", "C++" ]
2
C++
AyiAyaYue/The-Safe---Arduino
52fcf363d9458f862f696c8367bce9a2a4e324b3
4019530e93f35cf6183a194fc79288849149e99f
refs/heads/master
<repo_name>kenotr0n/lettuce<file_sep>/src/test/java/com/lambdaworks/redis/cluster/RedisClusterSetupTest.java package com.lambdaworks.redis.cluster; import static com.google.code.tempusfugit.temporal.Duration.*; import static com.google.code.tempusfugit.temporal.Timeout.*; import static com.lambdaworks.redis.cluster.ClusterTestUtil.*; import static org.assertj.core.api.Assertions.*; import java.util.Set; import java.util.concurrent.TimeoutException; import org.junit.*; import com.google.code.tempusfugit.temporal.Condition; import com.google.code.tempusfugit.temporal.WaitFor; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.lambdaworks.redis.*; import com.lambdaworks.redis.cluster.models.partitions.ClusterPartitionParser; import com.lambdaworks.redis.cluster.models.partitions.Partitions; import com.lambdaworks.redis.cluster.models.partitions.RedisClusterNode; /** * @author <a href="mailto:<EMAIL>"><NAME></a> * @since 3.0 */ @SuppressWarnings("unchecked") public class RedisClusterSetupTest { public static final String host = TestSettings.hostAddr(); public static final int port1 = 7383; public static final int port2 = 7384; private static RedisClusterClient clusterClient; private static RedisClient client1; private static RedisClient client2; private RedisClusterConnection<String, String> redis1; private RedisClusterConnection<String, String> redis2; @Rule public ClusterRule clusterRule = new ClusterRule(clusterClient, port1, port2); @BeforeClass public static void setupClient() { clusterClient = new RedisClusterClient(RedisURI.Builder.redis(host, port1).build()); client1 = new RedisClient(host, port1); client2 = new RedisClient(host, port2); } @AfterClass public static void shutdownClient() { FastShutdown.shutdown(clusterClient); FastShutdown.shutdown(client1); FastShutdown.shutdown(client2); } @Before public void openConnection() throws Exception { redis1 = client1.connect(); redis2 = client2.connect(); 
clusterRule.clusterReset(); } @After public void closeConnection() throws Exception { redis1.close(); redis2.close(); } @Test public void clusterMeet() throws Exception { Partitions partitionsBeforeMeet = ClusterPartitionParser.parse(redis1.clusterNodes()); assertThat(partitionsBeforeMeet.getPartitions()).hasSize(1); String result = redis1.clusterMeet(host, port2); assertThat(result).isEqualTo("OK"); waitForCluster(redis1); Partitions partitionsAfterMeet = ClusterPartitionParser.parse(redis1.clusterNodes()); assertThat(partitionsAfterMeet.getPartitions()).hasSize(2); } @Test public void clusterForget() throws Exception { String result = redis1.clusterMeet(host, port2); assertThat(result).isEqualTo("OK"); waitForCluster(redis1); WaitFor.waitOrTimeout(new Condition() { @Override public boolean isSatisfied() { Partitions partitions = ClusterPartitionParser.parse(redis1.clusterNodes()); for (RedisClusterNode redisClusterNode : partitions.getPartitions()) { if (redisClusterNode.getFlags().contains(RedisClusterNode.NodeFlag.HANDSHAKE)) { return false; } } return true; } }, timeout(seconds(5))); Partitions partitions = ClusterPartitionParser.parse(redis1.clusterNodes()); for (RedisClusterNode redisClusterNode : partitions.getPartitions()) { if (!redisClusterNode.getFlags().contains(RedisClusterNode.NodeFlag.MYSELF)) { redis1.clusterForget(redisClusterNode.getNodeId()); } } Thread.sleep(300); Partitions partitionsAfterForget = ClusterPartitionParser.parse(redis1.clusterNodes()); assertThat(partitionsAfterForget.getPartitions()).hasSize(1); } private void waitForCluster(final RedisClusterConnection<String, String> connection) throws InterruptedException, TimeoutException { WaitFor.waitOrTimeout(new Condition() { @Override public boolean isSatisfied() { Partitions partitionsAfterMeet = ClusterPartitionParser.parse(connection.clusterNodes()); return partitionsAfterMeet.getPartitions().size() == 2; } }, timeout(seconds(5))); } @Test public void clusterAddDelSlots() throws 
Exception { redis1.clusterMeet(host, port2); waitForCluster(redis1); waitForCluster(redis2); add6SlotsEach(); waitForSlots(redis1, 6); waitForSlots(redis2, 6); final Set<Integer> set1 = ImmutableSet.of(1, 2, 3, 4, 5, 6); final Set<Integer> set2 = ImmutableSet.of(7, 8, 9, 10, 11, 12); deleteSlots(redis1, set1); deleteSlots(redis2, set2); verifyDeleteSlots(redis1, set1); verifyDeleteSlots(redis2, set2); } protected void verifyDeleteSlots(final RedisClusterConnection<String, String> connection, final Set<Integer> slots) { try { WaitFor.waitOrTimeout(new Condition() { @Override public boolean isSatisfied() { RedisClusterNode ownPartition = getOwnPartition(connection); boolean condition = ownPartition.getSlots().isEmpty(); if (!ownPartition.getSlots().isEmpty()) { deleteSlots(connection, slots); } return condition; } }, timeout(seconds(5))); } catch (Exception e) { RedisClusterNode ownPartition = getOwnPartition(connection); fail("Slots not deleted, Slots on " + ownPartition.getUri() + ":" + ownPartition.getSlots(), e); } } private void deleteSlots(RedisClusterConnection<String, String> connection, Set<Integer> slots) { for (Integer slot : slots) { try { connection.clusterDelSlots(slot); } catch (RedisException e) { } } } @Test public void clusterSetSlots() throws Exception { redis1.clusterMeet(host, port2); waitForCluster(redis1); waitForCluster(redis2); add6SlotsEach(); waitForSlots(redis1, 6); waitForSlots(redis2, 6); redis1.clusterSetSlotNode(6, getNodeId(redis2)); waitForSlots(redis1, 5); Partitions partitions = ClusterPartitionParser.parse(redis1.clusterNodes()); for (RedisClusterNode redisClusterNode : partitions.getPartitions()) { if (redisClusterNode.getFlags().contains(RedisClusterNode.NodeFlag.MYSELF)) { assertThat(redisClusterNode.getSlots()).isEqualTo(ImmutableList.of(1, 2, 3, 4, 5)); } } } private void add6SlotsEach() { for (int i = 1; i < 7; i++) { redis1.clusterAddSlots(i); } for (int i = 7; i < 13; i++) { redis2.clusterAddSlots(i); } } private void 
waitForSlots(final RedisClusterConnection<String, String> nodeConnection, final int expectedCount) throws InterruptedException, TimeoutException { try { WaitFor.waitOrTimeout(new Condition() { @Override public boolean isSatisfied() { RedisClusterNode ownPartition = getOwnPartition(nodeConnection); return ownPartition.getSlots().size() == expectedCount; } }, timeout(seconds(10))); } catch (Exception e) { RedisClusterNode ownPartition = getOwnPartition(nodeConnection); fail("Fail on waiting for slots on " + ownPartition.getUri() + ", expected count " + expectedCount + ", actual: " + ownPartition.getSlots(), e); } } @Test public void clusterSlotMigrationImport() throws Exception { redis1.clusterMeet(host, port2); waitForCluster(redis1); waitForCluster(redis2); add6SlotsEach(); waitForSlots(redis1, 6); waitForSlots(redis2, 6); String nodeId1 = getNodeId(redis1); String nodeId2 = getNodeId(redis2); assertThat(redis1.clusterSetSlotMigrating(6, nodeId2)).isEqualTo("OK"); assertThat(redis1.clusterSetSlotImporting(12, nodeId2)).isEqualTo("OK"); RedisClusterNode partition1 = getOwnPartition(redis1); RedisClusterNode partition2 = getOwnPartition(redis2); assertThat(partition1.getSlots()).hasSize(6); assertThat(partition2.getSlots()).hasSize(6); } } <file_sep>/src/main/java/com/lambdaworks/redis/cluster/RedisClusterClient.java package com.lambdaworks.redis.cluster; import static com.google.common.base.Preconditions.*; import java.io.Closeable; import java.net.SocketAddress; import java.util.ArrayDeque; import java.util.Collections; import java.util.List; import java.util.Queue; import java.util.concurrent.TimeUnit; import com.google.common.base.Supplier; import com.google.common.collect.Lists; import com.lambdaworks.redis.*; import com.lambdaworks.redis.cluster.models.partitions.ClusterPartitionParser; import com.lambdaworks.redis.cluster.models.partitions.Partitions; import com.lambdaworks.redis.cluster.models.partitions.RedisClusterNode; import 
com.lambdaworks.redis.codec.RedisCodec; import com.lambdaworks.redis.codec.Utf8StringCodec; import com.lambdaworks.redis.protocol.CommandHandler; import com.lambdaworks.redis.protocol.RedisCommand; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; /** * A scalable thread-safe <a href="http://redis.io/">Redis</a> cluster client. Multiple threads may share one connection * provided they avoid blocking and transactional operations such as BLPOP and MULTI/EXEC. * * @author <a href="mailto:<EMAIL>"><NAME></a> * @since 3.0 */ public class RedisClusterClient extends AbstractRedisClient { private static final InternalLogger logger = InternalLoggerFactory.getInstance(RedisClusterClient.class); private Partitions partitions; private List<RedisURI> initialUris = Lists.newArrayList(); private RedisClusterClient() { } /** * Initialize the client with an initial cluster URI. * * @param initialUri initial cluster URI */ public RedisClusterClient(RedisURI initialUri) { this(Collections.singletonList(checkNotNull(initialUri, "RedisURI (initial uri) must not be null"))); } /** * Initialize the client with a list of cluster URI's. All uris are tried in sequence for connecting initially to the * cluster. If any uri is sucessful for connection, the others are not tried anymore. The initial uri is needed to discover * the cluster structure for distributing the requests. * * @param initialUris list of initial cluster URIs */ public RedisClusterClient(List<RedisURI> initialUris) { this.initialUris = initialUris; checkNotNull(initialUris, "initialUris must not be null"); checkArgument(!initialUris.isEmpty(), "initialUris must not be empty"); setDefaultTimeout(getFirstUri().getTimeout(), getFirstUri().getUnit()); } /** * Open a new synchronous connection to the redis cluster that treats keys and values as UTF-8 strings. * * @return A new connection. 
*/ public RedisAdvancedClusterConnection<String, String> connectCluster() { return connectCluster(newStringStringCodec()); } /** * Open a new synchronous connection to the redis server. Use the supplied {@link RedisCodec codec} to encode/decode keys * and values. * * @param codec Use this codec to encode/decode keys and values. * @param <K> Key type. * @param <V> Value type. * @return A new connection. */ @SuppressWarnings("unchecked") public <K, V> RedisAdvancedClusterConnection<K, V> connectCluster(RedisCodec<K, V> codec) { return (RedisAdvancedClusterConnection<K, V>) syncHandler(connectClusterAsyncImpl(codec), RedisAdvancedClusterConnection.class, RedisClusterConnection.class); } /** * Creates a connection to the redis cluster. * * @return A new connection. */ public RedisAdvancedClusterAsyncConnection<String, String> connectClusterAsync() { return connectClusterAsyncImpl(newStringStringCodec(), getSocketAddressSupplier()); } /** * Creates a connection to the redis cluster. * * @param codec Use this codec to encode/decode keys and values. * @param <K> Key type. * @param <V> Value type. * @return A new connection. */ public <K, V> RedisAdvancedClusterAsyncConnection<K, V> connectClusterAsync(RedisCodec<K, V> codec) { return connectClusterAsyncImpl(codec, getSocketAddressSupplier()); } protected RedisAsyncConnectionImpl<String, String> connectAsyncImpl(SocketAddress socketAddress) { return connectAsyncImpl(newStringStringCodec(), socketAddress); } /** * Create a connection to a redis socket address. * * @param socketAddress initial connect * @param <K> Key type. * @param <V> Value type. 
* @return a new connection */ <K, V> RedisAsyncConnectionImpl<K, V> connectAsyncImpl(RedisCodec<K, V> codec, final SocketAddress socketAddress) { logger.debug("connectAsyncImpl(" + socketAddress + ")"); Queue<RedisCommand<K, V, ?>> queue = new ArrayDeque<RedisCommand<K, V, ?>>(); CommandHandler<K, V> handler = new CommandHandler<K, V>(clientOptions, queue); RedisAsyncConnectionImpl<K, V> connection = newRedisAsyncConnectionImpl(handler, codec, timeout, unit); connectAsyncImpl(handler, connection, new Supplier<SocketAddress>() { @Override public SocketAddress get() { return socketAddress; } }); connection.registerCloseables(closeableResources, connection); return connection; } <K, V> RedisAsyncConnectionImpl<K, V> connectClusterAsyncImpl(RedisCodec<K, V> codec) { return connectClusterAsyncImpl(codec, getSocketAddressSupplier()); } /** * Create a clustered connection with command distributor. * * @param codec the codec to use * @param socketAddressSupplier address supplier for initial connect and re-connect * @param <K> Key type. * @param <V> Value type. 
* @return a new connection */ <K, V> RedisAdvancedClusterAsyncConnectionImpl<K, V> connectClusterAsyncImpl(RedisCodec<K, V> codec, final Supplier<SocketAddress> socketAddressSupplier) { if (partitions == null) { initializePartitions(); } logger.debug("connectCluster(" + socketAddressSupplier.get() + ")"); Queue<RedisCommand<K, V, ?>> queue = new ArrayDeque<RedisCommand<K, V, ?>>(); CommandHandler<K, V> handler = new CommandHandler<K, V>(clientOptions, queue); final PooledClusterConnectionProvider<K, V> pooledClusterConnectionProvider = new PooledClusterConnectionProvider<K, V>( this, codec); final ClusterDistributionChannelWriter<K, V> clusterWriter = new ClusterDistributionChannelWriter<K, V>(handler, pooledClusterConnectionProvider); RedisAdvancedClusterAsyncConnectionImpl<K, V> connection = newRedisAsyncConnectionImpl(clusterWriter, codec, timeout, unit); connection.setPartitions(partitions); connectAsyncImpl(handler, connection, socketAddressSupplier); connection.registerCloseables(closeableResources, connection, clusterWriter, pooledClusterConnectionProvider); if (getFirstUri().getPassword() != null) { connection.auth(new String(getFirstUri().getPassword())); } return connection; } /** * Reload partitions and re-initialize the distribution table. 
*/ public void reloadPartitions() { if (partitions == null) { initializePartitions(); partitions.updateCache(); } else { Partitions loadedPartitions = loadPartitions(); this.partitions.getPartitions().clear(); this.partitions.getPartitions().addAll(loadedPartitions.getPartitions()); this.partitions.reload(loadedPartitions.getPartitions()); } for (Closeable c : closeableResources) { if (c instanceof RedisAdvancedClusterAsyncConnectionImpl) { RedisAdvancedClusterAsyncConnectionImpl<?, ?> connection = (RedisAdvancedClusterAsyncConnectionImpl<?, ?>) c; if (connection.getChannelWriter() instanceof ClusterDistributionChannelWriter) { connection.setPartitions(this.partitions); } } } } protected void initializePartitions() { Partitions loadedPartitions = loadPartitions(); this.partitions = loadedPartitions; } /** * Retrieve the cluster view. Partitions are shared amongst all connections opened by this client instance. * * @return the partitions. */ public Partitions getPartitions() { if (partitions == null) { initializePartitions(); } return partitions; } /** * Retrieve partitions. 
* * @return Partitions */ protected Partitions loadPartitions() { String clusterNodes = null; RedisURI nodeUri = null; Exception lastException = null; for (RedisURI initialUri : initialUris) { try { RedisAsyncConnectionImpl<String, String> connection = connectAsyncImpl(initialUri.getResolvedAddress()); nodeUri = initialUri; clusterNodes = connection.clusterNodes().get(); connection.close(); break; } catch (Exception e) { lastException = e; } } if (clusterNodes == null) { if (lastException == null) { throw new RedisException("Cannot retrieve initial cluster partitions from initial URIs " + initialUris); } throw new RedisException("Cannot retrieve initial cluster partitions from initial URIs " + initialUris, lastException); } Partitions loadedPartitions = ClusterPartitionParser.parse(clusterNodes); for (RedisClusterNode partition : loadedPartitions) { if (partition.getFlags().contains(RedisClusterNode.NodeFlag.MYSELF)) { partition.setUri(nodeUri); } if (nodeUri != null && nodeUri.getPassword() != null) { partition.getUri().setPassword(new String(nodeUri.getPassword())); } } return loadedPartitions; } /** * Construct a new {@link RedisAdvancedClusterAsyncConnectionImpl}. Can be overridden in order to construct a subclass of * {@link RedisAdvancedClusterAsyncConnectionImpl} * * @param channelWriter the channel writer * @param codec the codec to use * @param timeout Timeout value * @param unit Timeout unit * @param <K> Key type. * @param <V> Value type. 
* @return RedisAdvancedClusterAsyncConnectionImpl&lt;K, V&gt; instance */ protected <K, V> RedisAdvancedClusterAsyncConnectionImpl<K, V> newRedisAsyncConnectionImpl( RedisChannelWriter<K, V> channelWriter, RedisCodec<K, V> codec, long timeout, TimeUnit unit) { return new RedisAdvancedClusterAsyncConnectionImpl<K, V>(channelWriter, codec, timeout, unit); } protected RedisURI getFirstUri() { checkState(!initialUris.isEmpty(), "initialUris must not be empty"); return initialUris.get(0); } private Supplier<SocketAddress> getSocketAddressSupplier() { return new Supplier<SocketAddress>() { @Override public SocketAddress get() { return getFirstUri().getResolvedAddress(); } }; } protected Utf8StringCodec newStringStringCodec() { return new Utf8StringCodec(); } public void setPartitions(Partitions partitions) { this.partitions = partitions; } }
bef0be42e1df5cb3a43f7d1604856a92058301a6
[ "Java" ]
2
Java
kenotr0n/lettuce
d08c6d320cfe995471e3ea916fceff47ee0bf170
633dee5793375f9f5db2df92d57553e5271e19b5
refs/heads/master
<file_sep>package com.linkedin.thirdeye.dashboard.resources.v2; import com.linkedin.thirdeye.dashboard.resources.v2.pojo.RootCauseEntity; import com.linkedin.thirdeye.dashboard.resources.v2.rootcause.DefaultEntityFormatter; import com.linkedin.thirdeye.dashboard.resources.v2.rootcause.DimensionEntityFormatter; import com.linkedin.thirdeye.dashboard.resources.v2.rootcause.EventEntityFormatter; import com.linkedin.thirdeye.dashboard.resources.v2.rootcause.MetricEntityFormatter; import com.linkedin.thirdeye.dashboard.resources.v2.rootcause.ServiceEntityFormatter; import com.linkedin.thirdeye.datasource.DAORegistry; import com.linkedin.thirdeye.rootcause.Entity; import com.linkedin.thirdeye.rootcause.RCAFramework; import com.linkedin.thirdeye.rootcause.RCAFrameworkExecutionResult; import com.linkedin.thirdeye.rootcause.impl.EntityUtils; import com.linkedin.thirdeye.rootcause.impl.MetricEntity; import com.linkedin.thirdeye.rootcause.impl.TimeRangeEntity; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Set; import javax.ws.rs.DefaultValue; import javax.ws.rs.GET; import javax.ws.rs.Path; import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; import javax.ws.rs.core.MediaType; import org.joda.time.format.DateTimeFormatter; import org.joda.time.format.ISODateTimeFormat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @Path(value = "/rootcause") @Produces(MediaType.APPLICATION_JSON) public class RootCauseResource { private static final Logger LOG = LoggerFactory.getLogger(RootCauseResource.class); private final List<RootCauseEntityFormatter> formatters; private final RCAFramework rootCauseFramework; private final RCAFramework relatedMetricsFramework; public RootCauseResource(RCAFramework rootCauseFramework, RCAFramework relatedMetricsFramework, List<RootCauseEntityFormatter> formatters) { 
this.rootCauseFramework = rootCauseFramework; this.relatedMetricsFramework = relatedMetricsFramework; this.formatters = formatters; if(this.rootCauseFramework == null) LOG.info("RootCauseFramework not configured. Disabling '/queryRootCause' endpoint."); if(this.relatedMetricsFramework == null) LOG.info("RelatedMetricsFramework not configured. Disabling '/queryRelatedMetrics' endpoint."); } @GET @Path("/queryRootCause") public List<RootCauseEntity> queryRootCause( @QueryParam("current") Long current, @QueryParam("baseline") Long baseline, @QueryParam("windowSize") Long windowSize, @QueryParam("urn") List<String> urns) throws Exception { // configuration validation if(this.rootCauseFramework == null) throw new IllegalStateException("RootCauseFramework not configured. Endpoint disabled."); // input validation if(current == null) throw new IllegalArgumentException("Must provide current timestamp (in milliseconds)"); if(baseline == null) throw new IllegalArgumentException("Must provide baseline timestamp (in milliseconds)"); if(windowSize == null) throw new IllegalArgumentException("Must provide windowSize (in milliseconds)"); // format input Set<Entity> input = new HashSet<>(); input.add(TimeRangeEntity.fromRange(1.0, TimeRangeEntity.TYPE_CURRENT, current - windowSize, current)); input.add(TimeRangeEntity.fromRange(1.0, TimeRangeEntity.TYPE_BASELINE, baseline - windowSize, baseline)); for(String urn : urns) { input.add(EntityUtils.parseURN(urn, 1.0)); } // run root-cause analysis RCAFrameworkExecutionResult result = this.rootCauseFramework.run(input); // apply formatters return applyFormatters(result.getResultsSorted()); } @GET @Path("/queryRelatedMetrics") public List<RootCauseEntity> queryRelatedMetrics( @QueryParam("current") Long current, @QueryParam("baseline") Long baseline, @QueryParam("windowSize") Long windowSize, @QueryParam("metricUrn") String metricUrn) throws Exception { // configuration validation if(this.relatedMetricsFramework == null) throw new 
IllegalStateException("RelatedMetricsFramework not configured. Endpoint disabled."); // input validation if(current == null) throw new IllegalArgumentException("Must provide current timestamp (in milliseconds)"); if(baseline == null) throw new IllegalArgumentException("Must provide baseline timestamp (in milliseconds)"); if(windowSize == null) throw new IllegalArgumentException("Must provide windowSize (in milliseconds)"); if(metricUrn == null) throw new IllegalArgumentException("Must provide metricUrn"); if(!MetricEntity.TYPE.isType(metricUrn)) throw new IllegalArgumentException(String.format("URN '%s' is not a MetricEntity", metricUrn)); // format input Set<Entity> input = new HashSet<>(); input.add(TimeRangeEntity.fromRange(1.0, TimeRangeEntity.TYPE_CURRENT, current - windowSize, current)); input.add(TimeRangeEntity.fromRange(1.0, TimeRangeEntity.TYPE_BASELINE, baseline - windowSize, baseline)); input.add(EntityUtils.parseURN(metricUrn, 1.0)); // run related metrics analysis RCAFrameworkExecutionResult result = this.relatedMetricsFramework.run(input); // apply formatters return applyFormatters(result.getResultsSorted()); } private List<RootCauseEntity> applyFormatters(Iterable<Entity> entities) { List<RootCauseEntity> output = new ArrayList<>(); for(Entity e : entities) { output.add(applyFormatters(e)); } return output; } private RootCauseEntity applyFormatters(Entity e) { for(RootCauseEntityFormatter formatter : this.formatters) { if(formatter.applies(e)) { try { return formatter.format(e); } catch (Exception ex) { LOG.warn("Error applying formatter '{}'. Skipping.", formatter.getClass().getName(), ex); } } } throw new IllegalArgumentException(String.format("No formatter for Entity '%s'", e.getUrn())); } } <file_sep>/** * Copyright (C) 2014-2016 LinkedIn Corp. (<EMAIL>) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.linkedin.pinot.integration.tests; import com.linkedin.pinot.common.config.Tenant; import com.linkedin.pinot.common.utils.ControllerTenantNameBuilder; import com.linkedin.pinot.common.utils.TenantRole; import com.linkedin.pinot.controller.helix.ControllerRequestURLBuilder; import com.linkedin.pinot.core.indexsegment.generator.SegmentVersion; import org.json.JSONObject; import org.testng.Assert; import org.testng.annotations.Test; /** * This test sets up the Pinot cluster and ensures the sanity/consistency of * cluster after modifications are performed on the cluster. */ public class ClusterSanityTest extends ClusterTest { private static final String tenantName = ControllerTenantNameBuilder.DEFAULT_TENANT_NAME; private static final String brokerTenant = tenantName + "_BROKER"; private static final String TABLE_NAME = "ClusterSanityTestTable"; private static final long WAIT_TIME_FOR_TENANT_UPDATE = 2000; /** * This test ensures that the cluster is in a consistent after the number of brokers * is reduced. * @throws Exception */ @Test public void testBrokerScaleDown() throws Exception { setupCluster(2, 1); scaleDownBroker(1); try { Assert.assertEquals(_helixAdmin.getInstancesInClusterWithTag(getHelixClusterName(), brokerTenant).size(), 1); } catch (Exception e) { Assert.fail("Exception caught while getting all instances in cluster with tag: " + e); } finally { tearDownCluster(); } } /** * Set up the Pinot cluster with provided number of brokers and servers. 
* * @param numBrokers * @param numServers * @throws Exception */ public void setupCluster(int numBrokers, int numServers) throws Exception { startZk(); startController(); startBrokers(numBrokers); startServers(numServers); addOfflineTable("", "", -1, "", tenantName, tenantName, TABLE_NAME, SegmentVersion.v1); } /** * Tear down the Pinot cluster. * * @throws Exception */ public void tearDownCluster() throws Exception { stopBroker(); stopController(); stopServer(); stopZk(); } /** * Helper method to reduce the number of brokers to the provided value. * Assumes that the number of existing brokers is greater then the desired number. * * @param newNumBrokers * @return * @throws Exception */ private String scaleDownBroker(int newNumBrokers) throws Exception { Tenant tenant = new Tenant.TenantBuilder(tenantName).setRole(TenantRole.BROKER) .setTotalInstances(newNumBrokers) .setOfflineInstances(newNumBrokers) .build(); // Send the 'put' (instead of 'post') request, that updates the tenants instead of creating. JSONObject request = tenant.toJSON(); sendPutRequest(ControllerRequestURLBuilder.baseUrl(CONTROLLER_BASE_API_URL).forBrokerTenantCreate(), request.toString()); Thread.sleep(WAIT_TIME_FOR_TENANT_UPDATE); return brokerTenant; } } <file_sep>/** * Copyright (C) 2014-2016 LinkedIn Corp. (<EMAIL>) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package com.linkedin.pinot.controller.api.restlet.resources; import com.linkedin.pinot.common.protocols.SegmentCompletionProtocol; import com.linkedin.pinot.common.restlet.swagger.Description; import com.linkedin.pinot.common.restlet.swagger.HttpVerb; import com.linkedin.pinot.common.restlet.swagger.Paths; import com.linkedin.pinot.common.restlet.swagger.Summary; import com.linkedin.pinot.common.utils.LLCSegmentName; import java.io.File; import java.io.IOException; import java.util.List; import org.apache.commons.fileupload.FileItem; import org.apache.commons.fileupload.disk.DiskFileItemFactory; import org.apache.commons.io.FileUtils; import org.restlet.ext.fileupload.RestletFileUpload; import org.restlet.representation.Representation; import org.restlet.representation.StringRepresentation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class LLCSegmentCommitUpload extends PinotSegmentUploadRestletResource { private static Logger LOGGER = LoggerFactory.getLogger(LLCSegmentCommitUpload.class); public LLCSegmentCommitUpload() throws IOException { } @Override @HttpVerb("post") @Description("Uploads an LLC segment using split commit") @Summary("Uploads an LLC segment using split commit") @Paths({"/" + SegmentCompletionProtocol.MSG_TYPE_SEGMENT_UPLOAD}) // TODO: TInclude the generated file name in the response to the server public Representation post(Representation entity) { SegmentCompletionProtocol.Request.Params params = SegmentCompletionUtils.extractParams(getReference()); if (params == null) { return new StringRepresentation(SegmentCompletionProtocol.RESP_FAILED.toJsonString()); } LOGGER.info(params.toString()); boolean success = uploadSegment(params.getInstanceId(), params.getSegmentName(), params.getOffset()); if (success) { LOGGER.info("Uploaded segment successfully"); return new StringRepresentation(SegmentCompletionProtocol.ControllerResponseStatus.UPLOAD_SUCCESS.toString()); } else { LOGGER.info("Failed to upload segment"); return new 
StringRepresentation(SegmentCompletionProtocol.ControllerResponseStatus.FAILED.toString()); } } boolean uploadSegment(final String instanceId, final String segmentNameStr, final long offset) { // 1/ Create a factory for disk-based file items final DiskFileItemFactory factory = new DiskFileItemFactory(); // 2/ Create a new file upload handler based on the Restlet // FileUpload extension that will parse Restlet requests and // generates FileItems. final RestletFileUpload upload = new RestletFileUpload(factory); final List<FileItem> items; try { // The following statement blocks until the entire segment is read into memory/disk. items = upload.parseRequest(getRequest()); File dataFile = null; // TODO: refactor this part into a util method (almost duplicate code in PinotSegmentUploadRestletResource and // PinotSchemaRestletResource) for (FileItem fileItem : items) { String fieldName = fileItem.getFieldName(); if (dataFile == null) { if (fieldName != null && fieldName.equals(segmentNameStr)) { dataFile = new File(tempDir, fieldName); fileItem.write(dataFile); } else { LOGGER.warn("Invalid field name: {}", fieldName); } } else { LOGGER.warn("Got extra file item while uploading LLC segments: {}", fieldName); } // Remove the temp file // When the file is copied to instead of renamed to the new file, the temp file might be left in the dir fileItem.delete(); } if (dataFile == null) { LOGGER.error("Segment not included in request. Instance {}, segment {}", instanceId, segmentNameStr); return false; } // We will not check for quota here. 
Instead, committed segments will count towards the quota of a // table LLCSegmentName segmentName = new LLCSegmentName(segmentNameStr); final String rawTableName = segmentName.getTableName(); final File tableDir = new File(baseDataDir, rawTableName); String generatedSegmentFileName = SegmentCompletionUtils.generateSegmentFileName(segmentNameStr, offset, instanceId); final File segmentFile = new File(tableDir, generatedSegmentFileName); // Always delete the segment file if it exists for split commit. if (segmentFile.exists()) { LOGGER.warn("Segment file {} exists. Replacing with upload from {}", segmentNameStr, instanceId); FileUtils.deleteQuietly(segmentFile); } FileUtils.moveFile(dataFile, segmentFile); LOGGER.info("Segment file " + generatedSegmentFileName); return true; } catch (Exception e) { LOGGER.error("File upload exception from instance {} for segment {}", instanceId, segmentNameStr, e); } return false; } } <file_sep>package com.linkedin.thirdeye.rootcause.impl; import com.linkedin.thirdeye.anomaly.events.EventDataProviderManager; import com.linkedin.thirdeye.anomaly.events.EventFilter; import com.linkedin.thirdeye.anomaly.events.EventType; import com.linkedin.thirdeye.datalayer.dto.EventDTO; import com.linkedin.thirdeye.rootcause.Pipeline; import com.linkedin.thirdeye.rootcause.PipelineContext; import com.linkedin.thirdeye.rootcause.PipelineResult; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.collections.CollectionUtils; import org.joda.time.DateTime; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * HolidayEventsPipeline produces EventEntities associated with holidays within the current * TimeRange. It matches holidays based on incoming DimensionEntities (e.g. from contribution * analysis) and scores them based on the number of matching DimensionEntities. 
* Holiday pipeline will add a buffer of 2 days to the time range provided */ public class HolidayEventsPipeline extends Pipeline { private static final int HOLIDAY_DAYS_BUFFER = 2; private static final Logger LOG = LoggerFactory.getLogger(HolidayEventsPipeline.class); private final EventDataProviderManager eventDataProvider; /** * Constructor for dependency injection * * @param outputName pipeline output name * @param inputNames input pipeline names * @param eventDataProvider event data provider manager */ public HolidayEventsPipeline(String outputName, Set<String> inputNames, EventDataProviderManager eventDataProvider) { super(outputName, inputNames); this.eventDataProvider = eventDataProvider; } /** * Alternate constructor for RCAFrameworkLoader * * @param outputName pipeline output name * @param inputNames input pipeline names * @param ignore configuration properties (none) */ public HolidayEventsPipeline(String outputName, Set<String> inputNames, Map<String, Object> ignore) { super(outputName, inputNames); this.eventDataProvider = EventDataProviderManager.getInstance(); } @Override public PipelineResult run(PipelineContext context) { TimeRangeEntity current = TimeRangeEntity.getContextCurrent(context); TimeRangeEntity baseline = TimeRangeEntity.getContextBaseline(context); Set<DimensionEntity> dimensionEntities = context.filter(DimensionEntity.class); Map<String, DimensionEntity> urn2entity = EntityUtils.mapEntityURNs(dimensionEntities); List<EventDTO> events = getHolidayEvents(current, dimensionEntities); events.addAll(getHolidayEvents(baseline, dimensionEntities)); Set<EventEntity> entities = new HashSet<>(); for(EventDTO ev : events) { double dimensionScore = makeDimensionScore(urn2entity, ev.getTargetDimensionMap()); EventEntity entity = EventEntity.fromDTO(dimensionScore, ev); LOG.debug("{}: dimension={}, filter={}", entity.getUrn(), dimensionScore, ev.getTargetDimensionMap()); entities.add(entity); } return new PipelineResult(context, entities); } private 
  List<EventDTO> getHolidayEvents(TimeRangeEntity timerangeEntity, Set<DimensionEntity> dimensionEntities) {
    // Pad the start backwards by the holiday buffer; the end is used as-is.
    long start = new DateTime(timerangeEntity.getStart()).minusDays(HOLIDAY_DAYS_BUFFER).getMillis();
    long end = timerangeEntity.getEnd();

    EventFilter filter = new EventFilter();
    filter.setEventType(EventType.HOLIDAY.toString());
    filter.setStartTime(start);
    filter.setEndTime(end);

    // Group dimension values by dimension name, e.g. {country -> [us, de]}.
    Map<String, List<String>> filterMap = new HashMap<>();
    if (CollectionUtils.isNotEmpty(dimensionEntities)) {
      for (DimensionEntity dimensionEntity : dimensionEntities) {
        String dimensionName = dimensionEntity.getName();
        String dimensionValue = dimensionEntity.getValue();
        if (!filterMap.containsKey(dimensionName)) {
          filterMap.put(dimensionName, new ArrayList<String>());
        }
        filterMap.get(dimensionName).add(dimensionValue);
      }
    }
    filter.setTargetDimensionMap(filterMap);

    return eventDataProvider.getEvents(filter);
  }

  /**
   * Sums the scores of all dimension entities referenced by the event's dimension filter map.
   * URNs not present in {@code urn2entity} contribute nothing to the sum.
   */
  static double makeDimensionScore(Map<String, DimensionEntity> urn2entity, Map<String, List<String>> dimensionFilterMap) {
    double sum = 0.0;
    Set<String> urns = filter2urns(dimensionFilterMap);
    for(String urn : urns) {
      if(urn2entity.containsKey(urn)) {
        sum += urn2entity.get(urn).getScore();
      }
    }
    return sum;
  }

  /**
   * Converts a dimension filter map into the set of dimension-entity URNs it refers to.
   * Values are lower-cased before URN formatting.
   */
  static Set<String> filter2urns(Map<String, List<String>> dimensionFilterMap) {
    Set<String> urns = new HashSet<>();
    for(Map.Entry<String, List<String>> e : dimensionFilterMap.entrySet()) {
      for(String val : e.getValue()) {
        urns.add(DimensionEntity.TYPE.formatURN(e.getKey(), val.toLowerCase()));
      }
    }
    return urns;
  }
}
<file_sep>import { ActionTypes } from '../actions/anomaly';

/**
 * Define the schema
 */
const INITIAL_STATE = {
  /**
   * State for loading
   */
  loading: false,

  /**
   * State for loaded
   */
  loaded: false,

  /**
   * State for failed request
   */
  failed: false,

  /**
   * List the anomaly ids in order
   */
  ids: [],

  /**
   * Items in hash map
   */
  entities: {}
};

export default function reducer(state = INITIAL_STATE, action = {}) {
  switch (action.type) {
    case ActionTypes.LOAD: {
      const anomalyList =
action.payload.anomalyDetailsList; const ids = anomalyList.map((anomaly) => anomaly.anomalyId); const entities = anomalyList.reduce((entities, anomaly) => { entities[anomaly.anomalyId] = anomaly; return entities; }, {}); return Object.assign(state, { loading: false, loaded: true, ids, entities, }); } case ActionTypes.LOADING: return Object.assign(state, { loading: true, loaded: false }); case ActionTypes.REQUEST_FAIL: return Object.assign(state, { loading: false, failed: true }); } return state; } <file_sep>package com.linkedin.thirdeye.anomalydetection.context; import com.linkedin.thirdeye.constant.AnomalyFeedbackType; import com.linkedin.thirdeye.constant.FeedbackStatus; public interface AnomalyFeedback { /** * Set feedback type (e.g., anomaly, anomaly no action, etc.) * @param feedbackType feedback type */ void setFeedbackType(AnomalyFeedbackType feedbackType); /** * Get feedback type (e.g., anomaly, anomaly no action, etc.) * @return feedback type */ AnomalyFeedbackType getFeedbackType(); /** * Set status (e.g., in progress, resolve, etc.) of this feedback. * @param status status of this feedback */ void setStatus(FeedbackStatus status); /** * Get status (e.g., in progress, resolve, etc.) of this feedback. * @return status of this feedback */ FeedbackStatus getStatus(); /** * Set comment for this feedback. * @param comment comment for this feedback. */ void setComment(String comment); /** * Get comment of this feedback. * @return comment of this feedback. */ String getComment(); } <file_sep>package com.linkedin.thirdeye.dataframe; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; import org.apache.commons.lang.ArrayUtils; /** * Container for a one-dimensional series of elements with a common primitive type. * Supports transparent conversion between different primitive types and implements * common logic for element management, transformation and aggregation. 
 *
 * Series are designed to be immutable (albeit with some limitations due to Java's
 * primitive array model). Operations return new Series instances without modifying
 * the underlying data structures.
 */
public abstract class Series {
  // Column names used by GroupingDataFrame for the key and aggregated-value series.
  public static final String GROUP_KEY = "key";
  public static final String GROUP_VALUE = "value";
  public static final String TOSTRING_NULL = "null";

  public enum SeriesType {
    DOUBLE,
    LONG,
    STRING,
    BOOLEAN
  }

  enum JoinType {
    INNER,
    OUTER,
    LEFT,
    RIGHT
  }

  /**
   * Top-level interface to denote a function that may be applied to one (or multiple) series.
   * Functions may be applied either row-by-row across multiple series or to all values within
   * a single series.
   * <br/><b>NOTE:</b> Functions MAY NOT receive a {@code null} value as an input. Rather, if
   * any one of the input values is {@code null}, the result is set to {@code null} by the
   * Series framework.
   * <br/><b>NOTE:</b> Function MAY return {@code null} as a result, however.
   */
  public interface Function {
    // left blank
  }

  public interface Conditional extends Function {
    // left blank
  }

  // @FunctionalInterface
  public interface DoubleConditional extends Conditional {
    boolean apply(double... values);
  }

  // @FunctionalInterface
  public interface LongConditional extends Conditional {
    boolean apply(long... values);
  }

  // @FunctionalInterface
  public interface StringConditional extends Conditional {
    boolean apply(String... values);
  }

  // @FunctionalInterface
  public interface BooleanConditional extends Conditional {
    boolean apply(boolean... values);
  }

  // @FunctionalInterface
  public interface DoubleFunction extends Function {
    double NULL = DoubleSeries.NULL;
    double apply(double... values);
  }

  // @FunctionalInterface
  public interface LongFunction extends Function {
    long NULL = LongSeries.NULL;
    long apply(long... values);
  }

  // @FunctionalInterface
  public interface StringFunction extends Function {
    String NULL = StringSeries.NULL;
    String apply(String... values);
  }

  // @FunctionalInterface
  public interface BooleanFunction extends Function {
    boolean apply(boolean... values);
  }

  // @FunctionalInterface
  // Tri-state boolean variant: values are bytes so NULL can be represented.
  public interface BooleanFunctionEx extends Function {
    byte TRUE = BooleanSeries.TRUE;
    byte FALSE = BooleanSeries.FALSE;
    byte NULL = BooleanSeries.NULL;
    byte apply(byte... values);
  }

  /**
   * Helper container for references generated by grouping
   */
  public static final class Bucket {
    // Indices into the source series that belong to this group.
    final int[] fromIndex;

    Bucket(int[] fromIndex) {
      this.fromIndex = fromIndex;
    }

    public int size() {
      return this.fromIndex.length;
    }
  }

  /**
   * Base class for specialized Series builders
   */
  public static abstract class Builder {
    public abstract Series build();

    public abstract Builder addSeries(Collection<Series> series);

    public Builder addSeries(Series... series) {
      return this.addSeries(Arrays.asList(series));
    }
  }

  /**
   * Grouping container referencing a single series. Holds group keys and the indices of group
   * elements in the source series. Enables aggregation with custom user functions.
   */
  public static final class SeriesGrouping {
    final Series keys;
    final Series source;
    final List<Bucket> buckets;

    SeriesGrouping(Series keys, Series source, List<Bucket> buckets) {
      // Invariant: exactly one bucket per key.
      if(keys.size() != buckets.size())
        throw new IllegalArgumentException("key series and bucket count must be equal");
      this.keys = keys;
      this.source = source;
      this.buckets = buckets;
    }

    SeriesGrouping(Series source) {
      // Empty grouping: no keys, no buckets.
      this.keys = LongSeries.buildFrom();
      this.source = source;
      this.buckets = Collections.emptyList();
    }

    /**
     * Applies index-based groups to a different series. Used by DataFrame for grouping across
     * multiple series.
     *
     * @param s other series
     * @return SeriesGrouping with different size
     */
    SeriesGrouping applyTo(Series s) {
      return new SeriesGrouping(this.keys, s, this.buckets);
    }

    /**
     * Returns the number of groups
     *
     * @return group count
     */
    public int size() {
      return this.keys.size();
    }

    /**
     * Returns the keys of each group in the container as series.
     *
     * @return key series
     */
    public Series keys() {
      return this.keys;
    }

    /**
     * Returns the source series this grouping applies to.
     *
     * @return source series
     */
    public Series source() {
      return this.source;
    }

    /**
     * Returns {@code true} if the grouping container does not hold any groups.
     *
     * @return {@code true} is empty, {@code false} otherwise.
     */
    public boolean isEmpty() {
      return this.keys.isEmpty();
    }

    /**
     * Applies {@code function} as aggregation function to all values per group and
     * returns the result as a new DataFrame with the number of elements equal to the size
     * of the key series.
     * If the series' native types do not match the required input type of {@code function},
     * the series are converted transparently. The native type of the aggregated series is
     * determined by {@code function}'s output type.
     *
     * @param function aggregation function to map to each grouped series
     * @return grouped aggregation series
     */
    public GroupingDataFrame aggregate(Function function) {
      Builder builder = this.source.getBuilder();
      // Each bucket aggregates to a single-element series; append them in key order.
      for(Bucket b : this.buckets) {
        builder.addSeries(this.source.project(b.fromIndex).aggregate(function));
      }
      return makeAggregate(this.keys, builder.build());
    }

    /**
     * Counts the number of elements in each group and returns the result as a new DataFrame
     * with the number of elements equal to the size of the key series.
     *
     * @return grouped aggregation series
     */
    public GroupingDataFrame count() {
      long[] values = new long[this.buckets.size()];
      int i = 0;
      for(Bucket b : this.buckets) {
        values[i++] = b.size();
      }
      return makeAggregate(this.keys, LongSeries.buildFrom(values));
    }

    static GroupingDataFrame makeAggregate(Series keys, Series values) {
      return new GroupingDataFrame(GROUP_KEY, GROUP_VALUE, keys, values);
    }
  }

  /**
   * GroupingDataFrame holds the result of a series aggregation after grouping. It functions like
   * a regular DataFrame, but provides additional comfort for accessing key and value columns.
   *
   * @see DataFrame
   */
  public static final class GroupingDataFrame extends DataFrame {
    final String keyName;
    final String valueName;

    GroupingDataFrame(String keyName, String valueName, Series keys, Series values) {
      this.keyName = keyName;
      this.valueName = valueName;
      this.addSeries(keyName, keys);
      this.addSeries(valueName, values);
      // The key column doubles as the frame's index.
      this.setIndex(keyName);
    }

    public Series getKeys() {
      return this.get(this.keyName);
    }

    public Series getValues() {
      return this.get(this.valueName);
    }

    public String getKeyName() {
      return this.keyName;
    }

    public String getValueName() {
      return this.valueName;
    }
  }

  /**
   * Helper container for index-pairs generated by join logic
   */
  static final class JoinPair {
    final int left;
    final int right;

    public JoinPair(int left, int right) {
      this.left = left;
      this.right = right;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      JoinPair joinPair = (JoinPair) o;
      return (left == joinPair.left) && (right == joinPair.right);
    }

    @Override
    public int hashCode() {
      // Standard 31-multiplier hash over both members, consistent with equals().
      int result = left;
      result = 31 * result + right;
      return result;
    }
  }

  /* *************************************************************************
   * Public abstract interface
   * *************************************************************************/

  /**
   * Returns the number of elements contained in the series.
   *
   * <br/><b>NOTE:</b> {@code null} values count as elements.
   *
   * @return series size
   */
  public abstract int size();

  /**
   * Returns the series' native type.
   *
   * @return series type
   */
  public abstract SeriesType type();

  /**
   * Slices the series from index {@code from} (inclusive) to index {@code to}
   * (exclusive) and returns the result as a series of the same native type.
   *
   * @param from start index (inclusive), must be >= 0
   * @param to end index (exclusive), must be <= size
   * @return sliced series copy
   */
  public abstract Series slice(int from, int to);

  /**
   * Returns the value referenced by {@code index} as double.
   * The value is converted
   * transparently if the native type of the underlying series is different. The
   * {@code index} must be between {@code 0} and the size of the series.
   *
   * @param index index of value
   * @throws IndexOutOfBoundsException if index is outside the series bounds
   * @return double value
   */
  public abstract double getDouble(int index);

  /**
   * Returns the value referenced by {@code index} as long. The value is converted
   * transparently if the native type of the underlying series is different. The
   * {@code index} must be between {@code 0} and the size of the series.
   *
   * @param index index of value
   * @throws IndexOutOfBoundsException if index is outside the series bounds
   * @return long value
   */
  public abstract long getLong(int index);

  /**
   * Returns the value referenced by {@code index} as byte (tri-state boolean).
   * The value is converted transparently if the native type of the underlying
   * series is different. The {@code index} must be between {@code 0} and the
   * size of the series.
   *
   * @param index index of value
   * @throws IndexOutOfBoundsException if index is outside the series bounds
   * @return byte value
   */
  public abstract byte getBoolean(int index);

  /**
   * Returns the value referenced by {@code index} as String. The value is converted
   * transparently if the native type of the underlying series is different. The
   * {@code index} must be between {@code 0} and the size of the series.
   *
   * @param index index of value
   * @throws IndexOutOfBoundsException if index is outside the series bounds
   * @return string value
   */
  public abstract String getString(int index);

  /**
   * Returns {@code true} if the value referenced by {@code index} is null. Otherwise,
   * returns {@code false}.
   *
   * @param index index of value
   * @throws IndexOutOfBoundsException if index is outside the series bounds
   * @return {@code true} if value is null, otherwise {@code false}
   */
  public abstract boolean isNull(int index);

  /**
   * Returns a human-readable String representation of the value referenced by {@code index}.
   *
   * @param index index of value
   * @throws IndexOutOfBoundsException if index is outside the series bounds
   * @return human-readable string representation
   */
  public abstract String toString(int index);

  /**
   * Returns a copy of the series with values ordered in ascending order.
   *
   * <br/><b>NOTE:</b> BooleanSeries interprets {@code false} as smaller than {@code true}.
   *
   * @return sorted series copy
   */
  public abstract Series sorted();

  /**
   * Returns a copy of the series with {@code null} values replaced by the series' default
   * value.
   *
   * @return series copy with filled nulls
   */
  public abstract Series fillNull();

  /**
   * Returns a new builder instance for the native type encapsulated by this series.
   *
   * @return series builder
   */
  public abstract Builder getBuilder();

  /**
   * Returns a copy of the series with values replaced by {@code null} for every row in
   * {@code filter} that is not {@code true}.
   *
   * @param mask series to filter by
   * @return filtered series copy
   */
  public abstract Series filter(BooleanSeries mask);

  /* *************************************************************************
   * Internal abstract interface
   * *************************************************************************/

  /**
   * Returns projection of the series.
   *
   * <br/><b>NOTE:</b> fromIndex <= -1 is filled with {@code null}.
   * <br/><b>NOTE:</b> array with length 0 returns empty series.
   * <br/><b>NOTE:</b> could replace {@code slice(int, int)}, but low performance
   *
   * @param fromIndex array with indices to project from (must be <= series size)
   * @return series projection
   */
  abstract Series project(int[] fromIndex);

  /**
   * Compares values across two series with potentially different native types based on index.
   * If the types are different the values in {@code that} are transparently converted to the
   * native type of this series.
   *
   * <br/><b>Note:</b> the transparent conversion may cause different behavior between
   * {@code this.compare(that)} and {@code that.compare(this)}.
   *
   * @param that other series with same native type (may reference itself)
   * @param indexThis index in this series
   * @param indexThat index in the other series
   * @return 0 if the referenced values are equal, -1 if {@code this} is less than {@code that}, 1 otherwise
   */
  abstract int compare(Series that, int indexThis, int indexThat);

  /**
   * Returns an array of indices with a size equal to the series size, such that the values
   * references by the indices are sorted in ascending order.
   *
   * <br/><b>NOTE:</b> output can be used directly by {@code project()} to create a sorted copy of the series.
   *
   * @return indices of sorted values
   */
  abstract int[] sortedIndex();

  /* *************************************************************************
   * Public interface
   * *************************************************************************/

  /**
   * Returns series {@code s} converted to type {@code type} unless native type matches already.
   *
   * @param type target type
   * @return converted series
   */
  public final Series get(Series.SeriesType type) {
    switch(type) {
      case DOUBLE:
        return this.getDoubles();
      case LONG:
        return this.getLongs();
      case BOOLEAN:
        return this.getBooleans();
      case STRING:
        return this.getStrings();
      default:
        throw new IllegalArgumentException(String.format("Unknown series type '%s'", type));
    }
  }

  /**
   * Returns the series as DoubleSeries.
   * The underlying series is converted
   * transparently if the series' native type is different.
   *
   * @return DoubleSeries equivalent
   */
  public DoubleSeries getDoubles() {
    double[] values = new double[this.size()];
    for(int i=0; i<this.size(); i++) {
      values[i] = this.getDouble(i);
    }
    return DoubleSeries.buildFrom(values);
  }

  /**
   * Returns the series as LongSeries. The underlying series is converted
   * transparently if the series' native type is different.
   *
   * @return LongSeries equivalent
   */
  public LongSeries getLongs() {
    long[] values = new long[this.size()];
    for(int i=0; i<this.size(); i++) {
      values[i] = this.getLong(i);
    }
    return LongSeries.buildFrom(values);
  }

  /**
   * Returns the series as BooleanSeries. The underlying series is converted
   * transparently if the series' native type is different.
   *
   * @return BooleanSeries equivalent
   */
  public BooleanSeries getBooleans() {
    byte[] values = new byte[this.size()];
    for(int i=0; i<this.size(); i++) {
      values[i] = this.getBoolean(i);
    }
    return BooleanSeries.buildFrom(values);
  }

  /**
   * Returns the series as StringSeries. The underlying series is converted
   * transparently if the series' native type is different.
   *
   * @return StringSeries equivalent
   */
  public StringSeries getStrings() {
    String[] values = new String[this.size()];
    for(int i=0; i<this.size(); i++) {
      values[i] = this.getString(i);
    }
    return StringSeries.buildFrom(values);
  }

  /**
   * Returns as copy of the series with the same native type.
   *
   * @return series copy
   */
  public Series copy() {
    return this.slice(0, this.size());
  }

  /**
   * Returns a copy of the series with values from {@code other}
   * appended at the end. If {@code other} has different native types they are
   * converted transparently.
   *
   * <br/><b>NOTE:</b> newSize = oldSize + otherSize
   *
   * @param other other series to append at the end
   * @return concatenated series
   */
  public Series append(Series... other) {
    return this.getBuilder().addSeries(this).addSeries(other).build();
  }

  /**
   * Fills {@code null} values in the series with a copy of the last valid value. The index
   * is traversed in ascending order. If the last valid value does not exist (such as for the
   * first element in a series) it is left at {@code null}.
   *
   * @return forward filled series
   */
  public Series fillNullForward() {
    // Build a projection that maps each position to the last non-null index seen;
    // -1 (no valid value yet) projects to null per project()'s contract.
    int lastValueIndex = -1;
    int[] fromIndex = new int[this.size()];
    for(int i=0; i<this.size(); i++) {
      if(!isNull(i))
        lastValueIndex = i;
      fromIndex[i] = lastValueIndex;
    }
    return this.project(fromIndex);
  }

  /**
   * Fills {@code null} values in the series with a copy of the last valid value. The index
   * is traversed in descending order. If the last valid value does not exist (such as for the
   * last element in a series) it is left at {@code null}.
   *
   * @return backward filled series
   */
  public Series fillNullBackward() {
    // Mirror of fillNullForward(): walk from the end towards the start.
    int lastValueIndex = -1;
    int[] fromIndex = new int[this.size()];
    for(int i=this.size()-1; i>=0; i--) {
      if(!isNull(i))
        lastValueIndex = i;
      fromIndex[i] = lastValueIndex;
    }
    return this.project(fromIndex);
  }

  /**
   * Returns a copy of the series with all values' indices
   * shifted by {@code offset} positions while
   * leaving the series size unchanged. Values shifted outside to upper (or lower)
   * bounds of the series are dropped. Vacated positions are padded with {@code null}.
   *
   * <br/><b>NOTE:</b> for each value, newIndex = oldIndex + offset
   *
   * @param offset offset to shift values by. Can be positive or negative.
   * @return shifted series copy
   */
  // NOTE: override for performance
  public Series shift(int offset) {
    int[] fromIndex = new int[this.size()];
    int from = 0;
    // Leading positions vacated by a positive shift are padded with null (-1 index).
    for(int i=0; i<Math.min(offset, this.size()); i++) {
      fromIndex[from++] = -1;
    }
    // Copy the surviving window of values to their shifted positions.
    for(int i=Math.max(offset, 0); i<Math.max(Math.min(this.size() + offset, this.size()), 0); i++) {
      fromIndex[from++] = i - offset;
    }
    // Trailing positions vacated by a negative shift are padded with null (-1 index).
    for(int i=Math.max(this.size() + offset, 0); i<this.size(); i++) {
      fromIndex[from++] = -1;
    }
    return this.project(fromIndex);
  }

  /**
   * Returns {@code true} is there are no values in the series. Otherwise returns {@code false}.
   *
   * <br/><b>NOTE:</b> {@code null} values count as elements.
   *
   * @return {@code true} if empty, {@code false} otherwise
   */
  public final boolean isEmpty() {
    return this.size() <= 0;
  }

  /**
   * Returns {@code true} if the series contains at least one {@code null}. Otherwise
   * returns {@code false}.
   *
   * @return {@code true} if the series contains at least one {@code null}, {@code false} otherwise
   */
  public final boolean hasNull() {
    return this.count() < this.size();
  }

  /**
   * Returns the number of non-null values in the series.
   *
   * @return count of non-null values
   */
  public final int count() {
    int countNotNull = 0;
    for(int i=0; i<this.size(); i++)
      if(!this.isNull(i))
        countNotNull++;
    return countNotNull;
  }

  /**
   * Returns a copy of the series containing at maximum the first {@code n} elements of the series.
   * If {@code n} is larger than the series size, the entire series is returned. Additional values
   * to make up the difference between {@code n} and the size are not padded.
   *
   * @param n number of elements
   * @return series copy with at most the first {@code n} elements
   */
  public Series head(int n) {
    return this.slice(0, Math.min(n, this.size()));
  }

  /**
   * Returns a copy of the series containing at maximum the last {@code n} elements of the series.
   * If {@code n} is larger than the series size, the entire series is returned. Additional values
   * to make up the difference between {@code n} and the size are not padded.
   *
   * @param n number of elements
   * @return series copy with at most the last {@code n} elements
   */
  public Series tail(int n) {
    int len = this.size();
    return this.slice(len - Math.min(n, len), len);
  }

  /**
   * Returns a copy of the series omitting any elements before index {@code n}.
   * If {@code n} is {@code 0}, the entire series is returned. If {@code n} is greater than
   * the series size, an empty series is returned.
   *
   * @param from start index of copy (inclusive)
   * @return series copy with elements from index {@code from}.
   */
  public Series sliceFrom(int from) {
    return this.slice(Math.max(from, 0), this.size());
  }

  /**
   * Returns a copy of the series omitting any elements equal to or after index {@code n}.
   * If {@code n} is equal or greater than the series size, the entire series is returned.
   * If {@code n} is {@code 0}, an empty series is returned.
   *
   * @param to end index of copy (exclusive)
   * @return series copy with elements before index {@code to}.
   */
  public Series sliceTo(int to) {
    return this.slice(0, Math.min(to, this.size()));
  }

  /**
   * Returns a copy of the series with elements in reverse order from the original series.
   *
   * @return reversed series
   */
  public Series reverse() {
    int[] fromIndex = new int[this.size()];
    for (int i = 0; i < fromIndex.length; i++) {
      fromIndex[i] = fromIndex.length - i - 1;
    }
    return this.project(fromIndex);
  }

  /**
   * Returns a copy of the series with each distinct value of the
   * source series appearing exactly once. The values are further sorted in ascending order.
   *
   * @return sorted series copy with distinct unique values
   */
  public Series unique() {
    // NOTE(review): for size <= 1 this returns the same instance rather than a copy;
    // relies on series immutability -- confirm this is intended.
    if(this.size() <= 1)
      return this;
    Series sorted = this.sorted();
    List<Integer> indices = new ArrayList<>();
    indices.add(0);
    // Sorting places duplicates adjacently; keep only the first element of each run.
    for(int i=1; i<this.size(); i++) {
      if(sorted.compare(sorted, i-1, i) != 0)
        indices.add(i);
    }
    int[] fromIndex = ArrayUtils.toPrimitive(indices.toArray(new Integer[indices.size()]));
    return sorted.project(fromIndex);
  }

  /**
   * Returns a copy of the series omitting any {@code null} values.
   *
   * @return series copy without {@code nulls}
   */
  public Series dropNull() {
    int[] fromIndex = new int[this.size()];
    int count = 0;
    for(int i=0; i<this.size(); i++) {
      if(!isNull(i))
        fromIndex[count++] = i;
    }
    return this.project(Arrays.copyOf(fromIndex, count));
  }

  /**
   * Returns a BooleanSeries which contains a value indicating the null-equivalence for each
   * value in the original series (this).
   *
   * @return boolean series indicating null-equivalence of each value
   */
  public BooleanSeries isNull() {
    byte[] values = new byte[this.size()];
    for(int i=0; i<this.size(); i++) {
      values[i] = BooleanSeries.valueOf(this.isNull(i));
    }
    return BooleanSeries.buildFrom(values);
  }

  /**
   * Returns a copy of the series with values replaced by {@code null} for every row in
   * the result of applying {@code conditional} to the series that is not {@code true}.
   *
   * @param conditional conditional to apply and filter by
   * @return filtered series copy
   */
  public Series filter(Conditional conditional) {
    return this.filter(this.map(conditional));
  }

  //
  // NOTE: co-variant method messiness
  //

  /**
   * Applies {@code function} to the series row by row and returns the results as a new series.
   * If the series' native types do not match the required input type of {@code function},
   * the series are converted transparently. The native type of the returned series is
   * determined by {@code function}'s output type.
   *
   * @param function function to apply to each row
   * @param series series to apply function to
   * @return series with evaluation results
   */
  public static Series map(Function function, Series... series) {
    // Dispatch on the concrete function interface to the matching typed implementation.
    if(function instanceof DoubleFunction) {
      return DoubleSeries.map((DoubleFunction)function, series);
    } else if(function instanceof LongFunction) {
      return LongSeries.map((LongFunction)function, series);
    } else if(function instanceof StringFunction) {
      return StringSeries.map((StringFunction)function, series);
    } else if(function instanceof BooleanFunction) {
      return BooleanSeries.map((BooleanFunction)function, series);
    } else if(function instanceof BooleanFunctionEx) {
      return BooleanSeries.map((BooleanFunctionEx)function, series);
    } else if(function instanceof DoubleConditional) {
      return DoubleSeries.map((DoubleConditional)function, series);
    } else if(function instanceof LongConditional) {
      return LongSeries.map((LongConditional)function, series);
    } else if(function instanceof StringConditional) {
      return StringSeries.map((StringConditional)function, series);
    } else if(function instanceof BooleanConditional) {
      return BooleanSeries.map((BooleanConditional)function, series);
    }
    throw new IllegalArgumentException(String.format("Unknown function type '%s'", function.getClass()));
  }

  /**
   * Applies {@code function} to the series row by row and returns the results as a new series.
   * If the series' native type does not match the required input type of {@code function},
   * the series is converted transparently. The native type of the returned series is
   * determined by {@code function}'s output type.
   *
   * @param function function to map to each element in the series
   * @return series with evaluation results
   */
  public final Series map(Function function) {
    return map(function, this);
  }

  /**
   * @see Series#map(Function)
   */
  public final DoubleSeries map(DoubleFunction function) {
    return (DoubleSeries)map(function, this);
  }

  /**
   * @see Series#map(Function)
   */
  public final LongSeries map(LongFunction function) {
    return (LongSeries)map(function, this);
  }

  /**
   * @see Series#map(Function)
   */
  public final StringSeries map(StringFunction function) {
    return (StringSeries)map(function, this);
  }

  /**
   * @see Series#map(Function)
   */
  public final BooleanSeries map(BooleanFunction function) {
    return (BooleanSeries)map(function, this);
  }

  /**
   * @see Series#map(Function)
   */
  public final BooleanSeries map(BooleanFunctionEx function) {
    return (BooleanSeries)map(function, this);
  }

  /**
   * @see Series#map(Function)
   */
  public final BooleanSeries map(Conditional conditional) {
    return (BooleanSeries)map(conditional, this);
  }

  //
  // NOTE: co-variant method messiness
  //

  /**
   * Applies {@code function} as aggregation function to all values in the series at once and
   * returns the result as a new series with a single element.
   * If the series' native type does not match the required input type of {@code function},
   * the series is converted transparently. The native type of the returned series is
   * determined by {@code function}'s output type.
   *
   * @param function aggregation function to map to the series
   * @return single element series
   */
  public final Series aggregate(Function function) {
    // Dispatch on the concrete function interface to the matching typed implementation.
    if(function instanceof DoubleFunction) {
      return DoubleSeries.aggregate((DoubleFunction)function, this);
    } else if(function instanceof LongFunction) {
      return LongSeries.aggregate((LongFunction)function, this);
    } else if(function instanceof StringFunction) {
      return StringSeries.aggregate((StringFunction)function, this);
    } else if(function instanceof BooleanFunction) {
      return BooleanSeries.aggregate((BooleanFunction)function, this);
    } else if(function instanceof BooleanFunctionEx) {
      return BooleanSeries.aggregate((BooleanFunctionEx)function, this);
    } else if(function instanceof DoubleConditional) {
      return DoubleSeries.aggregate((DoubleConditional)function, this);
    } else if(function instanceof LongConditional) {
      return LongSeries.aggregate((LongConditional)function, this);
    } else if(function instanceof StringConditional) {
      return StringSeries.aggregate((StringConditional)function, this);
    } else if(function instanceof BooleanConditional) {
      return BooleanSeries.aggregate((BooleanConditional)function, this);
    }
    throw new IllegalArgumentException(String.format("Unknown function type '%s'", function.getClass()));
  }

  /**
   * @see Series#aggregate(Function)
   */
  public final DoubleSeries aggregate(DoubleFunction function) {
    return (DoubleSeries)this.aggregate((Function)function);
  }

  /**
   * @see Series#aggregate(Function)
   */
  public final LongSeries aggregate(LongFunction function) {
    return (LongSeries)this.aggregate((Function)function);
  }

  /**
   * @see Series#aggregate(Function)
   */
  public final StringSeries aggregate(StringFunction function) {
    return (StringSeries)this.aggregate((Function)function);
  }

  /**
   * @see Series#aggregate(Function)
   */
  public final BooleanSeries aggregate(BooleanFunction function) {
    return (BooleanSeries)this.aggregate((Function)function);
  }

  /**
   * @see Series#aggregate(Function)
   */
  public final BooleanSeries aggregate(BooleanFunctionEx function) {
    return (BooleanSeries)this.aggregate((Function)function);
  }

  /**
   * @see Series#aggregate(Function)
   */
  public final BooleanSeries aggregate(Conditional conditional) {
    return (BooleanSeries)this.aggregate((Function)conditional);
  }

  /**
   * Returns a SeriesGrouping based on value. Elements are grouped into separate buckets for each
   * distinct value in the series.
   *
   * <br/><b>NOTE:</b> the resulting keys are equivalent to calling {@code unique()} on the series.
   *
   * @return grouping by value
   */
  public final SeriesGrouping groupByValue() {
    if(this.isEmpty())
      return new SeriesGrouping(this);
    List<Bucket> buckets = new ArrayList<>();
    int[] sref = this.sortedIndex();

    // Walk the sorted index; a change in value closes the current bucket.
    int bucketOffset = 0;
    for(int i=1; i<sref.length; i++) {
      if(this.compare(this, sref[i-1], sref[i]) != 0) {
        int[] fromIndex = Arrays.copyOfRange(sref, bucketOffset, i);
        buckets.add(new Bucket(fromIndex));
        bucketOffset = i;
      }
    }
    int[] fromIndex = Arrays.copyOfRange(sref, bucketOffset, sref.length);
    buckets.add(new Bucket(fromIndex));

    // keys from buckets
    int[] keyIndex = new int[buckets.size()];
    int i = 0;
    for(Bucket b : buckets) {
      keyIndex[i++] = b.fromIndex[0];
    }
    return new SeriesGrouping(this.project(keyIndex), this, buckets);
  }

  /**
   * Returns a SeriesGrouping based on element count per buckets. Elements are grouped into buckets
   * based on a greedy algorithm with fixed bucket size. The size of all buckets (except for the
   * last) is guaranteed to be equal to {@code bucketSize}.
*
 * @param bucketSize maximum number of elements per bucket
 * @return grouping by element count
 */
public final SeriesGrouping groupByCount(int bucketSize) {
  // delegate to the static partitioner, then bind the buckets to this series
  return groupByCount(this.size(), bucketSize).applyTo(this);
}

/**
 * Computes the bucket layout for a count-based grouping without touching any
 * series data.
 *
 * @see Series#groupByCount(int)
 */
static final SeriesGrouping groupByCount(int size, int bucketSize) {
  if(bucketSize <= 0)
    throw new IllegalArgumentException("bucketSize must be greater than 0");
  if(size <= 0)
    return new SeriesGrouping(null);

  // clamp the bucket span to the series size, then derive the bucket count
  // via ceiling division
  int span = Math.min(bucketSize, size);
  int bucketCount = (size - 1) / span + 1;

  List<Bucket> bucketList = new ArrayList<>();
  long[] bucketKeys = new long[bucketCount];
  for(int b=0; b<bucketCount; b++) {
    int lo = b * span;
    int hi = Math.min(lo + span, size);
    int[] indices = new int[hi - lo];
    for(int k=lo; k<hi; k++)
      indices[k - lo] = k;
    bucketList.add(new Bucket(indices));
    bucketKeys[b] = b;
  }
  return new SeriesGrouping(DataFrame.toSeries(bucketKeys), null, bucketList);
}

/**
 * Returns a SeriesGrouping based on a fixed number of buckets. Elements are grouped into buckets
 * based on a greedy algorithm to approximately evenly fill buckets. The number of buckets
 * is guaranteed to be equal to {@code partitionCount} even if some remain empty.
* * @param partitionCount number of buckets * @return grouping by bucket count */ public final SeriesGrouping groupByPartitions(int partitionCount) { return groupByPartitions(this.size(), partitionCount).applyTo(this); } /** * @see Series#groupByPartitions(int) */ static final SeriesGrouping groupByPartitions(int size, int partitionCount) { if(partitionCount <= 0) throw new IllegalArgumentException("partitionCount must be greater than 0"); if(size <= 0) return new SeriesGrouping(null); double perPartition = size / (double)partitionCount; long[] keys = new long[partitionCount]; List<Bucket> buckets = new ArrayList<>(); for(int i=0; i<partitionCount; i++) { int from = (int)Math.round(i * perPartition); int to = (int)Math.round((i+1) * perPartition); int[] fromIndex = new int[to-from]; for(int j=0; j<fromIndex.length; j++) { fromIndex[j] = j + from; } buckets.add(new Bucket(fromIndex)); keys[i] = i; } return new SeriesGrouping(DataFrame.toSeries(keys), null, buckets); } /** * Returns an (overlapping) SeriesGrouping based on a moving window size. Elements are grouped * into overlapping buckets in sequences of {@code windowSize} consecutive items. The number * of buckets is guaranteed to be equal to {@code series_size - moving_window_size + 1}, or * 0 if the window size is greater than the series size. 
* * @param windowSize size of moving window * @return grouping by moving window */ public final SeriesGrouping groupByMovingWindow(int windowSize) { return groupByMovingWindow(this.size(), windowSize).applyTo(this); } /** * @see Series#groupByMovingWindow(int) */ static final SeriesGrouping groupByMovingWindow(int size, int windowSize) { if(windowSize <= 0) throw new IllegalArgumentException("windowSize must be greater than 0"); if(size < windowSize) return new SeriesGrouping(null); int windowCount = size - windowSize + 1; long[] keys = new long[windowCount]; List<Bucket> buckets = new ArrayList<>(); for(int i=0; i<windowCount; i++) { keys[i] = i; int[] fromIndex = new int[windowSize]; for(int j=0; j<windowSize; j++) { fromIndex[j] = i + j; } buckets.add(new Bucket(fromIndex)); } return new SeriesGrouping(DataFrame.toSeries(keys), null, buckets); } /** * Returns an (overlapping) SeriesGrouping based on an expanding window. Elements are grouped * into overlapping buckets in expanding sequences of consecutive items (always starting with * index {@code 0}). The number of buckets is guaranteed to be equal to {@code series_size}. * * @return grouping by expanding window */ public final SeriesGrouping groupByExpandingWindow() { return groupByExpandingWindow(this.size()).applyTo(this); } /** * @see Series#groupByExpandingWindow() */ static final SeriesGrouping groupByExpandingWindow(int size) { if(size <= 0) return new SeriesGrouping(null); long[] keys = new long[size]; List<Bucket> buckets = new ArrayList<>(); for(int i=0; i<size; i++) { keys[i] = i; int[] fromIndex = new int[i + 1]; for(int j=0; j<=i; j++) { fromIndex[j] = j; } buckets.add(new Bucket(fromIndex)); } return new SeriesGrouping(DataFrame.toSeries(keys), null, buckets); } /** * Returns a concatenation of {@code series} as a new series with a native type equal * to the first series. If subsequent series have different native types they are * converted transparently. 
*
 * @param series series to concatenate
 * @return concatenated series
 */
public static Series concatenate(Series... series) {
  if(series.length <= 0)
    throw new IllegalArgumentException("Must concatenate at least one series");
  // the first series fixes the native type; the rest are appended (and
  // converted by append() as needed)
  Series first = series[0];
  Series[] rest = Arrays.copyOfRange(series, 1, series.length);
  return first.append(rest);
}

/* *************************************************************************
 * Internal interface
 * *************************************************************************/

/**
 * Returns index tuples (pairs) for a join performed based on value.
 *
 * <br/><b>NOTE:</b> the implementation uses merge join. Thus, the index pairs reference
 * values in ascending order.
 *
 * @see Series#compare(Series, int, int)
 *
 * @param other series to match values against
 * @param type type of join to perform
 * @return list of index pairs for join
 */
List<JoinPair> join(Series other, JoinType type) {
  // NOTE: merge join
  int[] lref = this.sortedIndex();
  int[] rref = other.sortedIndex();

  List<JoinPair> pairs = new ArrayList<>();
  int i = 0;
  int j = 0;
  while(i < this.size() || j < other.size()) {
    if(j >= other.size() || (i < this.size() && this.compare(other, lref[i], rref[j]) < 0)) {
      // left value smaller (or right side exhausted): emit unmatched left
      // row only for LEFT/OUTER joins (deliberate switch fall-through)
      switch(type) {
        case LEFT:
        case OUTER:
          pairs.add(new JoinPair(lref[i], -1));
        default:
      }
      i++;
    } else if(i >= this.size() || (j < other.size() && this.compare(other, lref[i], rref[j]) > 0)) {
      // right value smaller (or left side exhausted): emit unmatched right
      // row only for RIGHT/OUTER joins
      switch(type) {
        case RIGHT:
        case OUTER:
          pairs.add(new JoinPair(-1, rref[j]));
        default:
      }
      j++;
    } else if(i < this.size() && j < other.size()) {
      // generate cross product
      // count similar values on the left
      int lcount = 1;
      while(i + lcount < this.size() && this.compare(this, lref[i + lcount], lref[i + lcount - 1]) == 0) {
        lcount++;
      }
      // count similar values on the right
      int rcount = 1;
      while(j + rcount < other.size() && other.compare(other, rref[j + rcount], rref[j + rcount - 1]) == 0) {
        rcount++;
      }
      for(int l=0; l<lcount; l++) {
        for(int r=0; r<rcount; r++) {
          pairs.add(new JoinPair(lref[i + l], rref[j + r]));
        }
      }
      i += lcount;
      j += rcount;
    }
  }

  return pairs;
}

/* **************************************************************************
 * Code grave
 ***************************************************************************/

// NOTE: too slow
// public Series sorted() {
//   return this.project(this.sortedIndex());
// }

// NOTE: too slow
// int[] sortedIndex() {
//   Integer[] fromIndex = new Integer[this.size()];
//   for(int i=0; i<this.size(); i++)
//     fromIndex[i] = i;
//
//   final Series s = this;
//   Arrays.sort(fromIndex, new Comparator<Integer>() {
//     @Override
//     public int compare(Integer o1, Integer o2) {
//       return s.compare(s, o1, o2);
//     }
//   });
//
//   return ArrayUtils.toPrimitive(fromIndex);
// }
}
<file_sep>package com.linkedin.thirdeye.dataframe;

import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;

import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

/**
 * Unit tests for DataFrame and the typed Series implementations.
 */
public class DataFrameTest {
  // canonical boolean/null sentinels mirrored from the series implementations
  final static byte TRUE = BooleanSeries.TRUE;
  final static byte FALSE = BooleanSeries.FALSE;
  final static double DNULL = DoubleSeries.NULL;
  final static long LNULL = LongSeries.NULL;
  final static String SNULL = StringSeries.NULL;
  final static byte BNULL = BooleanSeries.NULL;

  final static double COMPARE_DOUBLE_DELTA = 0.001;

  // shared fixture data: one index plus one column of each native type
  final static long[] INDEX = new long[] { -1, 1, -2, 4, 3 };
  final static double[] VALUES_DOUBLE = new double[] { -2.1, -0.1, 0.0, 0.5, 1.3 };
  final static long[] VALUES_LONG = new long[] { -2, 1, 0, 1, 2 };
  final static String[] VALUES_STRING = new String[] { "-2.3", "-1", "0.0", "0.5", "0.13e1" };
  final static byte[] VALUES_BOOLEAN = new byte[] { 1, 1, 0, 1, 1 };

  // TODO test double batch function
  // TODO test string batch function
  // TODO test boolean batch function
  // TODO string test head,
// tail, accessors
// TODO boolean test head, tail, accessors
// TODO shift double, long, boolean
// TODO fill double, long, boolean

// shared fixture, rebuilt before every test method
DataFrame df;

@BeforeMethod
public void before() {
  df = new DataFrame(INDEX)
      .addSeries("double", VALUES_DOUBLE)
      .addSeries("long", VALUES_LONG)
      .addSeries("string", VALUES_STRING)
      .addSeries("boolean", VALUES_BOOLEAN);
}

@Test
public void testEnforceSeriesLengthPass() {
  df.addSeries("series", VALUES_DOUBLE);
}

@Test(expectedExceptions = IllegalArgumentException.class)
public void testEnforceSeriesLengthFail() {
  df.addSeries("series", 0.1, 3.2);
}

@Test
public void testSeriesName() {
  df.addSeries("ab", VALUES_DOUBLE);
  df.addSeries("_a", VALUES_DOUBLE);
  df.addSeries("a1", VALUES_DOUBLE);
}

@Test
public void testChainedEqualsSeparate() {
  DataFrame dfChained = new DataFrame()
      .addSeries("test", 1, 2, 3)
      .addSeries("drop", 1, 2, 3)
      .renameSeries("test", "checkme")
      .dropSeries("drop");

  DataFrame dfSeparate = new DataFrame();
  dfSeparate.addSeries("test", 1, 2, 3);
  dfSeparate.addSeries("drop", 1, 2, 3);
  dfSeparate.renameSeries("test", "checkme");
  dfSeparate.dropSeries("drop");

  Assert.assertEquals(dfChained.getSeriesNames().size(), 1);
  Assert.assertEquals(dfSeparate.getSeriesNames().size(), 1);
  Assert.assertEquals(dfChained, dfSeparate);
}

@Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "testSeriesNameFailProvider")
public void testSeriesNameFail(String name) {
  df.addSeries(name, VALUES_DOUBLE);
}

// invalid series names: null, empty, leading digit, arithmetic/comparison chars
@DataProvider(name = "testSeriesNameFailProvider")
public Object[][] testSeriesNameFailProvider() {
  return new Object[][] { { null }, { "" }, { "1a" }, { "a,b" }, { "a-b" }, { "a+b" }, { "a*b" }, { "a/b" }, { "a=b" }, { "a>b" } };
}

@Test
public void testIndexColumn() {
  DataFrame dfEmpty = new DataFrame();
  Assert.assertTrue(dfEmpty.getSeriesNames().isEmpty());

  DataFrame dfIndexRange = new DataFrame(0);
  Assert.assertEquals(dfIndexRange.getSeriesNames(), Collections.singleton("index"));
}

@Test
public void testDoubleNoDataDuplication() {
  DoubleSeries first = DataFrame.toSeries(VALUES_DOUBLE);
  DoubleSeries second = DataFrame.toSeries(VALUES_DOUBLE);
  Assert.assertSame(first.values(), second.values());
}

// ---- type conversion: double source ----

@Test
public void testDoubleToDouble() {
  assertEquals(DataFrame.toSeries(VALUES_DOUBLE).getDoubles(), VALUES_DOUBLE);
}

@Test
public void testDoubleToLong() {
  assertEquals(DataFrame.toSeries(VALUES_DOUBLE).getLongs(), -2, 0, 0, 0, 1);
}

@Test
public void testDoubleToBoolean() {
  assertEquals(DataFrame.toSeries(VALUES_DOUBLE).getBooleans(), TRUE, TRUE, FALSE, TRUE, TRUE);
}

@Test
public void testDoubleToString() {
  assertEquals(DataFrame.toSeries(VALUES_DOUBLE).getStrings(), "-2.1", "-0.1", "0.0", "0.5", "1.3");
}

// ---- type conversion: long source ----

@Test
public void testLongToDouble() {
  assertEquals(DataFrame.toSeries(VALUES_LONG).getDoubles(), -2.0, 1.0, 0.0, 1.0, 2.0);
}

@Test
public void testLongToLong() {
  assertEquals(DataFrame.toSeries(VALUES_LONG).getLongs(), VALUES_LONG);
}

@Test
public void testLongToBoolean() {
  assertEquals(DataFrame.toSeries(VALUES_LONG).getBooleans(), TRUE, TRUE, FALSE, TRUE, TRUE);
}

@Test
public void testLongToString() {
  assertEquals(DataFrame.toSeries(VALUES_LONG).getStrings(), "-2", "1", "0", "1", "2");
}

// ---- type conversion: boolean source ----

@Test
public void testBooleanToDouble() {
  assertEquals(DataFrame.toSeries(VALUES_BOOLEAN).getDoubles(), 1.0, 1.0, 0.0, 1.0, 1.0);
}

@Test
public void testBooleanToLong() {
  assertEquals(DataFrame.toSeries(VALUES_BOOLEAN).getLongs(), TRUE, TRUE, FALSE, TRUE, TRUE);
}

@Test
public void testBooleanToBoolean() {
  assertEquals(DataFrame.toSeries(VALUES_BOOLEAN).getBooleans(), VALUES_BOOLEAN);
}

@Test
public void testBooleanToString() {
  assertEquals(DataFrame.toSeries(VALUES_BOOLEAN).getStrings(), "true", "true", "false", "true", "true");
}

// ---- type conversion: string source ----

@Test
public void testStringToDouble() {
  assertEquals(DataFrame.toSeries(VALUES_STRING).getDoubles(), -2.3, -1.0, 0.0, 0.5, 1.3);
}

@Test
public void testStringToDoubleNulls() {
  Series s = DataFrame.toSeries("", null, "-2.1e1");
  assertEquals(s.getDoubles(), DNULL, DNULL, -21.0d);
}

@Test
public void testStringToLong() {
  // NOTE: transparent conversion via double
  assertEquals(DataFrame.toSeries(VALUES_STRING).getLongs(), -2, -1, 0, 0, 1);
}

@Test
public void testStringToLongNulls() {
  // NOTE: transparent conversion via double
  Series s = DataFrame.toSeries("", null, "-1.0");
  assertEquals(s.getLongs(), LNULL, LNULL, -1);
}

@Test
public void testStringToBoolean() {
  // NOTE: transparent conversion via double
  assertEquals(DataFrame.toSeries(VALUES_STRING).getBooleans(), TRUE, TRUE, FALSE, TRUE, TRUE);
}

@Test
public void testStringToBooleanNulls() {
  // NOTE: transparent conversion via double
  Series s = DataFrame.toSeries("", null, "true");
  assertEquals(s.getBooleans(), BNULL, BNULL, TRUE);
}

@Test
public void testStringToString() {
  assertEquals(DataFrame.toSeries(VALUES_STRING).getStrings(), VALUES_STRING);
}

// ---- builders: null handling ----

@Test
public void testDoubleBuilderNull() {
  assertEquals(DoubleSeries.builder().addValues((Double)null).build(), DNULL);
}

@Test
public void testLongBuilderNull() {
  assertEquals(LongSeries.builder().addValues((Long)null).build(), LNULL);
}

@Test
public void testStringBuilderNull() {
  assertEquals(StringSeries.builder().addValues((String)null).build(), SNULL);
}

@Test
public void testBooleanBuilderNull() {
  assertEquals(BooleanSeries.builder().addValues((Byte)null).build(), BNULL);
}

@Test
public void testBooleanBuilderNullBoolean() {
  assertEquals(BooleanSeries.builder().addBooleanValues((Boolean)null).build(), BNULL);
}

@Test
public void testDataFrameBuilderDynamicTyping() {
  DataFrame.Builder builder = DataFrame.builder("double", "long", "string", "boolean");

  builder.append(4.0d, 1, null, "true");
  builder.append(null, 2, "2", "true");
  builder.append(2.3d, "", "hi", "false");
  builder.append(1.0d, 4, "4", "");

  DataFrame df = builder.build();
  Assert.assertEquals(df.get("double").type(), Series.SeriesType.DOUBLE);
  Assert.assertEquals(df.get("long").type(), Series.SeriesType.LONG);
  Assert.assertEquals(df.get("string").type(), Series.SeriesType.STRING);
  Assert.assertEquals(df.get("boolean").type(), Series.SeriesType.BOOLEAN);

  assertEquals(df.getDoubles("double"), 4, DNULL, 2.3, 1);
  assertEquals(df.getLongs("long"), 1, 2, LNULL, 4);
  assertEquals(df.getStrings("string"), SNULL, "2", "hi", "4");
  assertEquals(df.getBooleans("boolean"), TRUE, TRUE, FALSE, BNULL);
}

@Test
public void testDataFrameBuilderStaticTyping() {
  DataFrame.Builder builder = DataFrame.builder("double:DOUBLE", "long:LONG", "string:STRING", "boolean:BOOLEAN");

  builder.append(4.0d, 1, null, "true");
  builder.append(null, 2.34, "2", "1");
  builder.append("2", "", "3", "false");
  builder.append(1.0d, 4, "4", "");

  DataFrame df = builder.build();
  Assert.assertEquals(df.get("double").type(), Series.SeriesType.DOUBLE);
  Assert.assertEquals(df.get("long").type(), Series.SeriesType.LONG);
  Assert.assertEquals(df.get("string").type(), Series.SeriesType.STRING);
  Assert.assertEquals(df.get("boolean").type(), Series.SeriesType.BOOLEAN);

  assertEquals(df.getDoubles("double"), 4, DNULL, 2, 1);
  assertEquals(df.getLongs("long"), 1, 2, LNULL, 4);
  assertEquals(df.getStrings("string"), SNULL, "2", "3", "4");
  assertEquals(df.getBooleans("boolean"), TRUE, TRUE, FALSE, BNULL);
}

@Test(expectedExceptions = NumberFormatException.class)
public void testDataFrameBuilderStaticTypingFailDouble() {
  DataFrame.builder("double:DOUBLE").append("true").build();
}

@Test(expectedExceptions = NumberFormatException.class)
public void testDataFrameBuilderStaticTypingFailLong() {
  DataFrame.builder("long:LONG").append("true").build();
}

// ---- null propagation across type conversions ----

@Test
public void testDoubleNull() {
  Series s = DataFrame.toSeries(1.0, DNULL, 2.0);
  assertEquals(s.getDoubles(), 1.0, DNULL, 2.0);
  assertEquals(s.getLongs(), 1, LNULL, 2);
  assertEquals(s.getBooleans(), TRUE, BNULL, TRUE);
  assertEquals(s.getStrings(), "1.0", SNULL, "2.0");
}

@Test
public void testLongNull() {
  Series s = DataFrame.toSeries(1, LNULL, 2);
  assertEquals(s.getDoubles(), 1.0, DNULL, 2.0);
  assertEquals(s.getLongs(), 1, LNULL, 2);
  assertEquals(s.getBooleans(), TRUE, BNULL, TRUE);
  assertEquals(s.getStrings(), "1", SNULL, "2");
}

@Test
public void testBooleanNull() {
  Series s = DataFrame.toSeries(TRUE, BNULL, FALSE);
  assertEquals(s.getDoubles(), 1.0, DNULL, 0.0);
  assertEquals(s.getLongs(), 1, LNULL, 0);
  assertEquals(s.getBooleans(), TRUE, BNULL, FALSE);
  assertEquals(s.getStrings(), "true", SNULL, "false");
}

@Test
public void testStringNull() {
  Series s = DataFrame.toSeries("1.0", SNULL, "2.0");
  assertEquals(s.getDoubles(), 1.0, DNULL, 2.0);
  assertEquals(s.getLongs(), 1, LNULL, 2);
  assertEquals(s.getBooleans(), TRUE, BNULL, TRUE);
  assertEquals(s.getStrings(), "1.0", SNULL, "2.0");
}

@Test
public void testDoubleInfinity() {
  Series s = DataFrame.toSeries(DoubleSeries.POSITIVE_INFINITY, DoubleSeries.NEGATIVE_INFINITY);
  assertEquals(s.getDoubles(), Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
  assertEquals(s.getLongs(), LongSeries.MAX_VALUE, LongSeries.MIN_VALUE);
  assertEquals(s.getBooleans(), BooleanSeries.TRUE, BooleanSeries.TRUE);
  assertEquals(s.getStrings(), "Infinity", "-Infinity");
  assertEquals(DataFrame.toSeries("Infinity", "-Infinity").getDoubles(), Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
}

// ---- map functions ----

@Test
public void testMapDoubleToDouble() {
  DoubleSeries in = DataFrame.toSeries(VALUES_DOUBLE);
  DoubleSeries out = in.map(new DoubleSeries.DoubleFunction() {
    public double apply(double... values) {
      return values[0] * 2;
    }
  });
  assertEquals(out, -4.2, -0.2, 0.0, 1.0, 2.6);
}

@Test
public void testMapDoubleToBoolean() {
  DoubleSeries in = DataFrame.toSeries(VALUES_DOUBLE);
  BooleanSeries out = in.map(new DoubleSeries.DoubleConditional() {
    public boolean apply(double...
values) {
      return values[0] <= 0.3;
    }
  });
  assertEquals(out, TRUE, TRUE, TRUE, FALSE, FALSE);
}

@Test
public void testMapDataFrameAsDouble() {
  DoubleSeries out = df.map(new Series.DoubleFunction() {
    public double apply(double[] values) {
      return values[0] + values[1] + values[2];
    }
  }, "long", "string", "boolean");
  assertEquals(out, -3.3, 1.0, 0.0, 2.5, 4.3);
}

@Test
public void testOverrideWithGeneratedSeries() {
  DoubleSeries out = df.getDoubles("double").map(new DoubleSeries.DoubleFunction() {
    public double apply(double... values) {
      return values[0] * 2;
    }
  });
  df = df.addSeries("double", out);
  assertEquals(df.getDoubles("double"), -4.2, -0.2, 0.0, 1.0, 2.6);
}

// ---- sorting: nulls sort first for every type ----

@Test
public void testSortDouble() {
  DoubleSeries in = DataFrame.toSeries(3, 1.5, 1.3, 5, 1.9, DNULL);
  assertEquals(in.sorted(), DNULL, 1.3, 1.5, 1.9, 3, 5);
}

@Test
public void testSortLong() {
  LongSeries in = DataFrame.toSeries(3, 15, 13, 5, 19, LNULL);
  assertEquals(in.sorted(), LNULL, 3, 5, 13, 15, 19);
}

@Test
public void testSortString() {
  StringSeries in = DataFrame.toSeries("b", "a", "ba", "ab", "aa", SNULL);
  assertEquals(in.sorted(), SNULL, "a", "aa", "ab", "b", "ba");
}

@Test
public void testSortBoolean() {
  BooleanSeries in = DataFrame.toSeries(TRUE, FALSE, FALSE, TRUE, FALSE, BNULL);
  assertEquals(in.sorted(), BNULL, FALSE, FALSE, FALSE, TRUE, TRUE);
}

@Test
public void testProject() {
  // -1 in the projection produces a null row
  int[] fromIndex = new int[] { 1, -1, 4, 0 };
  DataFrame ndf = df.project(fromIndex);
  assertEquals(ndf.getLongs("index"), 1, LNULL, 3, -1);
  assertEquals(ndf.getDoubles("double"), -0.1, DNULL, 1.3, -2.1);
  assertEquals(ndf.getLongs("long"), 1, LNULL, 2, -2);
  assertEquals(ndf.getStrings("string"), "-1", SNULL, "0.13e1", "-2.3");
  assertEquals(ndf.getBooleans("boolean"), TRUE, BNULL, TRUE, TRUE);
}

@Test
public void testSortByIndex() {
  df = df.sortedBy("index");
  // NOTE: internal logic uses reorder() for all sorting
  assertEquals(df.getLongs("index"), -2, -1, 1, 3, 4);
  assertEquals(df.getDoubles("double"), 0.0, -2.1, -0.1, 1.3, 0.5);
  assertEquals(df.getLongs("long"), 0, -2, 1, 2, 1);
  assertEquals(df.getStrings("string"), "0.0", "-2.3", "-1", "0.13e1", "0.5");
  assertEquals(df.getBooleans("boolean"), FALSE, TRUE, TRUE, TRUE, TRUE);
}

@Test
public void testSortByDouble() {
  df = df.addSeries("myseries", 0.1, -2.1, 3.3, 4.6, -7.8);
  df = df.sortedBy("myseries");
  assertEquals(df.getLongs("index"), 3, 1, -1, -2, 4);
  assertEquals(df.getLongs("long"), 2, 1, -2, 0, 1);
}

@Test
public void testSortByLong() {
  df = df.addSeries("myseries", 1, -21, 33, 46, -78);
  df = df.sortedBy("myseries");
  assertEquals(df.getLongs("index"), 3, 1, -1, -2, 4);
  assertEquals(df.getLongs("long"), 2, 1, -2, 0, 1);
}

@Test
public void testSortByString() {
  df = df.addSeries("myseries", "b", "aa", "bb", "c", "a");
  df = df.sortedBy("myseries");
  assertEquals(df.getLongs("index"), 3, 1, -1, -2, 4);
  assertEquals(df.getLongs("long"), 2, 1, -2, 0, 1);
}

@Test
public void testSortByBoolean() {
  // NOTE: boolean sorted should be stable
  df = df.addSeries("myseries", true, true, false, false, true);
  df = df.sortedBy("myseries");
  assertEquals(df.getLongs("index"), -2, 4, -1, 1, 3);
  assertEquals(df.getLongs("long"), 0, 1, -2, 1, 2);
}

@Test
public void testReverse() {
  // NOTE: uses separate reverse() implementation by each series
  df = df.reverse();
  assertEquals(df.getLongs("index"), 3, 4, -2, 1, -1);
  assertEquals(df.getDoubles("double"), 1.3, 0.5, 0.0, -0.1, -2.1);
  assertEquals(df.getLongs("long"), 2, 1, 0, 1, -2);
  assertEquals(df.getStrings("string"), "0.13e1", "0.5", "0.0", "-1", "-2.3");
  assertEquals(df.getBooleans("boolean"), TRUE, TRUE, FALSE, TRUE, TRUE);
}

// ---- append: result takes the left-hand native type ----

@Test
public void testAppendLongDouble() {
  Series s = df.get("long").append(df.get("double"));
  Assert.assertEquals(s.type(), Series.SeriesType.LONG);
  assertEquals(s.getLongs(), -2, 1, 0, 1, 2, -2, 0, 0, 0, 1);
}

@Test
public void testAppendLongBoolean() {
  Series s = df.get("long").append(df.get("boolean"));
  Assert.assertEquals(s.type(), Series.SeriesType.LONG);
  assertEquals(s.getLongs(), -2, 1, 0, 1, 2, 1, 1, 0, 1, 1);
}

@Test
public void testAppendLongString() {
  Series s = df.get("long").append(df.get("string"));
  Assert.assertEquals(s.type(), Series.SeriesType.LONG);
  assertEquals(s.getLongs(), -2, 1, 0, 1, 2, -2, -1, 0, 0, 1);
}

// ---- groupByInterval ----

@Test
public void testLongGroupByIntervalEmpty() {
  Assert.assertTrue(DataFrame.toSeries(new long[0]).groupByInterval(1).isEmpty());
}

@Test(expectedExceptions = IllegalArgumentException.class)
public void testLongGroupByIntervalFailZero() {
  DataFrame.toSeries(-1).groupByInterval(0);
}

@Test
public void testLongGroupByInterval() {
  LongSeries in = DataFrame.toSeries(3, 15, 13, 5, 19, 20);
  Series.SeriesGrouping grouping = in.groupByInterval(4);

  Assert.assertEquals(grouping.size(), 6);
  Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] { 0 });
  Assert.assertEquals(grouping.buckets.get(1).fromIndex, new int[] { 3 });
  Assert.assertEquals(grouping.buckets.get(2).fromIndex, new int[] {});
  Assert.assertEquals(grouping.buckets.get(3).fromIndex, new int[] { 1, 2 });
  Assert.assertEquals(grouping.buckets.get(4).fromIndex, new int[] { 4 });
  Assert.assertEquals(grouping.buckets.get(5).fromIndex, new int[] { 5 });
}

// ---- groupByCount ----

@Test
public void testLongGroupByCountEmpty() {
  Assert.assertTrue(DataFrame.toSeries(new long[0]).groupByCount(1).isEmpty());
}

@Test(expectedExceptions = IllegalArgumentException.class)
public void testLongGroupByCountFailZero() {
  DataFrame.toSeries(-1).groupByCount(0);
}

@Test
public void testLongGroupByCountAligned() {
  LongSeries in = DataFrame.toSeries(3, 15, 13, 5, 19, 20);
  Series.SeriesGrouping grouping = in.groupByCount(3);

  Assert.assertEquals(grouping.size(), 2);
  Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] { 0, 1, 2 });
  Assert.assertEquals(grouping.buckets.get(1).fromIndex, new int[] { 3, 4, 5 });
}

@Test
public void testLongBucketsByCountUnaligned() {
  LongSeries in = DataFrame.toSeries(3, 15, 13, 5, 19, 11, 12, 9);
  Series.SeriesGrouping grouping = in.groupByCount(3);

  Assert.assertEquals(grouping.size(), 3);
  Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] { 0, 1, 2 });
  Assert.assertEquals(grouping.buckets.get(1).fromIndex, new int[] { 3, 4, 5 });
  Assert.assertEquals(grouping.buckets.get(2).fromIndex, new int[] { 6, 7 });
}

// ---- groupByPartitions ----

@Test
public void testLongGroupByPartitionsEmpty() {
  Assert.assertTrue(DataFrame.toSeries(new long[0]).groupByPartitions(1).isEmpty());
}

@Test(expectedExceptions = IllegalArgumentException.class)
public void testLongGroupByPartitionsFailZero() {
  DataFrame.toSeries(-1).groupByPartitions(0);
}

@Test
public void testLongGroupByPartitionsAligned() {
  LongSeries in = DataFrame.toSeries(3, 15, 13, 5, 19, 20, 5, 5, 8, 1);
  Series.SeriesGrouping grouping = in.groupByPartitions(5);

  Assert.assertEquals(grouping.size(), 5);
  Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] { 0, 1 });
  Assert.assertEquals(grouping.buckets.get(1).fromIndex, new int[] { 2, 3 });
  Assert.assertEquals(grouping.buckets.get(2).fromIndex, new int[] { 4, 5 });
  Assert.assertEquals(grouping.buckets.get(3).fromIndex, new int[] { 6, 7 });
  Assert.assertEquals(grouping.buckets.get(4).fromIndex, new int[] { 8, 9 });
}

@Test
public void testLongGroupByPartitionsUnaligned() {
  LongSeries in = DataFrame.toSeries(3, 15, 13, 5, 19, 20, 5, 5, 8, 1);
  Series.SeriesGrouping grouping = in.groupByPartitions(3);

  Assert.assertEquals(grouping.size(), 3);
  Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] { 0, 1, 2 });
  Assert.assertEquals(grouping.buckets.get(1).fromIndex, new int[] { 3, 4, 5, 6 });
  Assert.assertEquals(grouping.buckets.get(2).fromIndex, new int[] { 7, 8, 9 });
}

@Test
public void testLongGroupByPartitionsUnalignedSmall() {
  // more partitions than elements: empty buckets are interleaved
  LongSeries in = DataFrame.toSeries(3, 15, 1);
  Series.SeriesGrouping grouping = in.groupByPartitions(7);

  Assert.assertEquals(grouping.size(), 7);
  Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] {});
  Assert.assertEquals(grouping.buckets.get(1).fromIndex, new int[] { 0 });
  Assert.assertEquals(grouping.buckets.get(2).fromIndex, new int[] {});
  Assert.assertEquals(grouping.buckets.get(3).fromIndex, new int[] { 1 });
  Assert.assertEquals(grouping.buckets.get(4).fromIndex, new int[] {});
  Assert.assertEquals(grouping.buckets.get(5).fromIndex, new int[] { 2 });
  Assert.assertEquals(grouping.buckets.get(6).fromIndex, new int[] {});
}

// ---- groupByValue / moving window ----

@Test
public void testLongGroupByValueEmpty() {
  Assert.assertTrue(DataFrame.toSeries(new long[0]).groupByValue().isEmpty());
}

@Test
public void testLongGroupByValue() {
  LongSeries in = DataFrame.toSeries(3, 4, 5, 5, 3, 1, 5);
  Series.SeriesGrouping grouping = in.groupByValue();

  Assert.assertEquals(grouping.size(), 4);
  Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] { 5 });
  Assert.assertEquals(grouping.buckets.get(1).fromIndex, new int[] { 0, 4 });
  Assert.assertEquals(grouping.buckets.get(2).fromIndex, new int[] { 1 });
  Assert.assertEquals(grouping.buckets.get(3).fromIndex, new int[] { 2, 3, 6 });
}

@Test
public void testLongGroupByMovingWindow() {
  LongSeries in = DataFrame.toSeries(3, 4, 5, 5, 3, 1, 5);
  Series.SeriesGrouping grouping = in.groupByMovingWindow(3);

  Assert.assertEquals(grouping.size(), 5);
  Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] { 0, 1, 2 });
  Assert.assertEquals(grouping.buckets.get(1).fromIndex, new int[] { 1, 2, 3 });
  Assert.assertEquals(grouping.buckets.get(2).fromIndex, new int[] { 2, 3, 4 });
  Assert.assertEquals(grouping.buckets.get(3).fromIndex, new int[] { 3, 4, 5 });
  Assert.assertEquals(grouping.buckets.get(4).fromIndex, new int[] { 4, 5, 6 });
}

@Test
public void testLongGroupByMovingWindowTooLarge() {
  LongSeries in = DataFrame.toSeries(3, 4, 5, 5, 3, 1, 5);
  Series.SeriesGrouping grouping = in.groupByMovingWindow(8);
  Assert.assertEquals(grouping.size(), 0);
}

@Test
public void testLongGroupByMovingWindowAggregation() {
  LongSeries in = DataFrame.toSeries(3, 4, 5, 5, 3, 1, 5);
  Series.SeriesGrouping grouping = in.groupByMovingWindow(3);
DataFrame out = grouping.aggregate(LongSeries.SUM); Assert.assertEquals(out.size(), 5); assertEquals(out.getLongs(Series.GROUP_KEY), 0, 1, 2, 3, 4); assertEquals(out.getLongs(Series.GROUP_VALUE), 12, 14, 13, 9, 9); } @Test public void testLongGroupByExpandingWindow() { LongSeries in = DataFrame.toSeries(3, 4, 5, 5); Series.SeriesGrouping grouping = in.groupByExpandingWindow(); Assert.assertEquals(grouping.size(), 4); Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] { 0 }); Assert.assertEquals(grouping.buckets.get(1).fromIndex, new int[] { 0, 1 }); Assert.assertEquals(grouping.buckets.get(2).fromIndex, new int[] { 0, 1, 2 }); Assert.assertEquals(grouping.buckets.get(3).fromIndex, new int[] { 0, 1, 2, 3 }); } @Test public void testLongGroupByExpandingWindowAggregation() { LongSeries in = DataFrame.toSeries(3, 4, 5, 5); Series.SeriesGrouping grouping = in.groupByExpandingWindow(); DataFrame out = grouping.aggregate(LongSeries.SUM); Assert.assertEquals(out.size(), 4); assertEquals(out.getLongs(Series.GROUP_KEY), 0, 1, 2, 3); assertEquals(out.getLongs(Series.GROUP_VALUE), 3, 7, 12, 17); } @Test public void testBooleanGroupByValueEmpty() { Assert.assertTrue(DataFrame.toSeries(new boolean[0]).groupByValue().isEmpty()); } @Test public void testBooleanGroupByValue() { BooleanSeries in = DataFrame.toSeries(true, false, false, true, false, true, false); Series.SeriesGrouping grouping = in.groupByValue(); Assert.assertEquals(grouping.size(), 2); Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] { 1, 2, 4, 6 }); Assert.assertEquals(grouping.buckets.get(1).fromIndex, new int[] { 0, 3, 5 }); } @Test public void testBooleanGroupByValueTrueOnly() { BooleanSeries in = DataFrame.toSeries(true, true, true); Series.SeriesGrouping grouping = in.groupByValue(); Assert.assertEquals(grouping.size(), 1); Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] { 0, 1, 2 }); } @Test public void testBooleanGroupByValueFalseOnly() { BooleanSeries in = 
DataFrame.toSeries(false, false, false);
    Series.SeriesGrouping grouping = in.groupByValue();
    Assert.assertEquals(grouping.size(), 1);
    Assert.assertEquals(grouping.buckets.get(0).fromIndex, new int[] { 0, 1, 2 });
  }

  // Sum aggregation over explicit buckets; the empty bucket yields LNULL.
  @Test
  public void testLongAggregateSum() {
    Series keys = DataFrame.toSeries(3, 5, 7);
    LongSeries in = DataFrame.toSeries(3, 15, 13, 5, 19);
    List<Series.Bucket> buckets = new ArrayList<>();
    buckets.add(new Series.Bucket(new int[] { 1, 3, 4 }));
    buckets.add(new Series.Bucket(new int[] {}));
    buckets.add(new Series.Bucket(new int[] { 0, 2 }));
    Series.SeriesGrouping grouping = new Series.SeriesGrouping(keys, in, buckets);
    DataFrame out = grouping.aggregate(new LongSeries.LongSum());
    assertEquals(out.getLongs("key"), 3, 5, 7);
    // bucket {1,3,4} -> 15+5+19=39; empty bucket -> LNULL; bucket {0,2} -> 3+13=16
    assertEquals(out.getLongs("value"), 39, LNULL, 16);
  }

  // "Last" aggregation picks the final element of each bucket; empty -> LNULL.
  @Test
  public void testLongAggregateLast() {
    Series keys = DataFrame.toSeries(3, 5, 7);
    LongSeries in = DataFrame.toSeries(3, 15, 13, 5, 19);
    List<Series.Bucket> buckets = new ArrayList<>();
    buckets.add(new Series.Bucket(new int[] { 1, 3, 4 }));
    buckets.add(new Series.Bucket(new int[] {}));
    buckets.add(new Series.Bucket(new int[] { 0, 2 }));
    Series.SeriesGrouping grouping = new Series.SeriesGrouping(keys, in, buckets);
    DataFrame out = grouping.aggregate(new LongSeries.LongLast());
    assertEquals(out.getLongs("key"), 3, 5, 7);
    assertEquals(out.getLongs("value"), 19, LNULL, 13);
  }

  // groupByInterval(4): expected keys 0,4,8,12 with per-interval sums.
  @Test
  public void testLongGroupByAggregateEndToEnd() {
    LongSeries in = DataFrame.toSeries(0, 3, 12, 2, 4, 8, 5, 1, 7, 9, 6, 10, 11);
    Series.SeriesGrouping grouping = in.groupByInterval(4);
    Assert.assertEquals(grouping.size(), 4);
    DataFrame out = grouping.aggregate(new LongSeries.LongSum());
    assertEquals(out.getLongs("key"), 0, 4, 8, 12);
    assertEquals(out.getLongs("value"), 6, 22, 38, 12);
  }

  // Aggregating an empty series yields the null sentinel rather than throwing.
  @Test
  public void testAggregateWithoutData() {
    DoubleSeries s = DataFrame.toSeries(new double[0]);
    Assert.assertEquals(s.sum(), DNULL);
  }

  // Nulls are skipped by sum(); fillNull()/dropNull() give the same total.
  @Test
  public void testDoubleAggregateWithNull() {
    DoubleSeries s = DataFrame.toSeries(1.0, 2.0, DNULL, 4.0);
    Assert.assertEquals(s.sum(), 7.0);
    Assert.assertEquals(s.fillNull().sum(), 7.0);
    Assert.assertEquals(s.dropNull().sum(), 7.0);
  }

  @Test
  public void testLongAggregateWithNull() {
    LongSeries s = DataFrame.toSeries(1, 2, LNULL, 4);
    Assert.assertEquals(s.sum(), 7);
    Assert.assertEquals(s.fillNull().sum(), 7);
    Assert.assertEquals(s.dropNull().sum(), 7);
  }

  @Test
  public void testStringAggregateWithNull() {
    StringSeries s = DataFrame.toSeries("a", "b", SNULL, "d");
    Assert.assertEquals(s.join(), "abd");
    Assert.assertEquals(s.fillNull().join(), "abd");
    Assert.assertEquals(s.dropNull().join(), "abd");
  }

  // NOTE(review): TRUE and 1 are used interchangeably here — presumably the
  // boolean series is byte-backed with TRUE == 1; confirm against BooleanSeries.
  @Test
  public void testBooleanAggregateWithNull() {
    BooleanSeries s = DataFrame.toSeries(TRUE, FALSE, BNULL, TRUE);
    Assert.assertEquals(s.aggregate(BooleanSeries.HAS_TRUE).value(), TRUE);
    Assert.assertEquals(s.fillNull().aggregate(BooleanSeries.HAS_TRUE).value(), 1);
    Assert.assertEquals(s.dropNull().aggregate(BooleanSeries.HAS_TRUE).value(), 1);
  }

  // Group by the boolean column, then aggregate the other columns per group.
  @Test
  public void testDataFrameGroupBy() {
    DataFrame.DataFrameGrouping grouping = df.groupByValue("boolean");
    DoubleSeries ds = grouping.aggregate("double", new DoubleSeries.DoubleSum()).getDoubles("double");
    assertEquals(ds, 0.0, -0.4);
    LongSeries ls = grouping.aggregate("long", new LongSeries.LongSum()).get("long").getLongs();
    assertEquals(ls, 0, 2);
    StringSeries ss = grouping.aggregate("string", new StringSeries.StringConcat("|")).get("string").getStrings();
    assertEquals(ss, "0.0", "-2.3|-1|0.5|0.13e1");
  }

  // Resample over interval 2 on "index" using the ResampleLast aggregator.
  @Test
  public void testResampleEndToEnd() {
    df = df.resampledBy("index", 2, new DataFrame.ResampleLast());
    Assert.assertEquals(df.size(), 4);
    Assert.assertEquals(df.getSeriesNames().size(), 5);
    assertEquals(df.getLongs("index"), -2, 0, 2, 4);
    assertEquals(df.getDoubles("double"), -2.1, -0.1, 1.3, 0.5);
    assertEquals(df.getLongs("long"), -2, 1, 2, 1);
    assertEquals(df.getStrings("string"), "-2.3", "-1", "0.13e1", "0.5");
    assertEquals(df.getBooleans("boolean"), TRUE, TRUE, TRUE, TRUE);
  }

  // Multi-column sort must be stable: ties keep their prior relative order.
  @Test
  public void testStableMultiSortDoubleLong() {
    DataFrame mydf = new DataFrame(new long[] { 1, 2, 3, 4, 5, 6, 7, 8 })
        .addSeries("double", 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0)
        .addSeries("long", 2, 2, 2, 2, 1, 1, 1, 1);
    DataFrame sdfa = mydf.sortedBy("double", "long");
    assertEquals(sdfa.getLongs("index"), 5, 6, 1, 2, 7, 8, 3, 4);
    DataFrame sdfb = mydf.sortedBy("long", "double");
    assertEquals(sdfb.getLongs("index"), 3, 4, 7, 8, 1, 2, 5, 6);
  }

  @Test
  public void testStableMultiSortStringBoolean() {
    DataFrame mydf = new DataFrame(new long[] { 1, 2, 3, 4, 5, 6, 7, 8 })
        .addSeries("string", "a", "a", "b", "b", "a", "a", "b", "b")
        .addSeries("boolean", true, true, true, true, false, false, false, false);
    DataFrame sdfa = mydf.sortedBy("string", "boolean");
    assertEquals(sdfa.getLongs("index"), 5, 6, 1, 2, 7, 8, 3, 4);
    DataFrame sdfb = mydf.sortedBy("boolean", "string");
    assertEquals(sdfb.getLongs("index"), 3, 4, 7, 8, 1, 2, 5, 6);
  }

  // Mask length must match the frame size.
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testFilterUnequalLengthFail() {
    df.filter(DataFrame.toSeries(false, true));
  }

  // filter() keeps size but nulls unselected rows; dropNull() then removes them.
  @Test
  public void testFilter() {
    df = df.filter(DataFrame.toSeries(true, false, true, true, false));
    Assert.assertEquals(df.size(), 5);
    df = df.dropNull();
    Assert.assertEquals(df.size(), 3);
    assertEquals(df.getLongs("index"), -1, -2, 4);
    assertEquals(df.getDoubles("double"), -2.1, 0.0, 0.5);
    assertEquals(df.getLongs("long"), -2, 0, 1);
    assertEquals(df.getStrings("string"), "-2.3", "0.0", "0.5");
    assertEquals(df.getBooleans("boolean"), TRUE, FALSE, TRUE);
  }

  @Test
  public void testFilterAll() {
    df = df.filter(DataFrame.toSeries(true, true, true, true, true));
    Assert.assertEquals(df.size(), 5);
    Assert.assertEquals(df.dropNull().size(), 5);
  }

  @Test
  public void testFilterNone() {
    df = df.filter(DataFrame.toSeries(false, false, false, false, false));
    Assert.assertEquals(df.size(), 5);
    Assert.assertEquals(df.dropNull().size(), 0);
  }

  // A null mask entry excludes the row, same as false.
  @Test
  public void testFilterNull() {
    df =
df.filter(DataFrame.toSeries(BNULL, FALSE, TRUE, BNULL, FALSE));
    Assert.assertEquals(df.size(), 5);
    Assert.assertEquals(df.dropNull().size(), 1);
  }

  // After a rename the old series name must no longer resolve.
  @Test
  public void testRenameSeries() {
    df = df.renameSeries("double", "new");
    df.getDoubles("new");
    try {
      df.getDoubles("double");
      Assert.fail();
    } catch (IllegalArgumentException e) {
      // left blank
    }
  }

  // Renaming onto an existing name overrides that series.
  @Test
  public void testRenameSeriesOverride() {
    df = df.renameSeries("double", "long");
    assertEquals(df.getDoubles("long"), VALUES_DOUBLE);
  }

  @Test
  public void testContains() {
    Assert.assertTrue(df.contains("double"));
    Assert.assertFalse(df.contains("NOT_VALID"));
  }

  // copy() must deep-copy the backing arrays of every series type.
  @Test
  public void testCopy() {
    DataFrame ndf = df.copy();
    ndf.getDoubles("double").values()[0] = 100.0;
    Assert.assertNotEquals(df.getDoubles("double").first(), ndf.getDoubles("double").first());
    ndf.getLongs("long").values()[0] = 100;
    Assert.assertNotEquals(df.getLongs("long").first(), ndf.getLongs("long").first());
    ndf.getStrings("string").values()[0] = "other string";
    Assert.assertNotEquals(df.getStrings("string").first(), ndf.getStrings("string").first());
    ndf.getBooleans("boolean").values()[0] = 0;
    Assert.assertNotEquals(df.getBooleans("boolean").first(), ndf.getBooleans("boolean").first());
  }

  // head(n)/tail(n) clamp n to the series length (VALUES_DOUBLE has 5 elements).
  @Test
  public void testDoubleHead() {
    DoubleSeries s = DataFrame.toSeries(VALUES_DOUBLE);
    assertEquals(s.head(0), new double[0]);
    assertEquals(s.head(3), Arrays.copyOfRange(VALUES_DOUBLE, 0, 3));
    assertEquals(s.head(6), Arrays.copyOfRange(VALUES_DOUBLE, 0, 5));
  }

  @Test
  public void testDoubleTail() {
    DoubleSeries s = DataFrame.toSeries(VALUES_DOUBLE);
    assertEquals(s.tail(0), new double[0]);
    assertEquals(s.tail(3), Arrays.copyOfRange(VALUES_DOUBLE, 2, 5));
    assertEquals(s.tail(6), Arrays.copyOfRange(VALUES_DOUBLE, 0, 5));
  }

  // On an empty series aggregates return null; element accessors throw.
  @Test
  public void testDoubleAccessorsEmpty() {
    DoubleSeries s = DoubleSeries.empty();
    Assert.assertTrue(DoubleSeries.isNull(s.sum()));
    Assert.assertTrue(DoubleSeries.isNull(s.min()));
    Assert.assertTrue(DoubleSeries.isNull(s.max()));
    Assert.assertTrue(DoubleSeries.isNull(s.mean()));
    Assert.assertTrue(DoubleSeries.isNull(s.std()));
    try {
      s.first();
      Assert.fail();
    } catch (IllegalStateException expected) {
      // left blank
    }
    try {
      s.last();
      Assert.fail();
    } catch (IllegalStateException expected) {
      // left blank
    }
    try {
      s.value();
      Assert.fail();
    } catch (IllegalStateException expected) {
      // left blank
    }
  }

  @Test
  public void testLongHead() {
    LongSeries s = DataFrame.toSeries(VALUES_LONG);
    assertEquals(s.head(0), new long[0]);
    assertEquals(s.head(3), Arrays.copyOfRange(VALUES_LONG, 0, 3));
    assertEquals(s.head(6), Arrays.copyOfRange(VALUES_LONG, 0, 5));
  }

  @Test
  public void testLongTail() {
    LongSeries s = DataFrame.toSeries(VALUES_LONG);
    assertEquals(s.tail(0), new long[0]);
    assertEquals(s.tail(3), Arrays.copyOfRange(VALUES_LONG, 2, 5));
    assertEquals(s.tail(6), Arrays.copyOfRange(VALUES_LONG, 0, 5));
  }

  @Test
  public void testLongAccessorsEmpty() {
    LongSeries s = LongSeries.empty();
    Assert.assertTrue(LongSeries.isNull(s.sum()));
    Assert.assertTrue(LongSeries.isNull(s.min()));
    Assert.assertTrue(LongSeries.isNull(s.max()));
    try {
      s.first();
      Assert.fail();
    } catch (IllegalStateException expected) {
      // left blank
    }
    try {
      s.last();
      Assert.fail();
    } catch (IllegalStateException expected) {
      // left blank
    }
    try {
      s.value();
      Assert.fail();
    } catch (IllegalStateException expected) {
      // left blank
    }
  }

  // unique() deduplicates and returns values in ascending order.
  @Test
  public void testLongUnique() {
    LongSeries s1 = DataFrame.toSeries(new long[0]);
    assertEquals(s1.unique(), new long[0]);
    LongSeries s2 = DataFrame.toSeries(4, 5, 2, 1);
    assertEquals(s2.unique(), 1, 2, 4, 5);
    LongSeries s3 = DataFrame.toSeries(9, 1, 2, 3, 6, 1, 2, 9, 2, 7);
    assertEquals(s3.unique(), 1, 2, 3, 6, 7, 9);
  }

  @Test
  public void testDoubleUnique() {
    DoubleSeries s1 = DataFrame.toSeries(new double[] {});
    assertEquals(s1.unique(), new double[0]);
    DoubleSeries s2 = DataFrame.toSeries(4.1, 5.2, 2.3, 1.4);
    assertEquals(s2.unique(), 1.4, 2.3, 4.1, 5.2);
    DoubleSeries s3 = DataFrame.toSeries(9.0, 1.1, 2.2, 3.0, 6.0, 1.1, 2.3, 9.0, 2.3, 7.0);
    assertEquals(s3.unique(), 1.1, 2.2, 2.3, 3.0, 6.0, 7.0, 9.0);
  }

  // String uniqueness is compared order-insensitively via sets here.
  @Test
  public void testStringUnique() {
    StringSeries s1 = DataFrame.toSeries(new String[] {});
    assertEquals(s1.unique(), new String[0]);
    StringSeries s2 = DataFrame.toSeries("a", "A", "b", "Cc");
    Assert.assertEquals(new HashSet<>(s2.unique().toList()), new HashSet<>(Arrays.asList("a", "A", "b", "Cc")));
    StringSeries s3 = DataFrame.toSeries("a", "A", "b", "Cc", "A", "cC", "a", "cC");
    Assert.assertEquals(new HashSet<>(s3.unique().toList()), new HashSet<>(Arrays.asList("a", "A", "b", "Cc", "cC")));
  }

  @Test
  public void testStringFillNull() {
    StringSeries s = DataFrame.toSeries("a", SNULL, SNULL, "b", SNULL);
    assertEquals(s.fillNull("N"), "a", "N", "N", "b", "N");
  }

  // shift() pads vacated positions with nulls; |shift| >= size nulls everything.
  @Test
  public void testStringShift() {
    StringSeries s1 = DataFrame.toSeries(VALUES_STRING);
    assertEquals(s1.shift(0), VALUES_STRING);
    StringSeries s2 = DataFrame.toSeries(VALUES_STRING);
    assertEquals(s2.shift(2), SNULL, SNULL, "-2.3", "-1", "0.0");
    StringSeries s3 = DataFrame.toSeries(VALUES_STRING);
    assertEquals(s3.shift(4), SNULL, SNULL, SNULL, SNULL, "-2.3");
    StringSeries s4 = DataFrame.toSeries(VALUES_STRING);
    assertEquals(s4.shift(-4), "0.13e1", SNULL, SNULL, SNULL, SNULL);
    StringSeries s5 = DataFrame.toSeries(VALUES_STRING);
    assertEquals(s5.shift(100), SNULL, SNULL, SNULL, SNULL, SNULL);
    StringSeries s6 = DataFrame.toSeries(VALUES_STRING);
    assertEquals(s6.shift(-100), SNULL, SNULL, SNULL, SNULL, SNULL);
  }

  // map() over a conditional propagates nulls instead of invoking the callback.
  @Test
  public void testDoubleMapNullConditional() {
    DoubleSeries in = DataFrame.toSeries(1.0, DNULL, 2.0);
    BooleanSeries out = in.map(new Series.DoubleConditional() {
      @Override
      public boolean apply(double... values) {
        return true;
      }
    });
    assertEquals(out, TRUE, BNULL, TRUE);
  }

  @Test
  public void testLongMapNullConditional() {
    LongSeries in = DataFrame.toSeries(1, LNULL, 2);
    BooleanSeries out = in.map(new Series.LongConditional() {
      @Override
      public boolean apply(long...
values) {
        return true;
      }
    });
    assertEquals(out, TRUE, BNULL, TRUE);
  }

  @Test
  public void testStringMapNullConditional() {
    StringSeries in = DataFrame.toSeries("1.0", SNULL, "2.0");
    BooleanSeries out = in.map(new Series.StringConditional() {
      @Override
      public boolean apply(String... values) {
        return true;
      }
    });
    assertEquals(out, TRUE, BNULL, TRUE);
  }

  // Null inputs bypass the mapping function and stay null in the output.
  @Test
  public void testDoubleMapNullFunction() {
    DoubleSeries in = DataFrame.toSeries(1.0, DNULL, 2.0);
    DoubleSeries out = in.map(new DoubleSeries.DoubleFunction() {
      @Override
      public double apply(double... values) {
        return values[0] + 1.0;
      }
    });
    assertEquals(out, 2.0, DNULL, 3.0);
  }

  @Test
  public void testLongMapNullFunction() {
    LongSeries in = DataFrame.toSeries(1, LNULL, 2);
    LongSeries out = in.map(new LongSeries.LongFunction() {
      @Override
      public long apply(long... values) {
        return values[0] + 1;
      }
    });
    assertEquals(out, 2, LNULL, 3);
  }

  @Test
  public void testStringMapNullFunction() {
    StringSeries in = DataFrame.toSeries("1.0", SNULL, "2.0");
    StringSeries out = in.map(new StringSeries.StringFunction() {
      @Override
      public String apply(String... values) {
        return values[0] + "+";
      }
    });
    assertEquals(out, "1.0+", SNULL, "2.0+");
  }

  // dropNull() on a frame removes rows with a null in any column.
  @Test
  public void testDropNullRows() {
    DataFrame mdf = new DataFrame(new long[] { 1, 2, 3, 4, 5, 6 })
        .addSeries("double", 1.0, 2.0, DNULL, 4.0, 5.0, 6.0)
        .addSeries("long", LNULL, 2, 3, 4, 5, 6)
        .addSeries("string", "1.0", "2", "bbb", "true", SNULL, "aaa")
        .addSeries("boolean", true, true, false, false, false, false);
    DataFrame ddf = mdf.dropNull();
    Assert.assertEquals(ddf.size(), 3);
    assertEquals(ddf.getLongs("index"), 2, 4, 6);
    assertEquals(ddf.getDoubles("double"), 2.0, 4.0, 6.0);
    assertEquals(ddf.getLongs("long"), 2, 4, 6);
    assertEquals(ddf.getStrings("string"), "2", "true", "aaa");
    assertEquals(ddf.getBooleans("boolean"), TRUE, FALSE, FALSE);
  }

  @Test
  public void testDropNullRowsIdentity() {
    Assert.assertEquals(df.dropNull().size(), df.size());
  }

  // dropNullColumns() removes every series containing at least one null.
  @Test
  public void testDropNullColumns() {
    DataFrame mdf = new DataFrame()
        .addSeries("double_null", 1.0, 2.0, DNULL)
        .addSeries("double", 1.0, 2.0, 3.0)
        .addSeries("long_null", LNULL, 2, 3)
        .addSeries("long", 1, 2, 3)
        .addSeries("string_null", "true", SNULL, "aaa")
        .addSeries("string", "true", "this", "aaa")
        .addSeries("boolean", true, true, false);
    DataFrame ddf = mdf.dropNullColumns();
    Assert.assertEquals(ddf.size(), 3);
    Assert.assertEquals(new HashSet<>(ddf.getSeriesNames()), new HashSet<>(Arrays.asList("double", "long", "string", "boolean")));
  }

  // Expression mapping evaluates arithmetic over column names.
  @Test
  public void testMapExpression() {
    DoubleSeries s = df.map("(double * 2 + long + boolean) / 2");
    assertEquals(s, -2.6, 0.9, 0.0, 1.5, 2.8);
  }

  @Test
  public void testMapExpressionNull() {
    DataFrame mdf = new DataFrame(VALUES_LONG)
        .addSeries("null", 1.0, 1.0, DNULL, 1.0, 1.0);
    DoubleSeries out = mdf.map("null + 1");
    assertEquals(out, 2.0, 2.0, DNULL, 2.0, 2.0);
  }

  // Nulls in columns not referenced by the expression must not block evaluation.
  @Test
  public void testMapExpressionOtherNullPass() {
    DataFrame mdf = new DataFrame(VALUES_LONG)
        .addSeries("null", 1.0, 1.0, DNULL, 1.0, 1.0)
        .addSeries("notnull", 1.0, 1.0, 1.0, 1.0, 1.0);
    mdf.map("notnull + 1");
  }

  @Test
  public void testMapExpressionWithNull() {
    DataFrame mdf = new DataFrame(VALUES_LONG)
        .addSeries("null", 1.0, 1.0, DNULL, 1.0, 1.0);
    DoubleSeries s = mdf.map("null + 1");
    assertEquals(s, 2.0, 2.0, DNULL, 2.0, 2.0);
  }

  // equals() requires matching type and values; typed views compare equal.
  @Test
  public void testSeriesEquals() {
    Assert.assertTrue(DataFrame.toSeries(0.0, 3.0, 4.0).equals(DataFrame.toSeries(0.0, 3.0, 4.0)));
    Assert.assertTrue(DataFrame.toSeries(0, 3, 4).equals(DataFrame.toSeries(0, 3, 4)));
    Assert.assertTrue(DataFrame.toSeries(false, true, true).equals(DataFrame.toSeries(false, true, true)));
    Assert.assertTrue(DataFrame.toSeries("1", "3", "4").equals(DataFrame.toSeries("1", "3", "4")));
    Assert.assertFalse(DataFrame.toSeries(0.0, 3.0, 4.0).equals(DataFrame.toSeries(0, 3, 4)));
    Assert.assertFalse(DataFrame.toSeries(0, 3, 4).equals(DataFrame.toSeries(0.0, 3.0, 4.0)));
    Assert.assertFalse(DataFrame.toSeries(false, true, true).equals(DataFrame.toSeries("0", "1", "1")));
    Assert.assertFalse(DataFrame.toSeries("1", "3", "4").equals(DataFrame.toSeries(1, 3, 4)));
    Assert.assertTrue(DataFrame.toSeries(0.0, 3.0, 4.0).equals(DataFrame.toSeries(0, 3, 4).getDoubles()));
    Assert.assertTrue(DataFrame.toSeries(0, 3, 4).equals(DataFrame.toSeries(0.0, 3.0, 4.0).getLongs()));
    Assert.assertTrue(DataFrame.toSeries(false, true, true).equals(DataFrame.toSeries("0", "1", "1").getBooleans()));
    Assert.assertTrue(DataFrame.toSeries("1", "3", "4").equals(DataFrame.toSeries(1, 3, 4).getStrings()));
  }

  // Join pairs are (leftIndex, rightIndex) ordered by key; -1 marks no match.
  @Test
  public void testLongJoinInner() {
    Series sLeft = DataFrame.toSeries(4, 3, 1, 2);
    Series sRight = DataFrame.toSeries(5, 4, 3, 3, 0);
    List<Series.JoinPair> pairs = sLeft.join(sRight, Series.JoinType.INNER);
    Assert.assertEquals(pairs.size(), 3);
    Assert.assertEquals(pairs.get(0), new Series.JoinPair(1, 2));
    Assert.assertEquals(pairs.get(1), new Series.JoinPair(1, 3));
    Assert.assertEquals(pairs.get(2), new Series.JoinPair(0, 1));
  }

  @Test
  public void testLongJoinLeft() {
    Series sLeft = DataFrame.toSeries(4, 3, 1, 2);
    Series sRight = DataFrame.toSeries(5, 4, 3, 3, 0);
List<Series.JoinPair> pairs = sLeft.join(sRight, Series.JoinType.LEFT);
    Assert.assertEquals(pairs.size(), 5);
    Assert.assertEquals(pairs.get(0), new Series.JoinPair(2, -1));
    Assert.assertEquals(pairs.get(1), new Series.JoinPair(3, -1));
    Assert.assertEquals(pairs.get(2), new Series.JoinPair(1, 2));
    Assert.assertEquals(pairs.get(3), new Series.JoinPair(1, 3));
    Assert.assertEquals(pairs.get(4), new Series.JoinPair(0, 1));
  }

  @Test
  public void testLongJoinRight() {
    Series sLeft = DataFrame.toSeries(4, 3, 1, 2);
    Series sRight = DataFrame.toSeries(5, 4, 3, 3, 0);
    List<Series.JoinPair> pairs = sLeft.join(sRight, Series.JoinType.RIGHT);
    Assert.assertEquals(pairs.size(), 5);
    Assert.assertEquals(pairs.get(0), new Series.JoinPair(-1, 4));
    Assert.assertEquals(pairs.get(1), new Series.JoinPair(1, 2));
    Assert.assertEquals(pairs.get(2), new Series.JoinPair(1, 3));
    Assert.assertEquals(pairs.get(3), new Series.JoinPair(0, 1));
    Assert.assertEquals(pairs.get(4), new Series.JoinPair(-1, 0));
  }

  // Outer join contains unmatched rows from both sides.
  @Test
  public void testLongJoinOuter() {
    Series sLeft = DataFrame.toSeries(4, 3, 1, 2);
    Series sRight = DataFrame.toSeries(5, 4, 3, 3, 0);
    List<Series.JoinPair> pairs = sLeft.join(sRight, Series.JoinType.OUTER);
    Assert.assertEquals(pairs.size(), 7);
    Assert.assertEquals(pairs.get(0), new Series.JoinPair(-1, 4));
    Assert.assertEquals(pairs.get(1), new Series.JoinPair(2, -1));
    Assert.assertEquals(pairs.get(2), new Series.JoinPair(3, -1));
    Assert.assertEquals(pairs.get(3), new Series.JoinPair(1, 2));
    Assert.assertEquals(pairs.get(4), new Series.JoinPair(1, 3));
    Assert.assertEquals(pairs.get(5), new Series.JoinPair(0, 1));
    Assert.assertEquals(pairs.get(6), new Series.JoinPair(-1, 0));
  }

  // Joining long against double matches on numeric value across types.
  @Test
  public void testLongDoubleJoinInner() {
    Series sLeft = DataFrame.toSeries(4, 3, 1, 2);
    Series sRight = DataFrame.toSeries(5.0, 4.0, 3.0, 3.0, 0.0);
    List<Series.JoinPair> pairs = sLeft.join(sRight, Series.JoinType.INNER);
    Assert.assertEquals(pairs.size(), 3);
    Assert.assertEquals(pairs.get(0), new Series.JoinPair(1, 2));
    Assert.assertEquals(pairs.get(1), new Series.JoinPair(1, 3));
    Assert.assertEquals(pairs.get(2), new Series.JoinPair(0, 1));
  }

  @Test
  public void testStringJoinInner() {
    Series sLeft = DataFrame.toSeries("4", "3", "1", "2");
    Series sRight = DataFrame.toSeries("5", "4", "3", "3", "0");
    List<Series.JoinPair> pairs = sLeft.join(sRight, Series.JoinType.INNER);
    Assert.assertEquals(pairs.size(), 3);
    Assert.assertEquals(pairs.get(0), new Series.JoinPair(1, 2));
    Assert.assertEquals(pairs.get(1), new Series.JoinPair(1, 3));
    Assert.assertEquals(pairs.get(2), new Series.JoinPair(0, 1));
  }

  @Test
  public void testBooleanJoinInner() {
    Series sLeft = DataFrame.toSeries(true, false, false);
    Series sRight = DataFrame.toSeries(false, true, true);
    List<Series.JoinPair> pairs = sLeft.join(sRight, Series.JoinType.INNER);
    Assert.assertEquals(pairs.size(), 4);
    Assert.assertEquals(pairs.get(0), new Series.JoinPair(1, 0));
    Assert.assertEquals(pairs.get(1), new Series.JoinPair(2, 0));
    Assert.assertEquals(pairs.get(2), new Series.JoinPair(0, 1));
    Assert.assertEquals(pairs.get(3), new Series.JoinPair(0, 2));
  }

  // DataFrame inner join: rows ordered by key; key columns keep their types.
  @Test
  public void testJoinInner() {
    DataFrame left = new DataFrame()
        .addSeries("leftKey", 4, 2, 1, 3)
        .addSeries("leftValue", "a", "d", "c", "b");
    DataFrame right = new DataFrame()
        .addSeries("rightKey", 5.0, 2.0, 1.0, 3.0, 1.0, 0.0)
        .addSeries("rightValue", "v", "z", "w", "x", "y", "u");
    DataFrame joined = left.joinInner(right, "leftKey", "rightKey");
    Assert.assertEquals(joined.size(), 4);
    Assert.assertEquals(joined.get("leftKey").type(), Series.SeriesType.LONG);
    Assert.assertEquals(joined.get("leftValue").type(), Series.SeriesType.STRING);
    Assert.assertEquals(joined.get("rightKey").type(), Series.SeriesType.DOUBLE);
    Assert.assertEquals(joined.get("rightValue").type(), Series.SeriesType.STRING);
    assertEquals(joined.getLongs("leftKey"), 1, 1, 2, 3);
    assertEquals(joined.getDoubles("rightKey"), 1.0, 1.0, 2.0, 3.0);
    assertEquals(joined.getStrings("leftValue"), "c", "c", "d", "b");
    assertEquals(joined.getStrings("rightValue"), "w", "y", "z", "x");
  }

  // Outer join fills non-matching sides with type-appropriate null sentinels.
  @Test
  public void testJoinOuter() {
    DataFrame left = new DataFrame()
        .addSeries("leftKey", 4, 2, 1, 3)
        .addSeries("leftValue", "a", "d", "c", "b");
    DataFrame right = new DataFrame()
        .addSeries("rightKey", 5.0, 2.0, 1.0, 3.0, 1.0, 0.0)
        .addSeries("rightValue", "v", "z", "w", "x", "y", "u");
    DataFrame joined = left.joinOuter(right, "leftKey", "rightKey");
    Assert.assertEquals(joined.size(), 7);
    Assert.assertEquals(joined.get("leftKey").type(), Series.SeriesType.LONG);
    Assert.assertEquals(joined.get("leftValue").type(), Series.SeriesType.STRING);
    Assert.assertEquals(joined.get("rightKey").type(), Series.SeriesType.DOUBLE);
    Assert.assertEquals(joined.get("rightValue").type(), Series.SeriesType.STRING);
    assertEquals(joined.getLongs("leftKey"), LNULL, 1, 1, 2, 3, 4, LNULL);
    assertEquals(joined.getDoubles("rightKey"), 0.0, 1.0, 1.0, 2.0, 3.0, DNULL, 5.0);
    assertEquals(joined.getStrings("leftValue"), SNULL, "c", "c", "d", "b", "a", SNULL);
    assertEquals(joined.getStrings("rightValue"), "u", "w", "y", "z", "x", SNULL, "v");
  }

  // Colliding non-key names get left/right suffixes; key and unique names do not.
  @Test
  public void testJoinSameName() {
    DataFrame left = new DataFrame()
        .addSeries("name", 1, 2, 3, 4)
        .addSeries("value", 1, 2, 3, 4)
        .addSeries("left", 1, 2, 3, 4);
    DataFrame right = new DataFrame()
        .addSeries("name", 3, 4, 5, 6)
        .addSeries("value", 3, 4, 5, 6)
        .addSeries("right", 1, 2, 3, 4);
    DataFrame df = left.joinInner(right, "name", "name");
    Assert.assertEquals(df.getSeriesNames().size(), 5);
    Assert.assertTrue(df.contains("name"));
    Assert.assertFalse(df.contains("name" + DataFrame.COLUMN_JOIN_LEFT));
    Assert.assertFalse(df.contains("name" + DataFrame.COLUMN_JOIN_RIGHT));
    Assert.assertFalse(df.contains("value"));
    Assert.assertTrue(df.contains("value" + DataFrame.COLUMN_JOIN_LEFT));
    Assert.assertTrue(df.contains("value" + DataFrame.COLUMN_JOIN_RIGHT));
    Assert.assertTrue(df.contains("left"));
    Assert.assertFalse(df.contains("left" + DataFrame.COLUMN_JOIN_LEFT));
Assert.assertFalse(df.contains("left" + DataFrame.COLUMN_JOIN_RIGHT));
    Assert.assertTrue(df.contains("right"));
    Assert.assertFalse(df.contains("right" + DataFrame.COLUMN_JOIN_LEFT));
    Assert.assertFalse(df.contains("right" + DataFrame.COLUMN_JOIN_RIGHT));
  }

  // Distinct key names are both kept unsuffixed.
  @Test
  public void testJoinDifferentName() {
    DataFrame left = new DataFrame()
        .addSeries("name", 1, 2, 3, 4);
    DataFrame right = new DataFrame()
        .addSeries("key", 3, 4, 5, 6);
    DataFrame df = left.joinInner(right, "name", "key");
    Assert.assertEquals(df.getSeriesNames().size(), 2);
    Assert.assertTrue(df.contains("name"));
    Assert.assertTrue(df.contains("key"));
  }

  // Index-based joins require both frames to actually have an index.
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testJoinIndexFailNoIndex() {
    DataFrame dfIndex = new DataFrame(5);
    DataFrame dfNoIndex = new DataFrame().addSeries(DataFrame.COLUMN_INDEX_DEFAULT, DataFrame.toSeries(VALUES_DOUBLE));
    dfIndex.joinOuter(dfNoIndex);
  }

  @Test
  public void testJoinIndex() {
    DataFrame dfLeft = new DataFrame(5).addSeries("one", 5, 4, 3, 2, 1);
    DataFrame dfRight = new DataFrame(3).addSeries("two", "A", "B", "C");
    DataFrame joined = dfLeft.joinLeft(dfRight);
    assertEquals(joined.getStrings("one"), "5", "4", "3", "2", "1");
    assertEquals(joined.getStrings("two"), "A", "B", "C", null, null);
  }

  // addSeries(DataFrame, ...) aligns on index, padding missing rows with nulls
  // and copying only the named series.
  @Test
  public void testAddSeriesFromDataFrame() {
    DataFrame dfLeft = new DataFrame(6)
        .addSeries("one", 5, 4, 3, 2, 1, 0);
    DataFrame dfRight = new DataFrame(5)
        .addSeries("two", 11, 12, 13, 14, 15)
        .addSeries("three", 22, 23, 24, 25, 26)
        .addSeries("four", 1, 1, 1, 1, 1);
    dfLeft.addSeries(dfRight, "two", "three");
    assertEquals(dfLeft.getLongs("two"), 11, 12, 13, 14, 15, LNULL);
    assertEquals(dfLeft.getLongs("three"), 22, 23, 24, 25, 26, LNULL);
    Assert.assertTrue(dfLeft.contains("one"));
    Assert.assertTrue(!dfLeft.contains("four"));
  }

  // Identical indexes take the (presumably) fast copy path.
  @Test
  public void testAddSeriesFromDataFrameFast() {
    DataFrame dfLeft = new DataFrame(5)
        .addSeries("one", 5, 4, 3, 2, 1);
    DataFrame dfRight = new DataFrame(5)
        .addSeries("two", 11, 12, 13, 14, 15);
    Assert.assertEquals(dfLeft.getIndex(), dfRight.getIndex());
    dfLeft.addSeries(dfRight);
    assertEquals(dfLeft.getLongs("two"), 11, 12, 13, 14, 15);
    Assert.assertTrue(dfLeft.contains("one"));
  }

  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testAddSeriesFromDataFrameFailIndexSource() {
    DataFrame dfLeft = new DataFrame(5);
    DataFrame dfRight = new DataFrame();
    dfLeft.addSeries(dfRight);
  }

  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testAddSeriesFromDataFrameFailIndexDestination() {
    DataFrame dfLeft = new DataFrame();
    DataFrame dfRight = new DataFrame(5);
    dfLeft.addSeries(dfRight);
  }

  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testAddSeriesFromDataFrameFailMissingSeries() {
    DataFrame dfLeft = new DataFrame(5);
    DataFrame dfRight = new DataFrame(5);
    dfLeft.addSeries(dfRight, "missing");
  }

  // A source index with duplicate values (1, 1, 3) cannot be mapped unambiguously.
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testAddSeriesFromDataFrameFailNonUniqueMapping() {
    DataFrame dfLeft = new DataFrame(3)
        .addSeries("one", 1, 2, 3);
    DataFrame dfRight = new DataFrame(1, 1, 3)
        .addSeries("two", 10, 11, 12);
    dfLeft.addSeries(dfRight);
  }

  @Test
  public void testBooleanHasTrueFalseNull() {
    BooleanSeries s1 = DataFrame.toSeries(new boolean[0]);
    Assert.assertFalse(s1.hasFalse());
    Assert.assertFalse(s1.hasTrue());
    Assert.assertFalse(s1.hasNull());
    BooleanSeries s2 = DataFrame.toSeries(true, true);
    Assert.assertFalse(s2.hasFalse());
    Assert.assertTrue(s2.hasTrue());
    Assert.assertFalse(s2.hasNull());
    BooleanSeries s3 = DataFrame.toSeries(false, false);
    Assert.assertTrue(s3.hasFalse());
    Assert.assertFalse(s3.hasTrue());
    Assert.assertFalse(s3.hasNull());
    BooleanSeries s4 = DataFrame.toSeries(true, false);
    Assert.assertTrue(s4.hasFalse());
    Assert.assertTrue(s4.hasTrue());
    Assert.assertFalse(s4.hasNull());
    BooleanSeries s5 = DataFrame.toSeries(TRUE, FALSE, BNULL);
    Assert.assertTrue(s5.hasFalse());
    Assert.assertTrue(s5.hasTrue());
    Assert.assertTrue(s5.hasNull());
  }

  // allTrue()/allFalse(): empty series is neither; nulls are ignored (s5, s6).
  @Test
  public void testBooleanAllTrueFalse() {
    BooleanSeries s1 = BooleanSeries.empty();
    Assert.assertFalse(s1.allTrue());
    Assert.assertFalse(s1.allFalse());
    BooleanSeries s2 = DataFrame.toSeries(true, true);
    Assert.assertFalse(s2.allFalse());
    Assert.assertTrue(s2.allTrue());
    BooleanSeries s3 = DataFrame.toSeries(false, false);
    Assert.assertTrue(s3.allFalse());
    Assert.assertFalse(s3.allTrue());
    BooleanSeries s4 = DataFrame.toSeries(true, false);
    Assert.assertFalse(s4.allFalse());
    Assert.assertFalse(s4.allTrue());
    BooleanSeries s5 = DataFrame.toSeries(TRUE, TRUE, BNULL);
    Assert.assertFalse(s5.allFalse());
    Assert.assertTrue(s5.allTrue());
    BooleanSeries s6 = DataFrame.toSeries(FALSE, FALSE, BNULL);
    Assert.assertTrue(s6.allFalse());
    Assert.assertFalse(s6.allTrue());
    BooleanSeries s7 = DataFrame.toSeries(TRUE, FALSE, BNULL);
    Assert.assertFalse(s7.allFalse());
    Assert.assertFalse(s7.allTrue());
  }

  // Type inference picks the narrowest type covering all parseable values;
  // empty strings and nulls do not veto a numeric/boolean inference.
  @Test
  public void testStringInferSeriesTypeDoubleDot() {
    Series.SeriesType t = StringSeries.buildFrom("1", "2", "3.", "", null).inferType();
    Assert.assertEquals(t, Series.SeriesType.DOUBLE);
  }

  @Test
  public void testStringInferSeriesTypeDoubleExp() {
    Series.SeriesType t = StringSeries.buildFrom("1", "2e1", "3", "", null).inferType();
    Assert.assertEquals(t, Series.SeriesType.DOUBLE);
  }

  @Test
  public void testStringInferSeriesTypeLong() {
    Series.SeriesType t = StringSeries.buildFrom("2", "-4", "-0", "", null).inferType();
    Assert.assertEquals(t, Series.SeriesType.LONG);
  }

  @Test
  public void testStringInferSeriesTypeBoolean() {
    Series.SeriesType t = StringSeries.buildFrom("true", "False", "false", "", null).inferType();
    Assert.assertEquals(t, Series.SeriesType.BOOLEAN);
  }

  @Test
  public void testStringInferSeriesTypeString() {
    Series.SeriesType t = StringSeries.buildFrom("true", "", "-0.2e1", null).inferType();
    Assert.assertEquals(t, Series.SeriesType.STRING);
  }

  // Cross-type compare — per the inline expectations below, comparison
  // presumably happens in the receiver's type domain; confirm against Series.
  @Test
  public void testCompareInversion() {
    StringSeries string = StringSeries.buildFrom("0", "", "true");
    BooleanSeries bool =
BooleanSeries.buildFrom(FALSE, BNULL, TRUE);
    Assert.assertTrue(string.compare(bool, 0, 0) < 0); // "0" < "false"
    Assert.assertTrue(bool.compare(string, 0, 0) == 0);
    Assert.assertTrue(string.compare(bool, 1, 1) > 0); // "" > null
    Assert.assertTrue(bool.compare(string, 1, 1) == 0);
    Assert.assertTrue(string.compare(bool, 2, 2) == 0);
    Assert.assertTrue(bool.compare(string, 2, 2) == 0);
  }

  // CSV ingestion: per-column type inference and empty-value handling
  // (see expected types and nulls; fixture is the test.csv resource).
  @Test
  public void testDataFrameFromCsv() throws IOException {
    Reader in = new InputStreamReader(this.getClass().getResourceAsStream("test.csv"));
    DataFrame df = DataFrame.fromCsv(in);
    Assert.assertEquals(df.getSeriesNames().size(), 3);
    Assert.assertEquals(df.size(), 6);
    Series a = df.get("header_A");
    Assert.assertEquals(a.type(), Series.SeriesType.STRING);
    assertEquals(a.getStrings(), "a1", "A2", "two words", "", "with comma, semicolon; and more", "");
    Series b = df.get("_1headerb");
    Assert.assertEquals(b.type(), Series.SeriesType.LONG);
    assertEquals(b.getLongs(), 1, 2, 3, 4, 5, 6);
    Series c = df.get("Header_C");
    Assert.assertEquals(c.type(), Series.SeriesType.BOOLEAN);
    assertEquals(c.getBooleans(), BNULL, TRUE, FALSE, FALSE, BNULL, TRUE);
  }

  // map(fn, column): the output series takes the function's native type.
  @Test
  public void testDoubleFunctionConversion() {
    Series out = df.map(new Series.DoubleFunction() {
      @Override
      public double apply(double... values) {
        return values[0] + 1;
      }
    }, "long");
    Assert.assertEquals(out.type(), Series.SeriesType.DOUBLE);
  }

  @Test
  public void testLongFunctionConversion() {
    Series out = df.map(new Series.LongFunction() {
      @Override
      public long apply(long... values) {
        return values[0] + 1;
      }
    }, "double");
    Assert.assertEquals(out.type(), Series.SeriesType.LONG);
  }

  @Test
  public void testStringFunctionConversion() {
    Series out = df.map(new Series.StringFunction() {
      @Override
      public String apply(String... values) {
        return values[0] + "-";
      }
    }, "long");
    Assert.assertEquals(out.type(), Series.SeriesType.STRING);
  }

  @Test
  public void testBooleanFunctionConversion() {
    Series out = df.map(new Series.BooleanFunction() {
      @Override
      public boolean apply(boolean... values) {
        return !values[0];
      }
    }, "long");
    Assert.assertEquals(out.type(), Series.SeriesType.BOOLEAN);
  }

  // The "Ex" variant works on raw bytes (TRUE is a byte constant here).
  @Test
  public void testBooleanFunctionExConversion() {
    Series out = df.map(new Series.BooleanFunctionEx() {
      @Override
      public byte apply(byte... values) {
        return TRUE;
      }
    }, "long");
    Assert.assertEquals(out.type(), Series.SeriesType.BOOLEAN);
  }

  // Conditionals always produce a boolean series regardless of input type.
  @Test
  public void testDoubleConditionalConversion() {
    Series out = df.map(new Series.DoubleConditional() {
      @Override
      public boolean apply(double... values) {
        return true;
      }
    }, "long");
    Assert.assertEquals(out.type(), Series.SeriesType.BOOLEAN);
  }

  @Test
  public void testLongConditionalConversion() {
    Series out = df.map(new Series.LongConditional() {
      @Override
      public boolean apply(long... values) {
        return true;
      }
    }, "double");
    Assert.assertEquals(out.type(), Series.SeriesType.BOOLEAN);
  }

  @Test
  public void testStringConditionalConversion() {
    Series out = df.map(new Series.StringConditional() {
      @Override
      public boolean apply(String... values) {
        return true;
      }
    }, "long");
    Assert.assertEquals(out.type(), Series.SeriesType.BOOLEAN);
  }

  @Test
  public void testBooleanConditionalConversion() {
    Series out = df.map(new Series.BooleanConditional() {
      @Override
      public boolean apply(boolean... values) {
        return true;
      }
    }, "long");
    Assert.assertEquals(out.type(), Series.SeriesType.BOOLEAN);
  }

  // Forward fill carries the last non-null value; leading nulls remain.
  @Test
  public void testFillForward() {
    // must pass
    LongSeries.empty().fillNullForward();
    // must pass
    LongSeries.buildFrom(LNULL).fillNullForward();
    LongSeries in = LongSeries.buildFrom(LNULL, 1, LNULL, 2, 3, LNULL);
    assertEquals(in.fillNullForward(), LNULL, 1, 1, 2, 3, 3);
  }

  // Backward fill carries the next non-null value; trailing nulls remain.
  @Test
  public void testFillBackward() {
    // must pass
    LongSeries.empty().fillNullBackward();
    // must pass
    LongSeries.buildFrom(LNULL).fillNullBackward();
    LongSeries in = LongSeries.buildFrom(LNULL, 1, LNULL, 2, 3, LNULL);
    assertEquals(in.fillNullBackward(), 1, 1, 2, 2, 3, LNULL);
  }

  // getIndex() on an index-less frame throws.
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testIndexNone() {
    DataFrame df = new DataFrame();
    Assert.assertFalse(df.hasIndex());
    df.getIndex();
  }

  @Test
  public void testIndexDefault() {
    Assert.assertTrue(new DataFrame(0).hasIndex());
    Assert.assertTrue(new DataFrame(1, 2, 3).hasIndex());
    Assert.assertTrue(new DataFrame(DataFrame.toSeries(VALUES_STRING)).hasIndex());
  }

  // copy() preserves a custom index assignment.
  @Test
  public void testIndexCopy() {
    DataFrame df = new DataFrame(5)
        .addSeries("test", DataFrame.toSeries(VALUES_BOOLEAN))
        .setIndex("test");
    Assert.assertEquals(df.copy().getIndexName(), "test");
  }

  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testIndexSetInvalid() {
    DataFrame df = new DataFrame(0);
    df.setIndex("test");
  }

  // The index designation follows its series through a rename; re-adding a
  // series under the old default name must not steal the index back.
  @Test
  public void testIndexRename() {
    DataFrame df = new DataFrame(0);
    Series index = df.getIndex();
    df.renameSeries(df.getIndexName(), "test");
    df.addSeries(DataFrame.COLUMN_INDEX_DEFAULT, DataFrame.toSeries(new double[0]));
    Assert.assertEquals(df.getIndexName(), "test");
    Assert.assertEquals(df.getIndex(), index);
  }

  // normalize() rescales to [0, 1] over min..max.
  @Test
  public void testDoubleNormalize() {
    DoubleSeries s = DataFrame.toSeries(1.5, 2.0, 3.5).normalize();
    assertEquals(s, 0, 0.25, 1.0);
  }

  // A constant series cannot be normalized; the result is all nulls.
  @Test
  public void testDoubleNormalizeFailInvalid() {
    DoubleSeries s = DataFrame.toSeries(1.5, 1.5, 1.5).normalize();
    assertEquals(s,
DoubleSeries.nulls(3));
  }

  @Test
  public void testDoubleZScore() {
    DoubleSeries s = DataFrame.toSeries(0.0, 1.0, 2.0).zscore();
    assertEquals(s, -0.707, 0.0, 0.707);
  }

  // Zero variance -> zscore undefined -> all nulls.
  @Test
  public void testDoubleZScoreFailInvalid() {
    DoubleSeries s = DataFrame.toSeries(1.5, 1.5, 1.5).zscore();
    assertEquals(s, DoubleSeries.nulls(3));
  }

  // Element-wise arithmetic: nulls propagate on either side; divide-by-zero throws.
  @Test
  public void testDoubleOperationsSeries() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 0, 1, 1.5, 0.003);
    DoubleSeries mod = DataFrame.toSeries(1, 1, 1, 0, DNULL);
    assertEquals(base.add(mod), DNULL, 1, 2, 1.5, DNULL);
    assertEquals(base.subtract(mod), DNULL, -1, 0, 1.5, DNULL);
    assertEquals(base.multiply(mod), DNULL, 0, 1, 0, DNULL);
    assertEquals(base.divide(mod.replace(0, 1)), DNULL, 0, 1, 1.5, DNULL);
    assertEquals(base.pow(mod), DNULL, 0, 1, 1, DNULL);
    assertEquals(base.eq(mod), BNULL, FALSE, TRUE, FALSE, BNULL);
    try {
      base.divide(mod); // mod contains a zero
      Assert.fail();
    } catch (ArithmeticException expected) {
      // left blank
    }
  }

  // Series operands of unequal length are rejected by every operator.
  @Test
  public void testDoubleOperationsSeriesMisaligned() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 0, 1, 1.5, 0.003);
    DoubleSeries mod = DataFrame.toSeries(1, 1, 1, DNULL);
    try {
      base.add(mod);
      Assert.fail();
    } catch (IllegalArgumentException expected) {
      // left blank
    }
    try {
      base.subtract(mod);
      Assert.fail();
    } catch (IllegalArgumentException expected) {
      // left blank
    }
    try {
      base.multiply(mod);
      Assert.fail();
    } catch (IllegalArgumentException expected) {
      // left blank
    }
    try {
      base.divide(mod);
      Assert.fail();
    } catch (IllegalArgumentException expected) {
      // left blank
    }
    try {
      base.eq(mod);
      Assert.fail();
    } catch (IllegalArgumentException expected) {
      // left blank
    }
  }

  // Scalar variants: a null constant nulls the whole result.
  @Test
  public void testDoubleOperationAddConstant() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 0, 1, 1.5, 0.003);
    assertEquals(base.add(1), DNULL, 1, 2, 2.5, 1.003);
    assertEquals(base.add(0), DNULL, 0, 1, 1.5, 0.003);
    assertEquals(base.add(-1), DNULL, -1, 0, 0.5, -0.997);
    assertEquals(base.add(DNULL), DoubleSeries.nulls(5));
  }

  @Test
  public void testDoubleOperationSubtractConstant() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 0, 1, 1.5, 0.003);
    assertEquals(base.subtract(1), DNULL, -1, 0, 0.5, -0.997);
    assertEquals(base.subtract(0), DNULL, 0, 1, 1.5, 0.003);
    assertEquals(base.subtract(-1), DNULL, 1, 2, 2.5, 1.003);
    assertEquals(base.subtract(DNULL), DoubleSeries.nulls(5));
  }

  @Test
  public void testDoubleOperationMultiplyConstant() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 0, 1, 1.5, 0.003);
    assertEquals(base.multiply(1), DNULL, 0, 1, 1.5, 0.003);
    assertEquals(base.multiply(0), DNULL, 0, 0, 0, 0);
    assertEquals(base.multiply(-1), DNULL, 0, -1, -1.5, -0.003);
    assertEquals(base.multiply(DNULL), DoubleSeries.nulls(5));
  }

  @Test
  public void testDoubleOperationDivideConstant() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 0, 1, 1.5, 0.003);
    assertEquals(base.divide(1), DNULL, 0, 1, 1.5, 0.003);
    assertEquals(base.divide(-1), DNULL, 0, -1, -1.5, -0.003);
    assertEquals(base.divide(DNULL), DoubleSeries.nulls(5));
    try {
      base.divide(0);
      Assert.fail();
    } catch (ArithmeticException expected) {
      // left blank
    }
  }

  // pow(0) is 1 even for a zero base; pow(-1) of 0 yields INFINITY.
  @Test
  public void testDoubleOperationPowConstant() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 0, 1, 1.5, 0.003);
    assertEquals(base.pow(1), DNULL, 0, 1, 1.5, 0.003);
    assertEquals(base.pow(0), DNULL, 1, 1, 1, 1);
    assertEquals(base.pow(-1), DNULL, DoubleSeries.INFINITY, 1, 1 / 1.5, 1 / 0.003);
    assertEquals(base.pow(DNULL), DoubleSeries.nulls(5));
  }

  @Test
  public void testDoubleOperationEqConstant() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 0, 1, 1.5, 0.003);
    assertEquals(base.eq(1), BNULL, FALSE, TRUE, FALSE, FALSE);
    assertEquals(base.eq(0), BNULL, TRUE, FALSE, FALSE, FALSE);
    assertEquals(base.eq(-1), BNULL, FALSE, FALSE, FALSE, FALSE);
    assertEquals(base.eq(DNULL), BooleanSeries.nulls(5));
  }

  // count()/contains()/replace() treat the null sentinel as a matchable value.
  @Test
  public void testDoubleCount() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 1, 1, 1.5, 0.003);
    Assert.assertEquals(base.count(1), 2);
    Assert.assertEquals(base.count(2), 0);
    Assert.assertEquals(base.count(DNULL), 1);
  }

  @Test
  public void testDoubleContains() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 1, 1, 1.5, 0.003);
    Assert.assertTrue(base.contains(1));
    Assert.assertFalse(base.contains(2));
    Assert.assertTrue(base.contains(DNULL));
  }

  @Test
  public void testDoubleReplace() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 1, 1, 1.5, 0.003);
    assertEquals(base.replace(1, 2), DNULL, 2, 2, 1.5, 0.003);
    assertEquals(base.replace(1, DNULL), DNULL, DNULL, DNULL, 1.5, 0.003);
    assertEquals(base.replace(2, 1), DNULL, 1, 1, 1.5, 0.003);
    assertEquals(base.replace(1.5, DNULL), DNULL, 1, 1, DNULL, 0.003);
    assertEquals(base.replace(DNULL, 1), 1, 1, 1, 1.5, 0.003);
  }

  // filter(BooleanSeries) nulls rows where the mask is false or null.
  @Test
  public void testDoubleFilterSeries() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 1, 1, 1.5, 0.003);
    BooleanSeries mod = DataFrame.toSeries(TRUE, TRUE, TRUE, FALSE, BNULL);
    assertEquals(base.filter(mod), DNULL, 1, 1, DNULL, DNULL);
  }

  @Test
  public void testDoubleFilterConditional() {
    DoubleSeries base = DataFrame.toSeries(DNULL, 1, 1, 1.5, 0.003);
    BooleanSeries mod = DataFrame.toSeries(TRUE, TRUE, TRUE, FALSE, BNULL);
    assertEquals(base.filter(new Series.DoubleConditional() {
      @Override
      public boolean apply(double...
values) { return (values[0] >= 1 && values[0] < 1.5) || values[0] == 0.003; } }), DNULL, 1, 1, DNULL, 0.003); } @Test public void testLongOperationsSeries() { LongSeries base = DataFrame.toSeries(LNULL, 0, 1, 5, 10); LongSeries mod = DataFrame.toSeries(1, 1, 1, 0, LNULL); assertEquals(base.add(mod), LNULL, 1, 2, 5, LNULL); assertEquals(base.subtract(mod), LNULL, -1, 0, 5, LNULL); assertEquals(base.multiply(mod), LNULL, 0, 1, 0, LNULL); assertEquals(base.divide(mod.replace(0, 1)), LNULL, 0, 1, 5, LNULL); assertEquals(base.eq(mod), BNULL, FALSE, TRUE, FALSE, BNULL); try { base.divide(mod); Assert.fail(); } catch(ArithmeticException expected) { // left blank } } @Test public void testLongOperationsSeriesMisaligned() { LongSeries base = DataFrame.toSeries(LNULL, 0, 1, 5, 10); LongSeries mod = DataFrame.toSeries(1, 1, 1, LNULL); try { base.add(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } try { base.subtract(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } try { base.multiply(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } try { base.divide(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } try { base.eq(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } } @Test public void testLongOperationAddConstant() { LongSeries base = DataFrame.toSeries(LNULL, 0, 1, 5, 10); assertEquals(base.add(1), LNULL, 1, 2, 6, 11); assertEquals(base.add(0), LNULL, 0, 1, 5, 10); assertEquals(base.add(-1), LNULL, -1, 0, 4, 9); assertEquals(base.add(LNULL), LongSeries.nulls(5)); } @Test public void testLongOperationSubtractConstant() { LongSeries base = DataFrame.toSeries(LNULL, 0, 1, 5, 10); assertEquals(base.subtract(1), LNULL, -1, 0, 4, 9); assertEquals(base.subtract(0), LNULL, 0, 1, 5, 10); assertEquals(base.subtract(-1), LNULL, 1, 2, 6, 11); assertEquals(base.subtract(LNULL), LongSeries.nulls(5)); } @Test public void 
testLongOperationMultiplyConstant() { LongSeries base = DataFrame.toSeries(LNULL, 0, 1, 5, 10); assertEquals(base.multiply(1), LNULL, 0, 1, 5, 10); assertEquals(base.multiply(0), LNULL, 0, 0, 0, 0); assertEquals(base.multiply(-1), LNULL, 0, -1, -5, -10); assertEquals(base.multiply(LNULL), LongSeries.nulls(5)); } @Test public void testLongOperationDivideConstant() { LongSeries base = DataFrame.toSeries(LNULL, 0, 1, 5, 10); assertEquals(base.divide(1), LNULL, 0, 1, 5, 10); assertEquals(base.divide(-1), LNULL, 0, -1, -5, -10); assertEquals(base.divide(LNULL), LongSeries.nulls(5)); try { base.divide(0); Assert.fail(); } catch(ArithmeticException expected) { // left blank } } @Test public void testLongOperationEqConstant() { LongSeries base = DataFrame.toSeries(LNULL, 0, 1, 5, 10); assertEquals(base.eq(1), BNULL, FALSE, TRUE, FALSE, FALSE); assertEquals(base.eq(0), BNULL, TRUE, FALSE, FALSE, FALSE); assertEquals(base.eq(-1), BNULL, FALSE, FALSE, FALSE, FALSE); assertEquals(base.eq(LNULL), BooleanSeries.nulls(5)); } @Test public void testLongCount() { LongSeries base = DataFrame.toSeries(LNULL, 0, 0, 5, 10); Assert.assertEquals(base.count(0), 2); Assert.assertEquals(base.count(2), 0); Assert.assertEquals(base.count(LNULL), 1); } @Test public void testLongContains() { LongSeries base = DataFrame.toSeries(LNULL, 0, 0, 5, 10); Assert.assertTrue(base.contains(0)); Assert.assertFalse(base.contains(2)); Assert.assertTrue(base.contains(LNULL)); } @Test public void testLongReplace() { LongSeries base = DataFrame.toSeries(LNULL, 0, 0, 5, 10); assertEquals(base.replace(0, 1), LNULL, 1, 1, 5, 10); assertEquals(base.replace(0, LNULL), LNULL, LNULL, LNULL, 5, 10); assertEquals(base.replace(2, 1), LNULL, 0, 0, 5, 10); assertEquals(base.replace(5, LNULL), LNULL, 0, 0, LNULL, 10); assertEquals(base.replace(LNULL, 1), 1, 0, 0, 5, 10); } @Test public void testLongFilterSeries() { LongSeries base = DataFrame.toSeries(LNULL, 0, 0, 5, 10); BooleanSeries mod = DataFrame.toSeries(TRUE, TRUE, 
TRUE, FALSE, BNULL); assertEquals(base.filter(mod), LNULL, 0, 0, LNULL, LNULL); } @Test public void testLongFilterConditional() { LongSeries base = DataFrame.toSeries(LNULL, 0, 0, 5, 10); BooleanSeries mod = DataFrame.toSeries(TRUE, TRUE, TRUE, FALSE, BNULL); assertEquals(base.filter(new Series.LongConditional() { @Override public boolean apply(long... values) { return values[0] >= 0 && values[0] <= 5; } }), LNULL, 0, 0, 5, LNULL); } @Test public void testStringOperationsSeries() { StringSeries base = DataFrame.toSeries(SNULL, "a", "b", "c", "d"); StringSeries mod = DataFrame.toSeries("A", "A", "b", "B", SNULL); assertEquals(base.concat(mod), SNULL, "aA", "bb", "cB", SNULL); assertEquals(base.eq(mod), BNULL, FALSE, TRUE, FALSE, BNULL); } @Test public void testStringOperationsSeriesMisaligned() { StringSeries base = DataFrame.toSeries(SNULL, "a", "b", "c", "d"); StringSeries mod = DataFrame.toSeries("A", "A", "b", SNULL); try { base.concat(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } try { base.eq(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } } @Test public void testStringOperationConcatConstant() { StringSeries base = DataFrame.toSeries(SNULL, "a", "b", "c", "d"); assertEquals(base.concat("X"), SNULL, "aX", "bX", "cX", "dX"); assertEquals(base.concat(""), SNULL, "a", "b", "c", "d"); assertEquals(base.concat(SNULL), StringSeries.nulls(5)); } @Test public void testStringOperationEqConstant() { StringSeries base = DataFrame.toSeries(SNULL, "a", "b", "c", "d"); assertEquals(base.eq("a"), BNULL, TRUE, FALSE, FALSE, FALSE); assertEquals(base.eq("b"), BNULL, FALSE, TRUE, FALSE, FALSE); assertEquals(base.eq(""), BNULL, FALSE, FALSE, FALSE, FALSE); assertEquals(base.eq(SNULL), BooleanSeries.nulls(5)); } @Test public void testStringCount() { StringSeries base = DataFrame.toSeries(SNULL, "a", "a", "b", "A"); Assert.assertEquals(base.count("a"), 2); Assert.assertEquals(base.count("d"), 0); 
Assert.assertEquals(base.count(SNULL), 1); } @Test public void testStringContains() { StringSeries base = DataFrame.toSeries(SNULL, "a", "a", "b", "A"); Assert.assertTrue(base.contains("a")); Assert.assertFalse(base.contains("")); Assert.assertTrue(base.contains(SNULL)); } @Test public void testStringReplace() { StringSeries base = DataFrame.toSeries(SNULL, "a", "a", "b", "A"); assertEquals(base.replace("a", "AA"), SNULL, "AA", "AA", "b", "A"); assertEquals(base.replace("a", SNULL), SNULL, SNULL, SNULL, "b", "A"); assertEquals(base.replace("b", "B"), SNULL, "a", "a", "B", "A"); assertEquals(base.replace("", "X"), SNULL, "a", "a", "b", "A"); assertEquals(base.replace(SNULL, "N"), "N", "a", "a", "b", "A"); } @Test public void testStringFilterSeries() { StringSeries base = DataFrame.toSeries(SNULL, "a", "a", "b", "A"); BooleanSeries mod = DataFrame.toSeries(TRUE, TRUE, TRUE, FALSE, BNULL); assertEquals(base.filter(mod), SNULL, "a", "a", SNULL, SNULL); } @Test public void testStringFilterConditional() { StringSeries base = DataFrame.toSeries(SNULL, "a", "a", "b", "A"); BooleanSeries mod = DataFrame.toSeries(TRUE, TRUE, TRUE, FALSE, BNULL); assertEquals(base.filter(new Series.StringConditional() { @Override public boolean apply(String... 
values) { return values[0].equals("a") || values[0].equals("A"); } }), SNULL, "a", "a", SNULL, "A"); } @Test public void testBooleanOperationsSeries() { BooleanSeries base = DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE); BooleanSeries mod = DataFrame.toSeries(TRUE, TRUE, TRUE, FALSE, BNULL); assertEquals(base.and(mod), BNULL, TRUE, FALSE, FALSE, BNULL); assertEquals(base.or(mod), BNULL, TRUE, TRUE, TRUE, BNULL); assertEquals(base.xor(mod), BNULL, FALSE, TRUE, TRUE, BNULL); assertEquals(base.implies(mod), BNULL, TRUE, TRUE, FALSE, BNULL); assertEquals(base.eq(mod), BNULL, TRUE, FALSE, FALSE, BNULL); } @Test public void testBooleanOperationsSeriesMisaligned() { BooleanSeries base = DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE); BooleanSeries mod = DataFrame.toSeries(BNULL, TRUE, FALSE, BNULL); try { base.and(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } try { base.or(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } try { base.xor(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } try { base.implies(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } try { base.eq(mod); Assert.fail(); } catch(IllegalArgumentException expected) { // left blank } } @Test public void testBooleanOperationAndConstant() { BooleanSeries base = DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE); assertEquals(base.and(true), BNULL, TRUE, FALSE, TRUE, FALSE); assertEquals(base.and(false), BNULL, FALSE, FALSE, FALSE, FALSE); assertEquals(base.and(BNULL), BooleanSeries.nulls(5)); } @Test public void testBooleanOperationOrConstant() { BooleanSeries base = DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE); assertEquals(base.or(true), BNULL, TRUE, TRUE, TRUE, TRUE); assertEquals(base.or(false), BNULL, TRUE, FALSE, TRUE, FALSE); assertEquals(base.or(BNULL), BooleanSeries.nulls(5)); } @Test public void testBooleanOperationXorConstant() { BooleanSeries base = 
DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE); assertEquals(base.xor(true), BNULL, FALSE, TRUE, FALSE, TRUE); assertEquals(base.xor(false), BNULL, TRUE, FALSE, TRUE, FALSE); assertEquals(base.xor(BNULL), BooleanSeries.nulls(5)); } @Test public void testBooleanOperationImpliesConstant() { BooleanSeries base = DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE); assertEquals(base.implies(true), BNULL, TRUE, TRUE, TRUE, TRUE); assertEquals(base.implies(false), BNULL, FALSE, TRUE, FALSE, TRUE); assertEquals(base.implies(BNULL), BooleanSeries.nulls(5)); } @Test public void testBooleanOperationEqConstant() { BooleanSeries base = DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE); assertEquals(base.eq(true), BNULL, TRUE, FALSE, TRUE, FALSE); assertEquals(base.eq(false), BNULL, FALSE, TRUE, FALSE, TRUE); assertEquals(base.eq(BNULL), BooleanSeries.nulls(5)); } @Test public void testBooleanCount() { BooleanSeries base = DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE); Assert.assertEquals(base.count(TRUE), 2); Assert.assertEquals(base.count(FALSE), 2); Assert.assertEquals(base.count(BNULL), 1); } @Test public void testBooleanContains() { BooleanSeries base = DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE); Assert.assertTrue(base.contains(TRUE)); Assert.assertTrue(base.contains(FALSE)); Assert.assertTrue(base.contains(BNULL)); } @Test public void testBooleanReplace() { BooleanSeries base = DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE); assertEquals(base.replace(TRUE, FALSE), BNULL, FALSE, FALSE, FALSE, FALSE); assertEquals(base.replace(TRUE, BNULL), BNULL, BNULL, FALSE, BNULL, FALSE); assertEquals(base.replace(FALSE, TRUE), BNULL, TRUE, TRUE, TRUE, TRUE); assertEquals(base.replace(FALSE, BNULL), BNULL, TRUE, BNULL, TRUE, BNULL); assertEquals(base.replace(BNULL, TRUE), TRUE, TRUE, FALSE, TRUE, FALSE); } @Test public void testBooleanFilterSeries() { BooleanSeries base = DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE); BooleanSeries mod = 
DataFrame.toSeries(TRUE, TRUE, TRUE, FALSE, BNULL);
    assertEquals(base.filter(mod), BNULL, TRUE, FALSE, BNULL, BNULL);
  }

  // FIX: this test was missing its @Test annotation, so TestNG silently never ran it.
  @Test
  public void testBooleanFilterConditional() {
    BooleanSeries base = DataFrame.toSeries(BNULL, TRUE, FALSE, TRUE, FALSE);
    // filter() retains rows where the conditional is true; filtered-out and null rows become null
    assertEquals(base.filter(new Series.BooleanConditional() {
      @Override
      public boolean apply(boolean... values) {
        return values[0];
      }
    }), BNULL, TRUE, BNULL, TRUE, BNULL);
  }

  @Test
  public void testAppend() {
    DataFrame base = new DataFrame();
    base.addSeries("A", 1, 2, 3, 4);
    base.addSeries("B", "a", "b", "c", "d");
    base.setIndex("B");

    DataFrame other = new DataFrame();
    other.addSeries("A", 5.0d, 6.3d, 7.1d);
    other.addSeries("C", true, true, false);

    DataFrame another = new DataFrame();
    another.addSeries("C", false, false);

    // append() keeps only the left-hand frame's series; missing cells become null
    DataFrame res = base.append(other, another);

    Assert.assertEquals(res.getSeriesNames(), new HashSet<>(Arrays.asList("A", "B")));
    Assert.assertEquals(res.get("A").type(), Series.SeriesType.LONG);
    Assert.assertEquals(res.get("B").type(), Series.SeriesType.STRING);

    assertEquals(res.getLongs("A"), 1, 2, 3, 4, 5, 6, 7, LongSeries.NULL, LongSeries.NULL);
    assertEquals(res.getStrings("B"), "a", "b", "c", "d", null, null, null, null, null);
  }

  /* **************************************************************************
   * Helpers
   ***************************************************************************/

  static void assertEquals(Series actual, Series expected) {
    Assert.assertEquals(actual, expected);
  }

  static void assertEquals(DoubleSeries actual, double... expected) {
    assertEquals(actual.getDoubles().values(), expected);
  }

  static void assertEquals(double[] actual, double...
expected) {
    if(actual.length != expected.length)
      Assert.fail(String.format("expected array length [%d] but found [%d]", actual.length, expected.length));
    for(int i=0; i<actual.length; i++) {
      // NaN == NaN counts as equal here -- presumably NaN is the DoubleSeries null placeholder; confirm
      if(Double.isNaN(actual[i]) && Double.isNaN(expected[i]))
        continue;
      // element-wise compare with tolerance; "index=" message pinpoints the failing position
      Assert.assertEquals(actual[i], expected[i], COMPARE_DOUBLE_DELTA, "index=" + i);
    }
  }

  static void assertEquals(LongSeries actual, long... expected) {
    assertEquals(actual.getLongs().values(), expected);
  }

  // exact element-wise comparison for long arrays
  static void assertEquals(long[] actual, long... expected) {
    if(actual.length != expected.length)
      Assert.fail(String.format("expected array length [%d] but found [%d]", actual.length, expected.length));
    for(int i=0; i<actual.length; i++) {
      Assert.assertEquals(actual[i], expected[i], "index=" + i);
    }
  }

  static void assertEquals(StringSeries actual, String... expected) {
    assertEquals(actual.getStrings().values(), expected);
  }

  // exact element-wise comparison for String arrays (null-safe via Assert)
  static void assertEquals(String[] actual, String... expected) {
    if(actual.length != expected.length)
      Assert.fail(String.format("expected array length [%d] but found [%d]", actual.length, expected.length));
    for(int i=0; i<actual.length; i++) {
      Assert.assertEquals(actual[i], expected[i], "index=" + i);
    }
  }

  // byte variant allows asserting against BNULL (the boolean null sentinel)
  static void assertEquals(BooleanSeries actual, byte... expected) {
    assertEquals(actual.getBooleans().values(), expected);
  }

  // boolean variant cannot express nulls, so any null in the series is a hard failure
  static void assertEquals(BooleanSeries actual, boolean... expected) {
    BooleanSeries s = actual.getBooleans();
    if(s.hasNull())
      Assert.fail("Encountered NULL when comparing against booleans");
    assertEquals(s.valuesBoolean(), expected);
  }

  static void assertEquals(byte[] actual, byte... expected) {
    if(actual.length != expected.length)
      Assert.fail(String.format("expected array length [%d] but found [%d]", actual.length, expected.length));
    for(int i=0; i<actual.length; i++) {
      Assert.assertEquals(actual[i], expected[i], "index=" + i);
    }
  }

  static void assertEquals(boolean[] actual, boolean...
expected) { if(actual.length != expected.length) Assert.fail(String.format("expected array length [%d] but found [%d]", actual.length, expected.length)); for(int i=0; i<actual.length; i++) { Assert.assertEquals(actual[i], expected[i], "index=" + i); } } } <file_sep>/** * Copyright (C) 2014-2016 LinkedIn Corp. (<EMAIL>) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.linkedin.pinot.core.realtime.impl.dictionary; import java.util.HashMap; import java.util.Map; import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.Test; import com.linkedin.pinot.common.data.FieldSpec; /** * Tests for concurrent read and write against REALTIME dictionary. * <p>For now just test against {@link IntOnHeapMutableDictionary}. Index contiguous integers from 1 so that the index * for each value is deterministic. 
*/ public class ConcurrentReadWriteDictionaryTest { private static final int NUM_ENTRIES = 1_000_000; private static final int NUM_READERS = 8; private static final ExecutorService EXECUTOR_SERVICE = Executors.newFixedThreadPool(NUM_READERS + 1); private static final long SEED = new Random().nextLong(); private static final Random RANDOM = new Random(SEED); @Test public void testSingleReaderSingleWriter() throws Exception { try { { MutableDictionary dictionary = new IntOnHeapMutableDictionary(); testSingleReaderSingleWriter(dictionary); dictionary.close(); } { MutableDictionary dictionary = new IntOffHeapMutableDictionary(NUM_ENTRIES / RANDOM.nextInt(NUM_ENTRIES/3), 2000); testSingleReaderSingleWriter(dictionary); dictionary.close(); } } catch (Throwable t) { t.printStackTrace(); Assert.fail("Failed with seed " + SEED); } } private void testSingleReaderSingleWriter(MutableDictionary dictionary) throws Exception { Future<Void> readerFuture = EXECUTOR_SERVICE.submit(new Reader(dictionary)); Future<Void> writerFuture = EXECUTOR_SERVICE.submit(new Writer(dictionary)); writerFuture.get(); readerFuture.get(); } @Test public void testMultiReadersSingleWriter() throws Exception { try { { MutableDictionary dictionary = new IntOnHeapMutableDictionary(); testMultiReadersSingleWriter(dictionary); dictionary.close(); } { MutableDictionary dictionary = new IntOffHeapMutableDictionary(NUM_ENTRIES / RANDOM.nextInt(NUM_ENTRIES/3), 2000); testMultiReadersSingleWriter(dictionary); dictionary.close(); } } catch (Throwable t) { t.printStackTrace(); Assert.fail("Failed with seed " + SEED); } } // A test to verify the functionality of off-heap int dictionary without concurrency private void testRealtimeDictionary(boolean onHeap) throws Exception { final int estCardinality = 943; final int numValues = estCardinality * 107; FieldSpec.DataType[] dataTypes = new FieldSpec.DataType[] {FieldSpec.DataType.INT, FieldSpec.DataType.LONG, FieldSpec.DataType.FLOAT, FieldSpec.DataType.DOUBLE}; int 
numEntries; final Map<Object, Integer> valueToDictId = new HashMap<>(); final int[] overflowSizes = new int[] {0, 2000}; for (FieldSpec.DataType dataType : dataTypes) { for (int overflowSize : overflowSizes) { MutableDictionary dictionary = makeDictionary(dataType, estCardinality, overflowSize, onHeap); valueToDictId.clear(); numEntries = 0; for (int i = 0; i < numValues; i++) { try { Object x = makeRandomNumber(dataType); if (valueToDictId.containsKey(x)) { Assert.assertEquals(Integer.valueOf(dictionary.indexOf(x)), valueToDictId.get(x)); } else { dictionary.index(x); int dictId = dictionary.indexOf(x); Assert.assertEquals(dictId, numEntries++); valueToDictId.put(x, dictId); } } catch (Throwable t) { t.printStackTrace(); Assert.fail("Failed with seed " + SEED + " iteration " + i + " for dataType " + dataType.toString() + " overflowsize=" + overflowSize); } } } } } @Test public void testRealtimeDictionary() throws Exception { testRealtimeDictionary(true); testRealtimeDictionary(false); } private MutableDictionary makeDictionary(FieldSpec.DataType dataType, final int estCaridinality, int maxOverflowSize, boolean onHeap) { switch (dataType) { case INT: if (onHeap) { return new IntOnHeapMutableDictionary(); } return new IntOffHeapMutableDictionary(estCaridinality, maxOverflowSize); case LONG: if (onHeap) { return new LongOnHeapMutableDictionary(); } return new LongOffHeapMutableDictionary(estCaridinality, maxOverflowSize); case FLOAT: if (onHeap) { return new FloatOnHeapMutableDictionary(); } return new FloatOffHeapMutableDictionary(estCaridinality, maxOverflowSize); case DOUBLE: if (onHeap) { return new DoubleOnHeapMutableDictionary(); } return new DoubleOffHeapMutableDictionary(estCaridinality, maxOverflowSize); } throw new UnsupportedOperationException("Unsupported type " + dataType.toString()); } private Object makeRandomNumber(FieldSpec.DataType dataType) { switch (dataType) { case INT: return RANDOM.nextInt(); case LONG: return RANDOM.nextLong(); case FLOAT: 
return RANDOM.nextFloat(); case DOUBLE: return RANDOM.nextDouble(); } throw new UnsupportedOperationException("Unsupported type " + dataType.toString()); } private void testMultiReadersSingleWriter(MutableDictionary dictionary) throws Exception { Future[] readerFutures = new Future[NUM_READERS]; for (int i = 0; i < NUM_READERS; i++) { readerFutures[i] = EXECUTOR_SERVICE.submit(new Reader(dictionary)); } Future<Void> writerFuture = EXECUTOR_SERVICE.submit(new Writer(dictionary)); writerFuture.get(); for (int i = 0; i < NUM_READERS; i++) { readerFutures[i].get(); } } @AfterClass public void tearDown() { EXECUTOR_SERVICE.shutdown(); } /** * Reader to read the index of each value after it's indexed into the dictionary, then get the value from the index. * <p>We can assume that we always first get the index of a value, then use the index to fetch the value. */ private class Reader implements Callable<Void> { private final MutableDictionary _dictionary; private Reader(MutableDictionary dictionary) { _dictionary = dictionary; } @Override public Void call() throws Exception { try { for (int i = 0; i < NUM_ENTRIES; i++) { int dictId; do { dictId = _dictionary.indexOf(i + 1); } while (dictId < 0); Assert.assertEquals(dictId, i); Assert.assertEquals(_dictionary.getIntValue(dictId), i + 1); // Fetch value by a random existing dictId int randomDictId = RANDOM.nextInt(i + 1); Assert.assertEquals(_dictionary.getIntValue(randomDictId), randomDictId + 1); } return null; } catch (Throwable t) { throw new RuntimeException(t); } } } /** * Writer to index value into dictionary, then check the index of the value. 
*/ private class Writer implements Callable<Void> { private final MutableDictionary _dictionary; private Writer(MutableDictionary dictionary) { _dictionary = dictionary; } @Override public Void call() throws Exception { try { for (int i = 0; i < NUM_ENTRIES; i++) { _dictionary.index(i + 1); Assert.assertEquals(_dictionary.indexOf(i + 1), i); // Index a random existing value int randomValue = RANDOM.nextInt(i + 1) + 1; _dictionary.index(randomValue); Assert.assertEquals(_dictionary.indexOf(randomValue), randomValue - 1); } return null; } catch (Throwable t) { throw new RuntimeException(t); } } } } <file_sep>package com.linkedin.thirdeye.dashboard.resources.v2.rootcause; import com.linkedin.thirdeye.dashboard.resources.v2.RootCauseEntityFormatter; import com.linkedin.thirdeye.dashboard.resources.v2.pojo.RootCauseEntity; import com.linkedin.thirdeye.rootcause.Entity; import com.linkedin.thirdeye.rootcause.impl.DimensionEntity; public class DimensionEntityFormatter extends RootCauseEntityFormatter { @Override public boolean applies(Entity entity) { return DimensionEntity.TYPE.isType(entity.getUrn()); } @Override public RootCauseEntity format(Entity entity) { DimensionEntity e = DimensionEntity.fromURN(entity.getUrn(), entity.getScore()); String label = String.format("%s=%s", e.getName(), e.getValue()); String link = String.format("javascript:alert('%s');", e.getUrn()); return makeRootCauseEntity(entity, "Dimension", label, link); } } <file_sep>package com.linkedin.thirdeye.datalayer.bao.jdbc; import com.google.inject.Singleton; import com.linkedin.thirdeye.datalayer.bao.AlertConfigManager; import com.linkedin.thirdeye.datalayer.dto.AlertConfigDTO; import com.linkedin.thirdeye.datalayer.pojo.AlertConfigBean; import java.util.HashMap; import java.util.List; import java.util.Map; @Singleton public class AlertConfigManagerImpl extends AbstractManagerImpl<AlertConfigDTO> implements AlertConfigManager { public AlertConfigManagerImpl() { super(AlertConfigDTO.class, 
AlertConfigBean.class);
  }

  // Looks up alert configs by their "active" flag via the generic findByParams filter map.
  @Override
  public List<AlertConfigDTO> findByActive(boolean active) {
    Map<String, Object> filters = new HashMap<>();
    filters.put("active", active);
    return super.findByParams(filters);
  }
}
<file_sep>package com.linkedin.thirdeye.rootcause.impl;

import com.linkedin.thirdeye.dataframe.DoubleSeries;
import com.linkedin.thirdeye.rootcause.Entity;
import com.linkedin.thirdeye.rootcause.Pipeline;
import com.linkedin.thirdeye.rootcause.PipelineContext;
import com.linkedin.thirdeye.rootcause.PipelineResult;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;


/**
 * NormalizationPipeline normalizes entity scores to a [0.0,1.0] interval based on observed
 * minimum and maximum scores.
 */
public class NormalizationPipeline extends Pipeline {
  /**
   * Constructor for dependency injection
   *
   * @param outputName pipeline output name
   * @param inputNames input pipeline names
   */
  public NormalizationPipeline(String outputName, Set<String> inputNames) {
    super(outputName, inputNames);
  }

  /**
   * Alternate constructor for RCAFrameworkLoader
   *
   * @param outputName pipeline output name
   * @param inputNames input pipeline names
   * @param ignore configuration properties (none)
   */
  public NormalizationPipeline(String outputName, Set<String> inputNames, Map<String, Object> ignore) {
    super(outputName, inputNames);
  }

  @Override
  public PipelineResult run(PipelineContext context) {
    List<Entity> entities = new ArrayList<>(context.filter(Entity.class));

    // collect raw scores in entity order so normalized values can be mapped back by index
    double[] score = new double[entities.size()];
    for(int i=0; i<entities.size(); i++) {
      score[i] = entities.get(i).getScore();
    }

    // NOTE(review): DoubleSeries.normalize() produces nulls (NaN) when all scores are equal
    // (zero min-max range) -- presumably downstream tolerates NaN entity scores; confirm.
    double[] normalized = DoubleSeries.buildFrom(score).normalize().values();

    List<Entity> output = new ArrayList<>(entities.size());
    for(int i=0; i<entities.size(); i++) {
      output.add(entities.get(i).withScore(normalized[i]));
    }

    return new PipelineResult(context, new HashSet<>(output));
  }
}
<file_sep>import anomaly from './anomaly';

export default {
anomaly, }; <file_sep>import { type } from './utils'; /** * Define the action types */ export const ActionTypes = { REQUEST_READ: type('[Anomaly] Request Read'), LOAD: type('[Anomaly] Load'), LOADING: type('[Anomaly] Loading'), REQUEST_FAIL: type('[Anomaly] Request Fail'), }; function request(params) { return { type: ActionTypes.REQUEST_READ, payload: { params, source: 'search' } }; } function loading() { return { type: ActionTypes.LOADING }; } function loadAnomaly(response) { return { type: ActionTypes.LOAD, payload: response }; } function requestFail() { return { type: ActionTypes.REQUEST_FAIL, }; } export const Actions = { request, loading, loadAnomaly, requestFail }; <file_sep>/** * Copyright (C) 2014-2016 LinkedIn Corp. (<EMAIL>) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/
package com.linkedin.pinot.core.query.scheduler;

import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.linkedin.pinot.common.query.QueryExecutor;
import com.linkedin.pinot.common.query.ServerQueryRequest;
import com.linkedin.pinot.common.utils.DataTable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Base class for query schedulers. Owns the two thread pools shared by all
 * scheduler implementations: the "runner" pool that drives each query end to
 * end, and the "worker" pool that executes per-segment operator work.
 */
public abstract class QueryScheduler {
  private static final Logger LOGGER = LoggerFactory.getLogger(QueryScheduler.class);

  public static final String QUERY_RUNNER_CONFIG_KEY = "query_runner_threads";
  public static final String QUERY_WORKER_CONFIG_KEY = "query_worker_threads";

  // Above NORM_PRIORITY but below MAX_PRIORITY: once a query completes we want
  // its response deserialized and returned as soon as possible.
  private static final int QUERY_RUNNER_THREAD_PRIORITY = 7;

  public static final int DEFAULT_QUERY_RUNNER_THREADS;
  public static final int DEFAULT_QUERY_WORKER_THREADS;

  int numQueryRunnerThreads;
  int numQueryWorkerThreads;

  // "Main" per-query threads: planning, distributing operators across worker
  // threads, then waiting for and reducing their results (MCombineOperator).
  // TODO: in future, this should be driven by configured policy like
  // poolPerTable or QoS based pools etc. The policy should also determine how
  // many threads we use per query.
  protected ListeningExecutorService queryRunners;
  // Worker threads that parallelize operator execution across segment groups.
  protected ListeningExecutorService queryWorkers;

  final QueryExecutor queryExecutor;

  static {
    int coreCount = Runtime.getRuntime().availableProcessors();
    // arbitrary...but not completely arbitrary
    DEFAULT_QUERY_RUNNER_THREADS = coreCount;
    DEFAULT_QUERY_WORKER_THREADS = 2 * coreCount;
  }

  /**
   * Builds a scheduler from configuration.
   *
   * Recognized keys (under the scheduler config prefix):
   * 'query_runner_threads' — number of "main" threads; these stay blocked for
   *   the duration of a query and therefore bound the number of queries a
   *   server runs in parallel (default: number of cores).
   * 'query_worker_threads' — total worker threads doing the actual parallel
   *   per-segment processing (default: 2 * number of cores).
   */
  public QueryScheduler(@Nonnull Configuration schedulerConfig, @Nonnull QueryExecutor queryExecutor) {
    Preconditions.checkNotNull(schedulerConfig);
    Preconditions.checkNotNull(queryExecutor);

    numQueryRunnerThreads = schedulerConfig.getInt(QUERY_RUNNER_CONFIG_KEY, DEFAULT_QUERY_RUNNER_THREADS);
    numQueryWorkerThreads = schedulerConfig.getInt(QUERY_WORKER_CONFIG_KEY, DEFAULT_QUERY_WORKER_THREADS);
    LOGGER.info("Initializing with {} query runner threads and {} worker threads", numQueryRunnerThreads,
        numQueryWorkerThreads);

    // pqr -> pinot query runner, pqw -> pinot query worker (short thread names)
    ThreadFactory runnerThreadFactory = new ThreadFactoryBuilder()
        .setDaemon(false)
        .setPriority(QUERY_RUNNER_THREAD_PRIORITY)
        .setNameFormat("pqr-%d")
        .build();
    ThreadFactory workerThreadFactory = new ThreadFactoryBuilder()
        .setDaemon(false)
        .setPriority(Thread.NORM_PRIORITY)
        .setNameFormat("pqw-%d")
        .build();
    queryRunners = newFixedPool(numQueryRunnerThreads, runnerThreadFactory);
    queryWorkers = newFixedPool(numQueryWorkerThreads, workerThreadFactory);
    this.queryExecutor = queryExecutor;
  }

  // Wraps a fixed-size pool so callers get guava listenable futures.
  private static ListeningExecutorService newFixedPool(int threadCount, ThreadFactory threadFactory) {
    return MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(threadCount, threadFactory));
  }

  /** Convenience constructor that uses an empty (all-defaults) configuration. */
  public QueryScheduler(@Nullable QueryExecutor queryExecutor) {
    this(new PropertiesConfiguration(), queryExecutor);
  }

  /** Schedules a query; the returned future completes with the query's DataTable result. */
  public abstract ListenableFuture<DataTable> submit(@Nonnull ServerQueryRequest queryRequest);

  public @Nullable QueryExecutor getQueryExecutor() {
    return queryExecutor;
  }

  public ExecutorService getWorkerExecutorService() {
    return queryWorkers;
  }
}
<file_sep>package com.linkedin.thirdeye.dataframe; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Random; import java.util.Set; import org.apache.commons.lang.ArrayUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class DataFrameBenchmark { // TODO: validate benchmarking method -
// Dead Code Elimination, etc. may be playing tricks on us.
private static final Logger LOG = LoggerFactory.getLogger(DataFrameBenchmark.class); private static final int N_ROUNDS = 15; private static final int N_ROUNDS_SLOW = 3; private static final int N_ELEMENTS = 10_000_000; private static final String[] SERIES_NAMES = new String[] { "task", "min", "mid", "max", "outer", "checksum", "samples" }; private static final long SEED = System.nanoTime();
/* Per-sample timer state, the samples collected for the current benchmark, and the summary table accumulated across all benchmarks. */
long tStart; long tStartOuter; List<Long> times = new ArrayList<>(); long timeOuter; DataFrame.Builder results = DataFrame.builder(SERIES_NAMES);
/* Every benchmark below follows the same pattern: generate fresh input each round, time only the operation under test (startTimer/stopTimer), and XOR the output into a checksum so the JIT cannot dead-code-eliminate the measured work. The *Array variants are raw-array baselines for the Series variants. */
void benchmarkMapDoubleSeries() { startTimerOuter(); long checksum = 0; for (int r = 0; r < N_ROUNDS; r++) { double[] doubleValues = generateDoubleData(N_ELEMENTS); final double delta = r; startTimer(); DoubleSeries s = DoubleSeries.buildFrom(doubleValues); DoubleSeries sResult = s.map(new Series.DoubleFunction() { @Override public double apply(double... values) { return values[0] + delta; } }); stopTimer(); checksum ^= checksum(sResult.values()); } logResults("benchmarkMapDoubleSeries", checksum); } void benchmarkMapDoubleSeriesOperation() { startTimerOuter(); long checksum = 0; for (int r = 0; r < N_ROUNDS; r++) { double[] doubleValues = generateDoubleData(N_ELEMENTS); final double delta = r; startTimer(); DoubleSeries s = DoubleSeries.buildFrom(doubleValues); DoubleSeries sResult = s.add(delta); stopTimer(); checksum ^= checksum(sResult.values()); } logResults("benchmarkMapDoubleSeriesOperation", checksum); } void benchmarkMapDoubleArray() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { double[] doubleValues = generateDoubleData(N_ELEMENTS); final double delta = r; startTimer(); double[] results = new double[doubleValues.length]; for (int i = 0; i < doubleValues.length; i++) { results[i] = doubleValues[i] + delta; } stopTimer(); checksum ^= checksum(results); } logResults("benchmarkMapDoubleArray", checksum); } void benchmarkMapLongSeries() {
startTimerOuter(); long checksum = 0; for (int r = 0; r < N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); final long delta = r; startTimer(); LongSeries s = LongSeries.buildFrom(longValues); LongSeries sResult = s.map(new Series.LongFunction() { @Override public long apply(long... values) { return values[0] + delta; } }); stopTimer(); checksum ^= checksum(sResult.values()); } logResults("benchmarkMapLongSeries", checksum); } void benchmarkMapLongSeriesOperation() { startTimerOuter(); long checksum = 0; for (int r = 0; r < N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); final long delta = r; startTimer(); LongSeries s = LongSeries.buildFrom(longValues); LongSeries sResult = s.add(delta); stopTimer(); checksum ^= checksum(sResult.values()); } logResults("benchmarkMapLongSeriesOperation", checksum); } void benchmarkMapLongArray() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); final long delta = r; startTimer(); long[] results = new long[longValues.length]; for (int i = 0; i < longValues.length; i++) { results[i] = longValues[i] + delta; } stopTimer(); checksum ^= checksum(results); } logResults("benchmarkMapLongArray", checksum); } void benchmarkMapTwoSeriesExpression() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS_SLOW; r++) { long[] longValues = generateLongData(N_ELEMENTS); double[] doubleValues = generateDoubleData(N_ELEMENTS); DataFrame df = new DataFrame(); df.addSeries("long", longValues); df.addSeries("double", doubleValues); startTimer(); DoubleSeries res = df.map("long * double"); stopTimer(); checksum ^= checksum(res.values()); } logResults("benchmarkMapTwoSeriesExpression", checksum); } void benchmarkMapTwoSeries() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); double[] doubleValues = generateDoubleData(N_ELEMENTS); DataFrame df = new DataFrame();
df.addSeries("long", longValues); df.addSeries("double", doubleValues); startTimer(); DoubleSeries res = df.map(new Series.DoubleFunction() { @Override public double apply(double... values) { return values[0] * values[1]; } }, "long", "double"); stopTimer(); checksum ^= checksum(res.values()); } logResults("benchmarkMapTwoSeries", checksum); } void benchmarkMapTwoSeriesOperation() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); double[] doubleValues = generateDoubleData(N_ELEMENTS); DataFrame df = new DataFrame(); df.addSeries("long", longValues); df.addSeries("double", doubleValues); startTimer(); LongSeries l = df.getLongs("long"); DoubleSeries d = df.getDoubles("double"); DoubleSeries res = d.multiply(l); stopTimer(); checksum ^= checksum(res.values()); } logResults("benchmarkMapTwoSeriesOperation", checksum); } void benchmarkMapTwoArrays() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); double[] doubleValues = generateDoubleData(N_ELEMENTS); startTimer(); double[] results = new double[N_ELEMENTS]; for(int i=0; i<N_ELEMENTS; i++) { results[i] = longValues[i] * doubleValues[i]; } stopTimer(); checksum ^= checksum(results); } logResults("benchmarkMapTwoArrays", checksum); } void benchmarkMapThreeSeries() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); double[] doubleValues = generateDoubleData(N_ELEMENTS); long[] otherValues = generateLongData(N_ELEMENTS); DataFrame df = new DataFrame(); df.addSeries("long", longValues); df.addSeries("double", doubleValues); df.addSeries("other", otherValues); startTimer(); DoubleSeries res = df.map(new Series.DoubleFunction() { @Override public double apply(double...
values) { return values[0] * values[1] + values[2]; } }, "long", "double", "other"); stopTimer(); checksum ^= checksum(res.values()); } logResults("benchmarkMapThreeSeries", checksum); } void benchmarkMapThreeArrays() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); double[] doubleValues = generateDoubleData(N_ELEMENTS); long[] otherValues = generateLongData(N_ELEMENTS); startTimer(); double[] results = new double[N_ELEMENTS]; for(int i=0; i<N_ELEMENTS; i++) { results[i] = longValues[i] * doubleValues[i] + otherValues[i]; } stopTimer(); checksum ^= checksum(results); } logResults("benchmarkMapThreeArrays", checksum); } void benchmarkMapFourSeriesGeneric() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); double[] doubleValues = generateDoubleData(N_ELEMENTS); long[] otherValues = generateLongData(N_ELEMENTS); double[] anotherValues = generateDoubleData(N_ELEMENTS); DataFrame df = new DataFrame(); df.addSeries("long", longValues); df.addSeries("double", doubleValues); df.addSeries("other", otherValues); df.addSeries("another", anotherValues); startTimer(); DoubleSeries res = df.map(new Series.DoubleFunction() { @Override public double apply(double...
values) { return values[0] * values[1] + values[2] / values[3]; } }, "long", "double", "other", "another"); stopTimer(); checksum ^= checksum(res.values()); } logResults("benchmarkMapFourSeriesGeneric", checksum); } void benchmarkMapFourArrays() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); double[] doubleValues = generateDoubleData(N_ELEMENTS); long[] otherValues = generateLongData(N_ELEMENTS); double[] anotherValues = generateDoubleData(N_ELEMENTS); startTimer(); double[] results = new double[N_ELEMENTS]; for(int i=0; i<N_ELEMENTS; i++) { results[i] = longValues[i] * doubleValues[i] + otherValues[i] / anotherValues[i]; } stopTimer(); checksum ^= checksum(results); } logResults("benchmarkMapFourArrays", checksum); } void benchmarkMinMaxLongSeries() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); LongSeries s = LongSeries.buildFrom(longValues); startTimer(); long min = s.min(); long max = s.max(); stopTimer(); checksum ^= checksum(min, max); } logResults("benchmarkMinMaxLongSeries", checksum); } void benchmarkMinMaxLongArray() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); startTimer(); long min = longValues[0]; long max = longValues[0]; for (long v : longValues) { if (min > v) min = v; if (max < v) max = v; } stopTimer(); checksum ^= checksum(min, max); } logResults("benchmarkMinMaxLongArray", checksum); } void benchmarkEqualsLongArray() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); long[] otherValues = Arrays.copyOf(longValues, longValues.length); startTimer(); if(!Arrays.equals(longValues, otherValues)) throw new IllegalStateException("Arrays must be equal"); stopTimer(); checksum ^= checksum(longValues); checksum ^= checksum(otherValues); }
logResults("benchmarkEqualsLongArray", checksum); } void benchmarkEqualsLongSeries() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); long[] otherValues = Arrays.copyOf(longValues, longValues.length); LongSeries series = LongSeries.buildFrom(longValues); LongSeries other = LongSeries.buildFrom(otherValues); startTimer(); if(!series.equals(other)) throw new IllegalStateException("Series must be equal"); stopTimer(); checksum ^= checksum(series.values()); checksum ^= checksum(other.values()); } logResults("benchmarkEqualsLongSeries", checksum); } void benchmarkEqualsLongSeriesOperation() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); long[] otherValues = Arrays.copyOf(longValues, longValues.length); LongSeries series = LongSeries.buildFrom(longValues); LongSeries other = LongSeries.buildFrom(otherValues); startTimer(); BooleanSeries res = series.eq(other); stopTimer(); if(res.hasFalse()) throw new IllegalStateException("Series must be equal"); checksum ^= checksum(series.values()); checksum ^= checksum(other.values()); } logResults("benchmarkEqualsLongSeriesOperation", checksum); } void benchmarkSortLongArray() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS_SLOW; r++) { long[] longValues = generateLongData(N_ELEMENTS); startTimer(); Arrays.sort(longValues); stopTimer(); checksum ^= checksum(longValues); } logResults("benchmarkSortLongArray", checksum); } void benchmarkSortLongSeries() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS_SLOW; r++) { long[] longValues = generateLongData(N_ELEMENTS); LongSeries series = LongSeries.buildFrom(longValues); startTimer(); LongSeries out = series.sorted(); stopTimer(); checksum ^= checksum(out.values()); } logResults("benchmarkSortLongSeries", checksum); } void benchmarkUniqueLongArrayWithObjects() { startTimerOuter(); long checksum = 0; for(int r=0;
r<N_ROUNDS_SLOW; r++) { long[] longValues = generateLongData(N_ELEMENTS); startTimer(); Set<Long> set = new HashSet<>(); for(long l : longValues) set.add(l); long[] out = ArrayUtils.toPrimitive(set.toArray(new Long[set.size()])); stopTimer(); checksum ^= checksum(out); } logResults("benchmarkUniqueLongArrayWithObjects", checksum); } void benchmarkUniqueLongSeries() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS_SLOW; r++) { long[] longValues = generateLongData(N_ELEMENTS); LongSeries series = LongSeries.buildFrom(longValues); startTimer(); LongSeries out = series.unique(); stopTimer(); checksum ^= checksum(out.values()); } logResults("benchmarkUniqueLongSeries", checksum); } void benchmarkShiftLongArray() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); startTimer(); long[] values = new long[N_ELEMENTS]; System.arraycopy(longValues, 0, values, N_ELEMENTS / 2, N_ELEMENTS / 2); Arrays.fill(values, 0, N_ELEMENTS / 2, Long.MIN_VALUE); stopTimer(); checksum ^= checksum(values); } logResults("benchmarkShiftLongArray", checksum); } void benchmarkShiftLongSeries() { startTimerOuter(); long checksum = 0; for(int r=0; r<N_ROUNDS; r++) { long[] longValues = generateLongData(N_ELEMENTS); LongSeries series = LongSeries.buildFrom(longValues); startTimer(); LongSeries out = series.shift(N_ELEMENTS / 2); stopTimer(); checksum ^= checksum(out.values()); } logResults("benchmarkShiftLongSeries", checksum); }
/* Runs every benchmark once. */
void benchmarkAll() { benchmarkMinMaxLongSeries(); benchmarkMinMaxLongArray(); benchmarkEqualsLongSeries(); benchmarkEqualsLongSeriesOperation(); benchmarkEqualsLongArray(); benchmarkShiftLongSeries(); benchmarkShiftLongArray(); benchmarkSortLongSeries(); benchmarkSortLongArray(); benchmarkUniqueLongSeries(); benchmarkUniqueLongArrayWithObjects(); benchmarkMapDoubleSeries(); benchmarkMapDoubleSeriesOperation(); benchmarkMapDoubleArray(); benchmarkMapLongSeries();
benchmarkMapLongSeriesOperation(); benchmarkMapLongArray(); benchmarkMapTwoSeries(); benchmarkMapTwoSeriesOperation(); benchmarkMapTwoArrays(); benchmarkMapThreeSeries(); benchmarkMapThreeArrays(); benchmarkMapFourSeriesGeneric(); benchmarkMapFourArrays(); benchmarkMapTwoSeriesExpression(); }
/* Timing helpers: a per-sample timer plus an outer wall-clock timer per benchmark. */
void startTimer() { this.tStart = System.nanoTime(); } void stopTimer() { long tDelta = System.nanoTime() - this.tStart; this.times.add(tDelta); } void startTimerOuter() { this.tStartOuter = System.nanoTime(); } void stopTimerOuter() { this.timeOuter = System.nanoTime() - this.tStartOuter; }
/* Logs min/median/max of the collected samples and appends them to the summary table. */
void logResults(String name, long checksum) { stopTimerOuter(); Collections.sort(this.times); long tMid = this.times.get(this.times.size() / 2); long tMin = Collections.min(this.times); long tMax = Collections.max(this.times); LOG.info("{}: min/mid/max = {}ms {}ms {}ms [all={}ms, chk={}, cnt={}]", name, tMin / 1000000, tMid / 1000000, tMax / 1000000, timeOuter / 1000000, checksum % 1000, this.times.size()); this.results.append(name, tMin, tMid, tMax, this.timeOuter, checksum, this.times.size()); // reset timer stats
this.times = new ArrayList<>(); }
/* Entry point: waits for a keypress (to allow attaching a profiler), runs all benchmarks, converts timings to millis and prints the summary table. */
public static void main(String[] args) throws Exception { LOG.info("Press Enter key to start."); System.in.read(); LOG.info("Running DataFrame benchmark ..."); DataFrameBenchmark b = new DataFrameBenchmark(); b.benchmarkAll(); Series.LongFunction toMillis = new Series.LongFunction() { @Override public long apply(long... values) { return values[0] / 1000000; } }; DataFrame df = b.results.build(); df.mapInPlace(toMillis, "min"); df.mapInPlace(toMillis, "mid"); df.mapInPlace(toMillis, "max"); df.mapInPlace(toMillis, "outer"); df.mapInPlace(new Series.LongFunction() { @Override public long apply(long...
values) { return values[0] % 1000; } }, "checksum"); LOG.info("Summary:\n{}", df.toString(40, SERIES_NAMES)); LOG.info("done."); }
/* Deterministic data generators: SEED is fixed per JVM run so every round sees identical input. */
static double[] generateDoubleData(int n) { Random r = new Random(); r.setSeed(SEED); double[] values = new double[n]; for(int i=0; i<n; i++) { values[i] = r.nextDouble(); } return values; } static long[] generateLongData(int n) { Random r = new Random(); r.setSeed(SEED); long[] values = new long[n]; for(int i=0; i<n; i++) { values[i] = r.nextLong(); } return values; }
/* XOR-fold values into a single long so the optimizer must keep the results. */
static long checksum(long... values) { long bits = 0; for(long v : values) { bits ^= v; } return bits; } static long checksum(double... values) { long bits = 0; for(double v : values) { bits ^= Double.doubleToLongBits(v); } return bits; } } <file_sep>package com.linkedin.thirdeye.anomaly.utils; import com.yammer.metrics.core.Counter; import com.yammer.metrics.core.MetricsRegistry; import com.yammer.metrics.reporting.JmxReporter; /* JMX-exported counters for ThirdEye detection/alert task execution and DB calls. */ public class ThirdeyeMetricsUtil { private static MetricsRegistry metricsRegistry = new MetricsRegistry(); private static JmxReporter jmxReporter = new JmxReporter(metricsRegistry); static { jmxReporter.start(); } private ThirdeyeMetricsUtil() { } public static final Counter detectionTaskCounter = metricsRegistry.newCounter(ThirdeyeMetricsUtil.class, "detectionTaskCounter"); public static final Counter detectionTaskSuccessCounter = metricsRegistry.newCounter(ThirdeyeMetricsUtil.class, "detectionTaskSuccessCounter"); public static final Counter alertTaskSuccessCounter = metricsRegistry.newCounter(ThirdeyeMetricsUtil.class, "alertTaskSuccessCounter"); public static final Counter dbCallCounter = metricsRegistry.newCounter(ThirdeyeMetricsUtil.class, "dbCallCounter"); public static MetricsRegistry getMetricsRegistry() { return metricsRegistry; } } <file_sep>/** * Copyright (C) 2014-2016 LinkedIn Corp. (<EMAIL>) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.linkedin.pinot.server.request;

import com.google.common.base.Function;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.linkedin.pinot.common.exception.QueryException;
import com.linkedin.pinot.common.metrics.ServerMeter;
import com.linkedin.pinot.common.metrics.ServerMetrics;
import com.linkedin.pinot.common.metrics.ServerQueryPhase;
import com.linkedin.pinot.common.query.ServerQueryRequest;
import com.linkedin.pinot.common.query.context.TimerContext;
import com.linkedin.pinot.common.request.InstanceRequest;
import com.linkedin.pinot.common.utils.DataTable;
import com.linkedin.pinot.core.common.datatable.DataTableImplV2;
import com.linkedin.pinot.core.query.scheduler.QueryScheduler;
import com.linkedin.pinot.serde.SerDe;
import com.linkedin.pinot.transport.netty.NettyServer;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import java.net.InetSocketAddress;
import javax.annotation.Nullable;
import org.apache.thrift.protocol.TCompactProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Netty request handler that deserializes incoming broker requests, hands them
 * to the {@link QueryScheduler}, and serializes the resulting {@link DataTable}
 * back to the broker.
 */
public class ScheduledRequestHandler implements NettyServer.RequestHandler {
  private static final Logger LOGGER = LoggerFactory.getLogger(ScheduledRequestHandler.class);

  private final ServerMetrics serverMetrics;
  private final QueryScheduler queryScheduler;

  public ScheduledRequestHandler(QueryScheduler queryScheduler, ServerMetrics serverMetrics) {
    this.queryScheduler = queryScheduler;
    this.serverMetrics = serverMetrics;
  }

  /**
   * Deserializes the thrift {@link InstanceRequest} from the channel buffer and
   * submits it to the scheduler.
   *
   * @return future completing with the serialized response bytes; on
   *         deserialization failure an immediate INTERNAL_ERROR response is
   *         returned, and uncaught scheduler exceptions are converted to an
   *         INTERNAL_ERROR response as well.
   */
  @Override
  public ListenableFuture<byte[]> processRequest(ChannelHandlerContext channelHandlerContext, ByteBuf request) {
    final long queryStartTimeNs = System.nanoTime();
    serverMetrics.addMeteredGlobalValue(ServerMeter.QUERIES, 1);

    LOGGER.debug("Processing request : {}", request);

    byte[] byteArray = new byte[request.readableBytes()];
    request.readBytes(byteArray);
    SerDe serDe = new SerDe(new TCompactProtocol.Factory());
    final InstanceRequest instanceRequest = new InstanceRequest();
    if (!serDe.deserialize(instanceRequest, byteArray)) {
      // Broker sent bytes we cannot parse: answer immediately with an error table.
      LOGGER.error("Failed to deserialize query request from broker ip: {}",
          ((InetSocketAddress) channelHandlerContext.channel().remoteAddress()).getAddress().getHostAddress());
      DataTable result = new DataTableImplV2();
      result.addException(QueryException.INTERNAL_ERROR);
      serverMetrics.addMeteredGlobalValue(ServerMeter.REQUEST_DESERIALIZATION_EXCEPTIONS, 1);
      ServerQueryRequest queryRequest = new ServerQueryRequest(null, serverMetrics);
      queryRequest.getTimerContext().setQueryArrivalTimeNs(queryStartTimeNs);
      return Futures.immediateFuture(serializeDataTable(queryRequest, result));
    }

    final ServerQueryRequest queryRequest = new ServerQueryRequest(instanceRequest, serverMetrics);
    final TimerContext timerContext = queryRequest.getTimerContext();
    timerContext.setQueryArrivalTimeNs(queryStartTimeNs);
    TimerContext.Timer deserializationTimer =
        timerContext.startNewPhaseTimerAtNs(ServerQueryPhase.REQUEST_DESERIALIZATION, queryStartTimeNs);
    deserializationTimer.stopAndRecord();

    LOGGER.debug("Processing requestId:{},request={}", instanceRequest.getRequestId(), instanceRequest);
    ListenableFuture<DataTable> queryTask = queryScheduler.submit(queryRequest);

    // The following future provides a default response in case of uncaught
    // exceptions from query processing.
    ListenableFuture<DataTable> queryResponse =
        Futures.catching(queryTask, Throwable.class, new Function<Throwable, DataTable>() {
          @Nullable
          @Override
          public DataTable apply(@Nullable Throwable input) {
            // this is called iff queryTask fails with unhandled exception
            serverMetrics.addMeteredGlobalValue(ServerMeter.UNCAUGHT_EXCEPTIONS, 1);
            DataTable result = new DataTableImplV2();
            result.addException(QueryException.INTERNAL_ERROR);
            return result;
          }
        });

    // Transform the DataTable to serialized byte[] to send back to the broker.
    ListenableFuture<byte[]> serializedQueryResponse =
        Futures.transform(queryResponse, new Function<DataTable, byte[]>() {
          @Nullable
          @Override
          public byte[] apply(@Nullable DataTable instanceResponse) {
            byte[] responseData = serializeDataTable(queryRequest, instanceResponse);
            // FIX: a comma was missing between serTimeMs={} and TotalTimeMs={},
            // which fused the two fields in the emitted log line.
            LOGGER.info("Processed requestId {},reqSegments={},prunedToSegmentCount={},deserTimeMs={},planTimeMs={},planExecTimeMs={},totalExecMs={},serTimeMs={},TotalTimeMs={},broker={}",
                queryRequest.getInstanceRequest().getRequestId(),
                queryRequest.getInstanceRequest().getSearchSegments().size(),
                queryRequest.getSegmentCountAfterPruning(),
                timerContext.getPhaseDurationMs(ServerQueryPhase.REQUEST_DESERIALIZATION),
                timerContext.getPhaseDurationMs(ServerQueryPhase.BUILD_QUERY_PLAN),
                timerContext.getPhaseDurationMs(ServerQueryPhase.QUERY_PLAN_EXECUTION),
                timerContext.getPhaseDurationMs(ServerQueryPhase.QUERY_PROCESSING),
                timerContext.getPhaseDurationMs(ServerQueryPhase.RESPONSE_SERIALIZATION),
                timerContext.getPhaseDurationMs(ServerQueryPhase.TOTAL_QUERY_TIME),
                queryRequest.getBrokerId());
            return responseData;
          }
        });
    return serializedQueryResponse;
  }

  /**
   * Serializes a DataTable response, recording the serialization and
   * total-query-time phases on the request's timer context.
   */
  static byte[] serializeDataTable(ServerQueryRequest queryRequest, DataTable instanceResponse) {
    byte[] responseByte;

    InstanceRequest instanceRequest = queryRequest.getInstanceRequest();
    ServerMetrics metrics = queryRequest.getServerMetrics();
    TimerContext timerContext = queryRequest.getTimerContext();
    timerContext.startNewPhaseTimer(ServerQueryPhase.RESPONSE_SERIALIZATION);
    long requestId = instanceRequest != null ? instanceRequest.getRequestId() : -1;
    String brokerId = instanceRequest != null ? instanceRequest.getBrokerId() : "null";
    try {
      if (instanceResponse == null) {
        LOGGER.warn("Instance response is null for requestId: {}, brokerId: {}", requestId, brokerId);
        responseByte = new byte[0];
      } else {
        responseByte = instanceResponse.toBytes();
      }
    } catch (Exception e) {
      metrics.addMeteredGlobalValue(ServerMeter.RESPONSE_SERIALIZATION_EXCEPTIONS, 1);
      LOGGER.error("Got exception while serializing response for requestId: {}, brokerId: {}", requestId, brokerId, e);
      // NOTE(review): returning null here (vs. new byte[0] above) pushes a null
      // payload to the caller — confirm downstream handles it before changing.
      responseByte = null;
    }
    timerContext.getPhaseTimer(ServerQueryPhase.RESPONSE_SERIALIZATION).stopAndRecord();
    timerContext.startNewPhaseTimerAtNs(ServerQueryPhase.TOTAL_QUERY_TIME, timerContext.getQueryArrivalTimeNs());
    timerContext.getPhaseTimer(ServerQueryPhase.TOTAL_QUERY_TIME).stopAndRecord();
    return responseByte;
  }
}
<file_sep>package com.linkedin.thirdeye.anomaly.alert; import com.linkedin.thirdeye.anomaly.utils.AnomalyUtils; import java.util.ArrayList; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import org.joda.time.DateTime; import org.quartz.CronScheduleBuilder; import org.quartz.CronTrigger; import org.quartz.JobBuilder; import org.quartz.JobDetail; import org.quartz.JobKey; import org.quartz.Scheduler; import org.quartz.SchedulerException; import org.quartz.SchedulerFactory; import org.quartz.Trigger; import org.quartz.TriggerBuilder; import org.quartz.impl.StdSchedulerFactory; import org.quartz.impl.matchers.GroupMatcher; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.linkedin.thirdeye.anomaly.job.JobContext; import com.linkedin.thirdeye.anomaly.job.JobScheduler; import com.linkedin.thirdeye.anomaly.task.TaskConstants.TaskType; import com.linkedin.thirdeye.datalayer.bao.EmailConfigurationManager; import com.linkedin.thirdeye.datalayer.bao.JobManager; import com.linkedin.thirdeye.datalayer.bao.TaskManager; import
com.linkedin.thirdeye.datalayer.dto.EmailConfigurationDTO; import com.linkedin.thirdeye.datasource.DAORegistry;
/** * Scheduler for alert (email notification) jobs: keeps the Quartz schedule in sync with the active email configurations stored in the database. */
public class AlertJobScheduler implements JobScheduler, Runnable { private static final Logger LOG = LoggerFactory.getLogger(AlertJobScheduler.class); public static final int DEFAULT_ALERT_DELAY = 10; public static final TimeUnit DEFAULT_ALERT_DELAY_UNIT = TimeUnit.MINUTES; private SchedulerFactory schedulerFactory; private Scheduler quartzScheduler; private ScheduledExecutorService scheduledExecutorService; private JobManager anomalyJobDAO; private TaskManager anomalyTaskDAO; private EmailConfigurationManager emailConfigurationDAO; private static final DAORegistry DAO_REGISTRY = DAORegistry.getInstance(); public AlertJobScheduler() { this.anomalyJobDAO = DAO_REGISTRY.getJobDAO(); this.anomalyTaskDAO = DAO_REGISTRY.getTaskDAO(); this.emailConfigurationDAO = DAO_REGISTRY.getEmailConfigurationDAO(); schedulerFactory = new StdSchedulerFactory(); try { quartzScheduler = schedulerFactory.getScheduler(); } catch (SchedulerException e) { LOG.error("Exception while starting quartz scheduler", e); } /* NOTE(review): if getScheduler() throws, quartzScheduler stays null and later calls will NPE — presumably fail-fast at startup; confirm. */ scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(); }
/* Returns the names of all jobs currently registered with Quartz, across all groups. */
public List<String> getScheduledJobs() throws SchedulerException { List<String> activeJobKeys = new ArrayList<>(); for (String groupName : quartzScheduler.getJobGroupNames()) { for (JobKey jobKey : quartzScheduler.getJobKeys(GroupMatcher.jobGroupEquals(groupName))) { activeJobKeys.add(jobKey.getName()); } } return activeJobKeys; } public void start() throws SchedulerException { quartzScheduler.start(); scheduledExecutorService.scheduleWithFixedDelay(this, 0, DEFAULT_ALERT_DELAY, DEFAULT_ALERT_DELAY_UNIT); }
/** Periodic reconciliation pass: schedule newly activated configs, unschedule deactivated or deleted ones, and restart jobs whose cron expression changed in the database. */
public void run() { try { // read all alert configs
LOG.info("Reading all alert configs.."); List<EmailConfigurationDTO> alertConfigs = emailConfigurationDAO.findAll(); // get active jobs
List<String> scheduledJobs = getScheduledJobs();
LOG.info("Scheduled jobs {}", scheduledJobs); for (EmailConfigurationDTO alertConfig : alertConfigs) { Long id = alertConfig.getId(); String jobKey = getJobKey(id); boolean isActive = alertConfig.isActive(); boolean isScheduled = scheduledJobs.contains(jobKey);
// for all jobs with isActive, but not in scheduled jobs,
// schedule them with quartz, as function is newly created, or newly activated
if (isActive && !isScheduled) { LOG.info("Found active but not scheduled {}", id); startJob(alertConfig, jobKey); }
// for all jobs with not isActive, but in scheduled jobs,
// remove them from quartz, as function is newly deactivated
else if (!isActive && isScheduled) { LOG.info("Found inactive but scheduled {}", id); stopJob(jobKey); }
// for all jobs with isActive, and isScheduled,
// updates to a function will be picked up automatically by the next run
// but check for cron updates
else if (isActive && isScheduled) { String cronInDatabase = alertConfig.getCron(); List<Trigger> triggers = (List<Trigger>) quartzScheduler.getTriggersOfJob(JobKey.jobKey(jobKey)); CronTrigger cronTrigger = (CronTrigger) triggers.get(0); String cronInSchedule = cronTrigger.getCronExpression();
// cron expression has been updated, restart this job
if (!cronInDatabase.equals(cronInSchedule)) { LOG.info("Cron expression for config {} with jobKey {} has been changed from {} to {}. " + "Restarting schedule", id, jobKey, cronInSchedule, cronInDatabase); stopJob(jobKey); startJob(alertConfig, jobKey); } } }
// for all jobs with not isActive, and not isScheduled, no change required
}
// for any scheduled jobs, not having a function in the database,
// stop the schedule, as function has been deleted
for (String scheduledJobKey : scheduledJobs) { Long configId = getIdFromJobKey(scheduledJobKey); EmailConfigurationDTO alertConfigSpec = emailConfigurationDAO.findById(configId); if (alertConfigSpec == null) { LOG.info("Found scheduled, but not in database {}", configId); stopJob(scheduledJobKey); } } } catch (SchedulerException e) { LOG.error("Exception in reading active jobs", e); } } public void shutdown() throws SchedulerException { AnomalyUtils.safelyShutdownExecutionService(scheduledExecutorService, this.getClass()); quartzScheduler.shutdown(); }
/* Schedules the config with the given id; rejects missing or inactive configs. */
public void startJob(Long id) throws SchedulerException { EmailConfigurationDTO alertConfig = emailConfigurationDAO.findById(id); if (alertConfig == null) { throw new IllegalArgumentException("No alert config with id " + id); } if (!alertConfig.isActive()) { throw new IllegalStateException("Alert config with id " + id + " is not active"); } String jobKey = getJobKey(alertConfig.getId()); startJob(alertConfig, jobKey); } private void startJob(EmailConfigurationDTO alertConfig, String jobKey) throws SchedulerException { if (quartzScheduler.checkExists(JobKey.jobKey(jobKey))) { throw new IllegalStateException("Alert config " + jobKey + " is already scheduled"); } AlertJobContext alertJobContext = new AlertJobContext(); alertJobContext.setJobDAO(anomalyJobDAO); alertJobContext.setTaskDAO(anomalyTaskDAO); alertJobContext.setEmailConfigurationDAO(emailConfigurationDAO); alertJobContext.setAlertConfigId(alertConfig.getId()); alertJobContext.setAlertConfig(alertConfig); alertJobContext.setJobName(jobKey); scheduleJob(alertJobContext, alertConfig); } public void stopJob(Long id) throws SchedulerException { String
jobKey = getJobKey(id); stopJob(jobKey); } public void stopJob(String jobKey) throws SchedulerException { if (!quartzScheduler.checkExists(JobKey.jobKey(jobKey))) { throw new IllegalStateException("Cannot stop alert config " + jobKey + ", it has not been scheduled"); } quartzScheduler.deleteJob(JobKey.jobKey(jobKey)); LOG.info("Stopped alert config {}", jobKey); }
/* One-off immediate run over an explicit monitoring window; the job is fired via a start-now trigger under an "adhoc_" key. */
public void runAdHoc(Long id, DateTime windowStartTime, DateTime windowEndTime) { EmailConfigurationDTO alertConfig = emailConfigurationDAO.findById(id); if (alertConfig == null) { throw new IllegalArgumentException("No alert config with id " + id); } String triggerKey = String.format("alert_adhoc_trigger_%d", id); Trigger trigger = TriggerBuilder.newTrigger().withIdentity(triggerKey).startNow().build(); String jobKey = "adhoc_" + getJobKey(id); JobDetail job = JobBuilder.newJob(AlertJobRunner.class).withIdentity(jobKey).build(); AlertJobContext alertJobContext = new AlertJobContext(); alertJobContext.setJobDAO(anomalyJobDAO); alertJobContext.setTaskDAO(anomalyTaskDAO); alertJobContext.setEmailConfigurationDAO(emailConfigurationDAO); alertJobContext.setAlertConfigId(id); alertJobContext.setJobName(jobKey); job.getJobDataMap().put(AlertJobRunner.ALERT_JOB_CONTEXT, alertJobContext); job.getJobDataMap().put(AlertJobRunner.ALERT_JOB_MONITORING_WINDOW_START_TIME, windowStartTime); job.getJobDataMap().put(AlertJobRunner.ALERT_JOB_MONITORING_WINDOW_END_TIME, windowEndTime); try { quartzScheduler.scheduleJob(job, trigger); } catch (SchedulerException e) { LOG.error("Exception while scheduling job", e); } LOG.info("Started {}: {}", jobKey, alertConfig); }
/* Registers a recurring Quartz job driven by the config's cron expression. */
private void scheduleJob(JobContext jobContext, EmailConfigurationDTO alertConfig) { LOG.info("Starting {}", jobContext.getJobName()); String triggerKey = String.format("alert_scheduler_trigger_%d", alertConfig.getId()); CronTrigger trigger = TriggerBuilder.newTrigger().withIdentity(triggerKey)
.withSchedule(CronScheduleBuilder.cronSchedule(alertConfig.getCron())).build(); String jobKey = jobContext.getJobName(); JobDetail job = JobBuilder.newJob(AlertJobRunner.class).withIdentity(jobKey).build(); job.getJobDataMap().put(AlertJobRunner.ALERT_JOB_CONTEXT, jobContext); try { quartzScheduler.scheduleJob(job, trigger); } catch (SchedulerException e) { LOG.error("Exception while scheduling alert job", e); } LOG.info("Started {}: {}", jobKey, alertConfig); }
/* jobKey is "<TaskType>_<id>" (built by getJobKey); getIdFromJobKey recovers the id as the last '_'-separated token. */
private String getJobKey(Long id) { String jobKey = String.format("%s_%d", TaskType.ALERT, id); return jobKey; } private Long getIdFromJobKey(String jobKey) { String[] tokens = jobKey.split("_"); String id = tokens[tokens.length - 1]; return Long.valueOf(id); } } <file_sep>/** * Copyright (C) 2014-2016 LinkedIn Corp. (<EMAIL>) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and
*/ package com.linkedin.pinot.integration.tests; import com.linkedin.pinot.core.indexsegment.generator.SegmentVersion; import java.util.Arrays; import java.util.Collections; import java.util.List; import org.json.JSONObject; import org.testng.Assert; import org.testng.annotations.Test; /** * enables indexes on a bunch of columns * */ public class InvertedIndexOfflineIntegrationTest extends OfflineClusterIntegrationTest { private static final List<String> ORIGIN_INVERTED_INDEX_COLUMNS = Arrays.asList("FlightNum", "Origin", "Quarter"); private static final List<String> UPDATED_INVERTED_INDEX_COLUMNS = Arrays.asList("FlightNum", "Origin", "Quarter", "DivActualElapsedTime"); private static final String TEST_QUERY = "SELECT COUNT(*) FROM mytable WHERE DivActualElapsedTime = 305"; private static final long MAX_RELOAD_TIME_IN_MILLIS = 5000L; @Override protected void createTable() throws Exception { addOfflineTable("DaysSinceEpoch", "daysSinceEpoch", -1, "", null, null, ORIGIN_INVERTED_INDEX_COLUMNS, null, "mytable", SegmentVersion.v1, null); } @Test public void testInvertedIndexTrigger() throws Exception { runQuery(TEST_QUERY, Collections.singletonList(TEST_QUERY)); JSONObject queryResponse = postQuery(TEST_QUERY); Assert.assertEquals(queryResponse.getLong("numEntriesScannedInFilter"), TOTAL_DOCS); updateOfflineTable("DaysSinceEpoch", -1, "", null, null, UPDATED_INVERTED_INDEX_COLUMNS, null, "mytable", SegmentVersion.v1, null); triggerReload(); long endTime = System.currentTimeMillis() + MAX_RELOAD_TIME_IN_MILLIS; while (System.currentTimeMillis() < endTime) { runQuery(TEST_QUERY, Collections.singletonList(TEST_QUERY)); queryResponse = postQuery(TEST_QUERY); // Total docs should not change during reload Assert.assertEquals(queryResponse.getLong("totalDocs"), TOTAL_DOCS); if (queryResponse.getLong("numEntriesScannedInFilter") == 0L) { break; } } Assert.assertTrue(System.currentTimeMillis() < endTime); } private void triggerReload() throws Exception { 
sendGetRequest(CONTROLLER_BASE_API_URL + "/tables/mytable/segments/reload?type=offline"); } } <file_sep>import Ember from 'ember'; import fetch from 'fetch'; import { Actions as AnomalyActions } from 'thirdeye-frontend/actions/anomaly'; export default Ember.Route.extend({ redux: Ember.inject.service(), model(params) { const { id } = params; const redux = this.get('redux'); redux.dispatch(AnomalyActions.loading()); fetch(`/anomalies/search/anomalyIds/1492498800000/1492585200000/1?anomalyIds=${id}&functionName=`) .then(res => res.json()) .then(response => redux.dispatch(AnomalyActions.loadAnomaly(response))) .catch(() => redux.dispatch(AnomalyActions.requestFail())); return {}; } });
8a56fd9cd232c33001b60967f1582dd172c584de
[ "JavaScript", "Java" ]
21
Java
lzm7455/pinot
54d8cd8eb464b3438dec3ff134c6027514053b59
e63af20c4c8815d7a202914ee3b57453d27ed5b6
refs/heads/master
<repo_name>otownsend92/cs271_proj1_java<file_sep>/mod_deploy_ot.cpp // // mod_deploy_ot.cpp // // // Created by <NAME> on 12/8/14. // // #include "mod_deploy_ot.h" <file_sep>/clientServer/src/clientserver/ClientServer.java package clientserver; import java.io.IOException; import java.net.ServerSocket; import java.net.Socket; import java.io.*; import static java.lang.Thread.sleep; import java.util.Arrays; import java.util.Vector; import java.util.logging.Level; import java.util.logging.Logger; public class ClientServer implements Runnable { // make new paxos object public static Paxos paxosObject = new Paxos(); public static PaxosQueue paxosQueueObj = new PaxosQueue(); public static Log logObject = new Log(); public static int ctrlc = 0; public static int isFail = 0; public static int serverId; public static int heardFrom = 0; public static double balance = 0.0; public static String[] serverIPs = { "192.168.3.11", // ssh -i /Users/olivertownsend/Desktop/turtlebeards.pem ec2-user@192.168.3.11 "172.16.17.32", // ssh -i /Users/olivertownsend/Desktop/turtlebeards_california.pem ec2-user@172.16.17.32 "172.16.17.32", // ssh -i /Users/olivertownsend/Desktop/turtlebeards_ireland.pem ec2-user@172.16.17.32 "172.16.58.3", // ssh -i /Users/olivertownsend/Desktop/turtlebeards_singapore.pem ec2-user@172.16.58.3 "192.168.3.11"}; //ssh -i /Users/olivertownsend/Desktop/turtlebeards_saopaulo.pem ec2-user@192.168.3.11 // public static String[] serverIPs = { // "192.168.3.11", // ssh -i /Users/wdai/Desktop/turtlebeards.pem ec2-user@192.168.3.11 // "172.16.17.32", // ssh -i /Users/wdai/Desktop/turtlebeards.pem ec2-user@172.16.17.32 // "192.168.3.11", // ssh -i /Users/wdai/Desktop/turtlebeards.pem ec2-user@192.168.3.11 // "172.16.31.10", // ssh -i /Users/wdai/Desktop/turtlebeards.pem ec2-user@172.16.31.10 // "172.16.58.3"}; //ssh -i /Users/wdai/Desktop/turtlebeards.pem ec2-user@172.16.58.3 // public static String[] serverIpPrivate = { // "ec2-54-174-167-183.compute-1.amazonaws.com", // 
"ec2-54-174-226-59.compute-1.amazonaws.com", // "ec2-54-86-223-159.compute-1.amazonaws.com", // "ec2-54-174-201-123.compute-1.amazonaws.com", // "ec2-54-174-164-18.compute-1.amazonaws.com" // }; public static int[] serverPorts = {12100, 12101, 12102, 12103, 12104}; public static int[] logSizes = {0, 0, 0, 0, 0}; public static int logPort = 1220; static boolean listenerTrue = true; String clientSentence, capitalizedSentence; Socket csocket; static ServerSocket welcomeSocket, logSocket; static Thread listenerThread, logThread; ClientServer(Socket csocket) { this.csocket = csocket; } public static void main(String[] args) throws Exception { System.out.println("~~~~~~~~~~~~~~~~~~~" + " CS271 Paxos " + "~~~~~~~~~~~~~~~~~~~"); String inputLine = null; InputStreamReader isr = new InputStreamReader(System.in); BufferedReader br = new BufferedReader(isr); // this is assigning the local server id serverId = Integer.parseInt(args[0]); System.out.println("Starting server with Id: " + serverId); // Listener thread stuff listenerThread = new Thread() { public void run() { System.out.println("Waiting for clients..."); try { // init welcomeSocket ONLY once welcomeSocket = new ServerSocket(serverPorts[serverId]); } catch (IOException ex) { System.out.println("Welcome socket: " + ex); } while (true) { if (!listenerTrue) { try { // don't listen welcomeSocket.close(); } catch (IOException ex) { System.out.println(ex); } } else { try { if (welcomeSocket.isClosed()) { welcomeSocket = new ServerSocket(serverPorts[serverId]); } Socket connectionSocket = new Socket(); connectionSocket.setSoTimeout(100); connectionSocket = welcomeSocket.accept(); // System.out.println("Connected."); new Thread(new ClientServer(connectionSocket)).start(); } catch (IOException ex) { System.out.println("Connection socket: " + ex); ex.printStackTrace(); } } } } }; // log port thread stuff logThread = new Thread() { public void run() { try { // init log ONLY once logSocket = new ServerSocket(logPort); } catch 
(IOException ex) { System.out.println("logSocket: " + ex); } while (true) { if (!listenerTrue) { try { // don't listen logSocket.close(); } catch (IOException ex) { System.out.println(ex); } } else { try { Socket newSock = new Socket(); newSock.setSoTimeout(100); newSock = logSocket.accept(); System.out.println("Connected to log port"); // now can receive data InputStream socketStream = newSock.getInputStream(); ObjectInputStream objectInput = new ObjectInputStream(socketStream); Vector<String> receivedLog = (Vector<String>) objectInput.readObject(); newSock.close(); Log.transactionLog = receivedLog; System.out.println("This is the received catch-up log: " + Log.transactionLog); Log.rebuildLog(); } catch (IOException ex) { System.out.println("newSock socket: " + ex); } catch (ClassNotFoundException ex) { Logger.getLogger(ClientServer.class.getName()).log(Level.SEVERE, null, ex); } } } } }; // Heartbeat thread stuff Thread heartBeatThread = new Thread() { public void run() { while (true) { // runs forever in a loop, and waits for let's say, 3 sec before running again? 
try { // System.out.println("Entering heartbeat thread..."); HeartBeat.pingAll(); // this should update the "numProc" int in HeartBeat.java // wait 3s sleep(3000); HeartBeat.countAliveServers(); } catch (IOException | InterruptedException ex) { // System.out.println(ex); } catch (Exception ex) { Logger.getLogger(ClientServer.class.getName()).log(Level.SEVERE, null, ex); } } } }; // Queue thread stuff Thread queueWatchdogThread = new Thread() { public void run() { while (true) { // System.out.println("Starting queuewatcher thread"); paxosQueueObj.queueWatcher(); try { sleep(4000); } catch (InterruptedException ex) { Logger.getLogger(ClientServer.class.getName()).log(Level.SEVERE, null, ex); } } } }; // Start the listener thread listenerThread.start(); // Start the heartbeat thread heartBeatThread.start(); // Start the queue thread queueWatchdogThread.start(); // Start the log thread logThread.start(); // File f = new File(Log.path); // if (f.exists() && !f.isDirectory()) { sleep(3000); // if a log file is there ctrlc = 1; System.out.println("Checking for previous log..."); int size = Log.transactionLog.size(); // System.out.println("local size of log: " + size); if (HeartBeat.numProc == 1) { // rebuild from self rebuildFromSelf(); } else { String poll = "sizepoll " + serverId; sendPollToAll(poll); } // Start main thread for input { while (true) { System.out.print("> "); try { inputLine = br.readLine(); } catch (IOException e) { e.printStackTrace(); } String regex = "(?=\\()|(?<=\\)\\d)"; String[] input = inputLine.split(regex); // System.out.println(Arrays.toString(input)); if (input[0].equals("deposit")) { double amount; try { input[1] = (input[1].substring(1, input[1].length() - 1)); System.out.println("Depositing: " + input[1]); // Adding to queue paxosQueueObj.transactionQueue.add(input); } catch (Exception e) { System.out.println("Try deposit: " + e); e.printStackTrace(); } } else if (input[0].equals("withdraw")) { double amount; try { input[1] = 
(input[1].substring(1, input[1].length() - 1)); if (logObject.balance < Double.parseDouble(input[1])) { // Nonsufficient funds System.out.println("Withdraw of: " + input[1] + " failed. Insufficient funds."); } else { // Adding to queue System.out.println("Withdrawing: " + input[1]); paxosQueueObj.transactionQueue.add(input); } } catch (Exception e) { System.out.println("Invalid command."); } } else if (input[0].equals("balance")) { System.out.println("Balance is: " + logObject.getBalance()); } else if (input[0].equals("fail")) { System.out.println("Failing..."); fail(); } else if (input[0].equals("unfail")) { System.out.println("Unfailing..."); unfail(); } else if (input[0].equals("print")) { logObject.printLog(); } else if (input[0].equals("heartbeat")) { System.out.println("LifeTable: " + Arrays.toString(HeartBeat.lifeTable)); } else if (input[0].equals("printq")) { PaxosQueue.printQ(); } // added simply for testing else if (input[0].equals("send")) { // send message input[1] to server at port input[2] String server = input[2].substring(1, input[2].length() - 1); sendTo(input[1], server); } else if (input[0].equals("quit")) { System.out.println("Quitting..."); System.exit(0); } } } } public void run() { try { // Handler thread runnable // System.out.println("Spawning new handler thread..."); String clientSentence; BufferedReader inFromClient = new BufferedReader(new InputStreamReader(csocket.getInputStream())); clientSentence = inFromClient.readLine(); if ((clientSentence != null) && (!clientSentence.isEmpty())) { // System.out.println("Received: " + clientSentence); paxosObject.handleMsg(clientSentence); } else { // System.out.println("Thump"); } } catch (IOException e) { System.out.println(e); } catch (Exception ex) { Logger.getLogger(ClientServer.class.getName()).log(Level.SEVERE, null, ex); } } public static void fail() { System.out.println("USER FAIL: Stopping the listener thread."); listenerTrue = false; } public static void unfail() throws Exception { 
System.out.println("USER UNFAIL: Starting the listener thread again."); // begin listening again listenerTrue = true; // get size from local log int size = Log.transactionLog.size(); System.out.println("local size of log: " + size); // poll others for largest size String pollMsg = "sizepoll " + serverId; sendPollToAll(pollMsg); } public static void sendTo(String m, String serverId) throws Exception { int server_id = Integer.parseInt(serverId); int p = serverPorts[server_id]; String serverName = serverIPs[server_id]; // System.out.println("Sending " + m + " to: " + serverName + " on port: " + p); Socket clientSocket = new Socket(serverName, p); //serverPorts[leader]); DataOutputStream outToServer = new DataOutputStream(clientSocket.getOutputStream()); BufferedReader inFromServer = new BufferedReader(new InputStreamReader(clientSocket.getInputStream())); outToServer.writeBytes(m); clientSocket.close(); // System.out.println("Finished sending."); } public static void sendToAll(String prepareMsg) throws Exception { for (int i = 0; i < 5; ++i) { if (HeartBeat.lifeTable[i] == 1) { // System.out.println("Heartbeat at: " + i); // System.out.println("Sending to" + serverIPs[i] + ":" + serverPorts[i]); int p = serverPorts[i]; Socket clientSocket = new Socket(serverIPs[i], p); DataOutputStream outToServer = new DataOutputStream(clientSocket.getOutputStream()); BufferedReader inFromServer = new BufferedReader(new InputStreamReader(clientSocket.getInputStream())); outToServer.writeBytes(prepareMsg); clientSocket.close(); } } } public static void sendPollToAll(String prepareMsg) throws Exception { for (int i = 0; i < 5; ++i) { if (HeartBeat.lifeTable[i] == 1) { if (i == serverId) { } else { // System.out.println("Heartbeat at: " + i); // System.out.println("Sending to" + serverIPs[i] + ":" + serverPorts[i]); int p = serverPorts[i]; Socket clientSocket = new Socket(serverIPs[i], p); DataOutputStream outToServer = new DataOutputStream(clientSocket.getOutputStream()); 
BufferedReader inFromServer = new BufferedReader(new InputStreamReader(clientSocket.getInputStream())); outToServer.writeBytes(prepareMsg); clientSocket.close(); } } } } public static void requestLog() throws Exception { int chosenServer = serverId; // System.out.println(Arrays.toString(logSizes)); for (int i = 0; i < logSizes.length; i++) { if (logSizes[i] > logSizes[serverId]) { //Log.transactionLog.size()) { chosenServer = i; } } if (chosenServer != serverId) { // System.out.println("Requesting from: " + chosenServer); // request log from other server String requestLog = "requestlog " + serverId; sendTo(requestLog, Integer.toString(chosenServer)); } else { // System.out.println("rebuilding from self"); File f = new File(Log.path); if (f.exists() && !f.isDirectory()) { // System.out.println("Reading from file"); // else rebuild from yourself BufferedReader br = new BufferedReader(new FileReader(Log.path)); String line; Log.currIndex = 0; while ((line = br.readLine()) != null) { // System.out.println("line: " + line); // process the line. String[] split = line.split(" "); String nullString1 = ""; String nullString2 = ""; Log.transactionLog.add(nullString1); Log.transactionLog.add(nullString2); Log.transactionLog.add(Log.currIndex, line); Log.currIndex++; Log.updateBalance(split[0], Double.parseDouble(split[1])); } br.close(); } } } public static void rebuildFromSelf() throws Exception { // System.out.println("rebuilding from self"); File f = new File(Log.path); if (f.exists() && !f.isDirectory()) { // System.out.println("Reading from file"); // else rebuild from yourself BufferedReader br = new BufferedReader(new FileReader(Log.path)); String line; Log.currIndex = 0; while ((line = br.readLine()) != null) { // System.out.println("line: " + line); // process the line. 
String[] split = line.split(" "); String nullString1 = ""; String nullString2 = ""; Log.transactionLog.add(nullString1); Log.transactionLog.add(nullString2); Log.transactionLog.add(Log.currIndex, line); Log.currIndex++; Log.updateBalance(split[0], Double.parseDouble(split[1])); } br.close(); } System.out.println("Finished updating."); } } <file_sep>/clientServer/src/clientserver/Paxos.java package clientserver; import static clientserver.Log.transactionLog; import java.io.IOException; import java.io.Serializable; import static java.lang.Thread.sleep; import java.util.Arrays; import java.util.Vector; import java.util.logging.Level; import java.util.logging.Logger; public class Paxos { public class Value { double amount = 0.0; String type = "blank"; int logPosition = -1; int balNum = -1; int balNumServerId = -1; } public class Bucket { int numAccepts = 0; Paxos.Value v = new Paxos.Value(); } // keep track of number of acks - check when matches majority value double ackCount = 0; // generateNum is incremeneted by 1 each round - used f]to generate proposal numbers in conjunction with serverID int generateNum = 0; // value object that is created at beginning of proposal Value val = new Value(); // set when a cohort accepts a proposed value Value acceptedVal = new Value(); // monitor highest val received that's not blank Value highestVal = new Value(); // never initialize Value blankVal = new Value(); // counter for final accepts public static Bucket[] ackBucketB = new Bucket[5]; public static int[] finalAcceptBucket = {0, 0, 0, 0, 0}; int numFinalA = 0; public static Vector<Value> ackedValues = new Vector(); public static int[] ackedValBals; int minBallotNum = 0; int minBallotNumServerId = 0; //TESTING THIS? WHAT DO I SET THIS TO??? boolean leader = false; boolean phase2 = false; boolean var = false; /* LEADER'S PERSPECTIVE Called when leader loses election. Will generate a new proposal number (prepareMsg) and use the same Value object val in a new round of prepares. 
*/ // public void regeneratePrepare() throws Exception { // String prepareMsg = "prepare " + generateNum + " " + ClientServer.serverId; // ClientServer.sendToAll(prepareMsg); // // generateNum++; // } /* LISTENER'S PERSPECTIVE Is called when server receives messages in ClientServer from other servers. Will call various other handler methods based on message. */ public void handleMsg(String msg) throws Exception { String[] message = msg.split(" "); if (message[0].equals("prepare")) { handlePrepare(message); } else if (message[0].equals("ack")) { // handleAck(message); handleAckNew(message); } else if (message[0].equals("accept")) { handleAccept(message); } else if (message[0].equals("finalaccept")) { handleFinalAccept(message); } else if (message[0].equals("sizepoll")) { handleSizeRequest(message); } else if (message[0].equals("requestlog")) { Log.sendLog(message[1]); } else if (message[0].equals("mysize")) { handleSizeResponse(message); } } /* LEADER'S PERSPECTIVE Receives 'msg' from ClientServer and uses it to generate a Value object val to be proposed to all other servers. */ public void prepareMsg(String[] message) { generateNum++; leader = true; //String[] message = msg.split(" "); val.type = message[0]; val.amount = Double.parseDouble(message[1]); val.logPosition = Log.currIndex; val.balNum = generateNum; val.balNumServerId = ClientServer.serverId; String prepareMsg = "prepare " + val.balNum + " " + val.balNumServerId; try { System.out.println("Sending prepareMsg"); ClientServer.sendToAll(prepareMsg); } catch (Exception ex) { ex.printStackTrace(); } } /* COHORT'S PERSPECTIVE If receive a prepare message from some server, check if you haven't already agreed to higher ballot number. Regardless if you have or have not, reply with ack and most recent/highest ballot number seen so far with its value. 
If haven't accepted value */ public void handlePrepare(String[] message) { int ballotNum = Integer.parseInt(message[1]); int ballotNumServerId = Integer.parseInt(message[2]); if ((ballotNum > minBallotNum) || ((ballotNum == minBallotNum) && (ballotNumServerId > minBallotNumServerId))) { minBallotNum = ballotNum; minBallotNumServerId = ballotNumServerId; System.out.println("IFSTATEMENT"); /* TODO: If have already accepted proposal - set reply Value value to this val, otherwise, set it to null so handleAck will know if some other erver has accepted a value or not. */ } // String reply = ""; // if (acceptedVal.type.equals("blank")) { // reply // = "ack " // + ballotNum + " " // + ballotNumServerId + " " // + val.balNum + " " // + val.balNumServerId + " " // + val.type + " " // + val.amount + " " // + val.logPosition; // } else { String reply = "ack " + ballotNum + " " + ballotNumServerId + " " + acceptedVal.balNum + " " + acceptedVal.balNumServerId + " " + acceptedVal.type + " " + acceptedVal.amount + " " + acceptedVal.logPosition; // } System.out.println("handlePrepare from server " + ClientServer.serverId + ": " + reply); try { System.out.println("Sending ack"); ClientServer.sendTo(reply, Integer.toString(ballotNumServerId)); } catch (Exception ex) { System.out.println(ex); } } public void handleAckNew(String[] message) { int receivedBalNum = Integer.parseInt(message[3]); int receivedBalNumServerId = Integer.parseInt(message[4]); int ourReceivedBalNum = Integer.parseInt(message[1]); int ourReceivedBalNumServerId = Integer.parseInt(message[2]); if ( (ourReceivedBalNum == val.balNum) && (ourReceivedBalNumServerId == val.balNumServerId) ) { Value receivedVal = new Value(); receivedVal.type = message[5]; receivedVal.amount = Double.parseDouble(message[6]); receivedVal.logPosition = Integer.parseInt(message[7]); receivedVal.balNum = receivedBalNum; receivedVal.balNumServerId = receivedBalNumServerId; ackCount++; double majority = (double) ackCount / (HeartBeat.numProc); 
System.out.println("Received ack # " + ackCount + " from: " + receivedBalNum + " " + receivedBalNumServerId); // if someone has already accepted a value this round if (!receivedVal.type.equals("blank")) { System.out.println("Not blank! Already accepted value"); // if this received not-blank value is higher than previous not-blank value in this round, reset highest if ((receivedVal.balNum > highestVal.balNum) || ((receivedVal.balNum == highestVal.balNum) && (receivedVal.balNumServerId > highestVal.balNumServerId))) { highestVal.type = receivedVal.type = message[5]; highestVal.amount = receivedVal.amount; highestVal.logPosition = receivedVal.logPosition; highestVal.balNum = receivedVal.balNum; highestVal.balNumServerId = receivedVal.balNumServerId; } } if ((majority > 0.5) && (HeartBeat.numProc >= 3)) { if (phase2) { } else { System.out.println("majority: " + majority); System.out.println("Have reached majority"); // stop handling acks phase2 = true; // if there was an already accepted value, propose it // lost this round if (!highestVal.type.equals("blank")) { // !valsAreEqual(highestVal, blankVal)) { System.out.println("Server " + ClientServer.serverId + " lost, Sending concede."); String concedeMsg = "accept " + val.balNum + " " + val.balNumServerId + " " + highestVal.type + " " + highestVal.amount + " " + highestVal.logPosition; // ackCount = 0; phase2 = true; resetHighestVal(); System.out.println("1 Highest val now: " + highestVal.type); try { // Accept the higher ballot System.out.println("Sending concede accept"); ClientServer.sendToAll(concedeMsg); // Try to prepare another proposal generateNum = receivedBalNum + 1; // regeneratePrepare(); } catch (Exception ex) { System.out.println(ex); } } else if (highestVal.type.equals("blank")) { // won this round System.out.println("Server " + ClientServer.serverId + " won, Sending win msg."); System.out.println("Winning val: " + val.type + " " + val.amount + " " + val.logPosition); String winMsg = "accept " + 
val.balNum + " " + val.balNumServerId + " " + val.type + " " + val.amount + " " + val.logPosition; resetHighestVal(); System.out.println("2 Highest val now: " + highestVal.type); // ackCount = 0; phase2 = true; try { // I won System.out.println("We have a consensus, broadcasting out the winMsg: " + winMsg); ClientServer.sendToAll(winMsg); } catch (Exception ex) { System.out.println(ex); } } resetHighestVal(); } } else if (HeartBeat.numProc < 3) { System.out.println("Cannot reach majority, not enough servers."); } } } public boolean valsAreEqual(Value a, Value b) { return (a.type.equals(b.type) && a.amount == b.amount && a.balNum == b.balNum && a.balNumServerId == b.balNumServerId && a.logPosition == b.logPosition); } public void resetHighestVal() { highestVal.amount = blankVal.amount; highestVal.type = blankVal.type; highestVal.logPosition = blankVal.logPosition; highestVal.balNum = blankVal.balNum; highestVal.balNumServerId = blankVal.balNumServerId; } public void resetAcceptedVal() { acceptedVal.amount = blankVal.amount; acceptedVal.type = blankVal.type; acceptedVal.logPosition = blankVal.logPosition; acceptedVal.balNum = blankVal.balNum; acceptedVal.balNumServerId = blankVal.balNumServerId; } /* COHORT'S PERSPECTIVE (Possibly set acceptedVal to default values/null so next election round knows that nothing has been accepted so far in new round) */ public void handleAccept(String[] message) { int receivedBalNum = Integer.parseInt(message[1]); int receivedBalNumServerId = Integer.parseInt(message[2]); // compare ballot number(receivedBalNum) and the server ID(receivedBalNumServerId if ((receivedBalNum > minBallotNum) || ((receivedBalNum == minBallotNum) && (receivedBalNumServerId >= minBallotNumServerId))) { acceptedVal.type = message[3]; acceptedVal.amount = Double.parseDouble(message[4]); acceptedVal.logPosition = Integer.parseInt(message[5]); acceptedVal.balNum = receivedBalNum; acceptedVal.balNumServerId = receivedBalNumServerId; 
System.out.println("handleAccept acceptedVal: " + acceptedVal.type + " " + acceptedVal.amount + " " + acceptedVal.logPosition); minBallotNum = receivedBalNum; minBallotNumServerId = receivedBalNumServerId; String cohortAcceptMsg = "finalaccept " + receivedBalNum + " " + receivedBalNumServerId + " " + acceptedVal.type + " " + acceptedVal.amount + " " + acceptedVal.logPosition; try { System.out.println("Broadcasting final accept"); ClientServer.sendToAll(cohortAcceptMsg); } catch (Exception ex) { System.out.println(ex); } } } public void handleFinalAccept(String[] message) { // We're done if we get this step (if we get final accepts from ALL servers), save the final value // numFinalA++; int receivedBalNum = Integer.parseInt(message[1]); int receivedBalNumServerId = Integer.parseInt(message[2]); int serverIndex = receivedBalNumServerId; System.out.println("handleFinalAccept message: " + Arrays.toString(message)); // ackBucket[serverIndex].numAccepts++; finalAcceptBucket[serverIndex]++; System.out.println("Bucket of : " + ClientServer.serverId + " " + Arrays.toString(finalAcceptBucket)); // // if ((receivedBalNum > minBallotNum) // || ((receivedBalNum == minBallotNum) && (receivedBalNumServerId >= minBallotNumServerId))) { // // // do nothing, keep in queue // // } if (finalAcceptBucket[serverIndex] == HeartBeat.numProc) { acceptedVal.type = message[3]; acceptedVal.amount = Double.parseDouble(message[4]); acceptedVal.logPosition = Integer.parseInt(message[5]); Log.addToTransactionLog(acceptedVal); System.out.println("Decided on: " + acceptedVal.amount); finalAcceptBucket[serverIndex] = 0; leader = false; if ( (serverIndex == ClientServer.serverId) && (val.amount == acceptedVal.amount) ) { ackCount = 0; System.out.println("Accepting: " + acceptedVal.type + " " + acceptedVal.amount); PaxosQueue.printQ(); ClientServer.paxosQueueObj.transactionQueue.removeElementAt(0); PaxosQueue.printQ(); } // reset for next iteration phase2 = false; ackCount = 0; 
ClientServer.paxosQueueObj.isProposing = false; resetAcceptedVal(); System.out.println("acceptedVal now: " + acceptedVal.type + " " + acceptedVal.amount); System.out.println("Balance is : "+ Log.balance); System.out.println("========================== DONE WITH ROUND ========================== \n\n"); } try { sleep(1000); } catch (InterruptedException ex) { Logger.getLogger(Paxos.class.getName()).log(Level.SEVERE, null, ex); } } public static void handleSizeRequest(String[] request) throws Exception { // send size of local log back to server with attached ID int size = Log.transactionLog.size(); String sizeResponse = "mysize " + size + " " + ClientServer.serverId; ClientServer.sendTo(sizeResponse, request[1]); } public static void handleSizeResponse(String[] response) throws Exception { // if local is up to date, import data int size = Integer.parseInt(response[1]); int server = Integer.parseInt(response[2]); System.out.println("Server: " + server + " size is: " + size); ClientServer.logSizes[server] = size; ClientServer.heardFrom++; System.out.println("heardfrom: " + ClientServer.heardFrom); System.out.println("numproc: " + HeartBeat.numProc); if (ClientServer.heardFrom == HeartBeat.numProc - ClientServer.ctrlc) { System.out.println("About to enter reqlog"); ClientServer.requestLog(); ClientServer.heardFrom = 0; } // else, get data from the most up to date process // also need to prevent user from sending messages? 
} } <file_sep>/README.md cs271_proj1_java ================ CS 271 Project TODO: • create scheme for dealing with server IDs • fail/unfail methods • create log catch up mechanism • be able to write log to disk (serializability) ⁃ write to disk after every paxos transaction • add queue functionality <file_sep>/deploy.cpp #include <sys/types.h> #include <sys/stat.h> #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include <unistd.h> #include <syslog.h> #include <string.h> #include <ctime> #include <iostream> #include <string> #include <cstdio> #include <iostream> #include <fstream> using namespace std; // Exec function std::string exec(const char* cmd) { FILE* pipe = popen(cmd, "r"); if (!pipe) return "ERROR"; char buffer[128]; std::string result = ""; while(!feof(pipe)) { if(fgets(buffer, 128, pipe) != NULL) result += buffer; } pclose(pipe); return result; } int main(void) { // init addrs static const string address[] = { "172.16.58.3", "172.16.31.10", "192.168.127.12", "172.16.31.10", "192.168.3.11" }; static const string cert[] = { "turtlebeards.pem", "turtlebeards_california.pem", "turtlebeards_ireland.pem", "turtlebeards_singapore.pem", "turtlebeards_saopaulo.pem" }; // scp -r -i ~/Desktop/turtlebeards.pem ~/Dropbox/Current\ Documents/cs271/cs271_proj1_java/clientServer/dist/clientServer.jar ec2-user@172.16.58.3:/home/ec2-user/ for(int i = 0; i < 5; i++) { cout << "> DEPLOYING FILES TO REMOTE HOST" << endl; string cmd = "scp -r -i ~/Desktop/"+ cert[i] +" ~/Dropbox/Current\\ Documents/cs271/cs271_proj1_java/clientServer/dist/clientServer.jar ec2-user@"+address[i]+":/home/ec2-user/"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); string result = exec(cmd.c_str()); cout << "> DELETING LOGS" << endl; cmd = "ssh -i /Users/wdai/Desktop/"+ cert[i] +" ec2-user@"+address[i]+" 'rm log.txt'"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); result = exec(cmd.c_str()); // sleep(1); } exit(EXIT_SUCCESS); } <file_sep>/Makefile all: 
deploy.cpp g++ -g -w -Wall -m32 -o deploy deploy.cpp g++ -g -w -Wall -m32 -o modpaxdeploy modpaxdeploy.cpp clean: $(RM) deploy $(RM) modpaxdeploy <file_sep>/modPaxos/src/modpaxos/PaxosQueue.java /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package modpaxos; import java.util.Arrays; import java.util.Vector; /** * * @author olivertownsend */ public class PaxosQueue { public static Vector<String[]> transactionQueue; public static boolean isProposing = false; public PaxosQueue() { transactionQueue = new Vector<String[]>(); } public static void printQ() { for (int i = 0; i < transactionQueue.size(); ++i) { String[] val = transactionQueue.elementAt(i); // System.out.println("Queue " + i + ": " + Arrays.toString(val)); } } public static void queueWatcher() { // if there are items in the queue and if Paxos isn't currently proposing a value, then propose value if ((!transactionQueue.isEmpty()) && (!isProposing) && (HeartBeat.leaderId != ClientServer.serverId)) { if (!transactionQueue.isEmpty()) { String[] newTrans = transactionQueue.firstElement(); // System.out.println(Arrays.toString(newTrans)); // bypass acks, always leader try { String cohortProposal = "cohort " + newTrans[0] + " " + newTrans[1]; ClientServer.sendTo(cohortProposal, Integer.toString(HeartBeat.leaderId)); transactionQueue.remove(0); // ClientServer.paxosObject.prepareMsg(newTrans); } catch (Exception ex) { System.out.println("queueWatcher:" + ex); } } // if leader } else if ((!transactionQueue.isEmpty()) && (!isProposing) && (HeartBeat.leaderId == ClientServer.serverId)) { if (!transactionQueue.isEmpty()) { String trans[] = transactionQueue.firstElement(); // System.out.println("TRANS: "+Arrays.toString(trans)); String winMsg = "accept " + trans[0] + " " + trans[1] + " " + Log.currIndex; try { isProposing = true; ClientServer.sendToAll(winMsg); } catch (Exception ex) { 
System.out.println("senttoall: "+ex); } } } } } <file_sep>/deploy_ot.cpp #include <sys/types.h> #include <sys/stat.h> #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include <unistd.h> #include <syslog.h> #include <string.h> #include <ctime> #include <iostream> #include <string> #include <cstdio> #include <iostream> #include <fstream> using namespace std; // Exec function std::string exec(const char* cmd) { FILE* pipe = popen(cmd, "r"); if (!pipe) return "ERROR"; char buffer[128]; std::string result = ""; while(!feof(pipe)) { if(fgets(buffer, 128, pipe) != NULL) result += buffer; } pclose(pipe); return result; } int main(void) { // init addrs static const string address[] = { "172.16.58.3", "172.16.31.10", "172.16.31.10", "172.16.58.3", "172.16.31.10" }; // scp -r -i ~/Desktop/turtlebeards.pem ~/Dropbox/Current\ Documents/cs271/cs271_proj1_java/clientServer/dist/clientServer.jar ec2-user@172.16.58.3:/home/ec2-user/ //for(int i = 0; i < 5; i++) { string cmd = "scp -r -i ~/Desktop/turtlebeards.pem /Users/olivertownsend/NetBeansProjects/cs271_proj1_java/clientServer/dist/clientServer.jar ec2-user@"+address[0]+":/home/ec2-user/"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); string result = exec(cmd.c_str()); cout << "> DELETING LOGS" << endl; cmd = "ssh -i /Users/olivertownsend/Desktop/turtlebeards.pem ec2-user@"+address[0]+" 'rm log.txt'"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); result = exec(cmd.c_str()); cmd = "scp -r -i ~/Desktop/turtlebeards_california.pem /Users/olivertownsend/NetBeansProjects/cs271_proj1_java/clientServer/dist/clientServer.jar ec2-user@"+address[1]+":/home/ec2-user/"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); result = exec(cmd.c_str()); cout << "> DELETING LOGS" << endl; cmd = "ssh -i /Users/olivertownsend/Desktop/turtlebeards_california.pem ec2-user@"+address[1]+" 'rm log.txt'"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); result = exec(cmd.c_str()); cmd = 
"scp -r -i ~/Desktop/turtlebeards_ireland.pem /Users/olivertownsend/NetBeansProjects/cs271_proj1_java/clientServer/dist/clientServer.jar ec2-user@"+address[2]+":/home/ec2-user/"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); result = exec(cmd.c_str()); cout << "> DELETING LOGS" << endl; cmd = "ssh -i /Users/olivertownsend/Desktop/turtlebeards_ireland.pem ec2-user@"+address[2]+" 'rm log.txt'"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); result = exec(cmd.c_str()); cmd = "scp -r -i ~/Desktop/turtlebeards_singapore.pem /Users/olivertownsend/NetBeansProjects/cs271_proj1_java/clientServer/dist/clientServer.jar ec2-user@"+address[3]+":/home/ec2-user/"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); result = exec(cmd.c_str()); cout << "> DELETING LOGS" << endl; cmd = "ssh -i /Users/olivertownsend/Desktop/turtlebeards_singapore.pem ec2-user@"+address[3]+" 'rm log.txt'"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); result = exec(cmd.c_str()); cmd = "scp -r -i ~/Desktop/turtlebeards_saopaulo.pem /Users/olivertownsend/NetBeansProjects/cs271_proj1_java/clientServer/dist/clientServer.jar ec2-user@"+address[4]+":/home/ec2-user/"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); result = exec(cmd.c_str()); cout << "> DELETING LOGS" << endl; cmd = "ssh -i /Users/olivertownsend/Desktop/turtlebeards_saopaulo.pem ec2-user@"+address[4]+" 'rm log.txt'"; cout.write(cmd.c_str(), strlen(cmd.c_str())); cout.put('\n'); result = exec(cmd.c_str()); // sleep(1); //} exit(EXIT_SUCCESS); }
3264df3d33bebb6218960ce9ed259c0746d64e40
[ "Markdown", "Java", "Makefile", "C++" ]
8
C++
otownsend92/cs271_proj1_java
389293627eb92d7e68efee6bf0400dfb0b8fb581
3a2d64f768fa14e10c0cca4b2b216fefda51fb14
refs/heads/master
<repo_name>AntonyBlueboy/SpotterTest<file_sep>/app/src/main/java/com/rudolf/spottertest/ArtilleryGameInterface.java package com.rudolf.spottertest; import android.os.Parcelable; /** * Created by антон on 11.05.2016. */ public interface ArtilleryGameInterface extends Parcelable{ // тут был метод проверявший исчисленые бойцом коэфициенты, но для разных пристрелок его нужно было перегружать, так что я его убрал. // метод выдает игроку наблюдения в виде обьека Burst AbstractBurst getBurst(); // метод проверяет корректуру принятую в виде обьекта Correcture. // Если корректура идентична текущей, то возвращает тот же самый обьект, если не верна, возвращает новый. Correcture getCorrection(); //Обновляет статистику стрельбы игрока void RefreshStats(boolean isCorrect); //Возвращает описанее боевого порядка String getFormotion(); }<file_sep>/app/src/main/java/com/rudolf/spottertest/AbstractBurst.java package com.rudolf.spottertest; /** * Created by антон on 22.05.2016. */ public interface AbstractBurst { } <file_sep>/app/src/main/java/com/rudolf/spottertest/ArtilleryGrids.java package com.rudolf.spottertest; /** * Created by антон on 11.06.2016. 
* Класс для работы с тысячными */ public class ArtilleryGrids { //Принимает угол в обычной записи, а возвращает в записи тысячных public static String getAngletoGrids(double angle) { int temp = (int) angle; int smallInt = ((int)((angle/100-temp/100)*100)); int bigInt = temp/100; // костыль, призваный убрать непонятную потерю одной малой тысячной if ((bigInt*100 + smallInt) != temp) smallInt++; String small = smallInt + ""; while (small.length() < 2) small = "0" + small; StringBuilder result = new StringBuilder(); result.append(bigInt).append("-").append(small); return result.toString(); } //Принимает строку с углом в тысячных, а возвращает числом public static int getGridToAngle(String gridAngle) throws WrongGridFormatException{ try { String[] grids = gridAngle.split("-"); int angle = Integer.parseInt(grids[0]) * 100 + Integer.parseInt(grids[1]); return angle; } catch (Exception e) {throw new WrongGridFormatException();} } } <file_sep>/app/src/main/java/com/rudolf/spottertest/BurstForDO.java package com.rudolf.spottertest; /** * Created by антон on 22.05.2016. */ public class BurstForDO implements AbstractBurst { boolean isLeftForLeft; int leftAngle; boolean isLeftForRight; int rightAngle; public BurstForDO(boolean isLeftForLeft, int leftAngle, boolean isLeftForRight, int rightAngle) { this.isLeftForLeft = isLeftForLeft; this.leftAngle = leftAngle; this.isLeftForRight = isLeftForRight; this.rightAngle = rightAngle; } public boolean isLeftForLeft() { return isLeftForLeft; } public int getLeftAngle() { return leftAngle; } public boolean isLeftForRight() { return isLeftForRight; } public int getRightAngle() { return rightAngle; } @Override public String toString() { String burstInfo = "(Левое КСП) Наблюдаю разрыв по цели!\n(Правое КСП) Наблюдаю разрыв по цели!"; if (leftAngle == 0 & rightAngle!=0) { burstInfo = "(Левое КСП) Наблюдаю разрыв по цели! \n(Правое КСП) Наблюдаю разрыв! " + (isLeftForRight ? 
", лево " : ", право ")+ ArtilleryGrids.getAngletoGrids(rightAngle) + "!"; } else if (leftAngle != 0 & rightAngle==0) { burstInfo = "(Левое КСП) Наблюдаю разрыв! " + (isLeftForLeft ? "Лево " : "Право ") + ArtilleryGrids.getAngletoGrids(leftAngle) + "!\n(Правое КСП) Наблюдаю разрыв по цели!"; } else if ( leftAngle != 0 & rightAngle!=0) burstInfo = "(Левое КСП) Наблюдаю разрыв! " + (isLeftForLeft ? "Лево " : "Право ") + ArtilleryGrids.getAngletoGrids(leftAngle) + "! \n(Правое КСП) Наблюдаю разрыв! " + (isLeftForRight ? ", лево " : ", право ")+ ArtilleryGrids.getAngletoGrids(rightAngle) + "!"; return burstInfo; } } <file_sep>/app/src/main/java/com/rudolf/spottertest/RangerFinderActivityChecking.java package com.rudolf.spottertest; import android.content.Intent; import android.renderscript.Double2; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.view.View; import android.widget.Button; import android.widget.EditText; import android.widget.TextView; import android.widget.Toast; public class RangerFinderActivityChecking extends AppCompatActivity { ArtilleryGameRangefinderType game; TextView formation, resulText; EditText KUET, WUET, scaleET; Button checkingButton, startGameButton; boolean isUsed = false; int ValueOfScale; double KU; int WU; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_ranger_finder_activity_checking); formation = (TextView) findViewById(R.id.formationTextV); resulText = (TextView) findViewById(R.id.ResultCheckingText); game = getIntent().getParcelableExtra( ArtilleryGameRangefinderType.class.getCanonicalName()); formation.setText(game.getFormotion()); KUET = (EditText) findViewById(R.id.inputKU); WUET = (EditText) findViewById(R.id.inputWU); scaleET = (EditText) findViewById(R.id.inputScale); checkingButton = (Button) findViewById(R.id.toCheckButton); startGameButton = (Button) findViewById(R.id.startGameButton); final Intent 
playingActivityIntent = new Intent("com.spotter.RangerFinderGame"); checkingButton.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (isUsed) { Toast.makeText(RangerFinderActivityChecking.this, "Проверка уже проведена", Toast.LENGTH_SHORT).show(); return; } if (KUET.getText().toString().isEmpty() || WUET.getText().toString().isEmpty() || scaleET.getText().toString().isEmpty()) { Toast.makeText(RangerFinderActivityChecking.this, "Заполните все поля", Toast.LENGTH_SHORT).show(); return; } KU = Double.parseDouble(KUET.getText().toString()); try { WU = ArtilleryGrids.getGridToAngle(WUET.getText().toString()); } catch (WrongGridFormatException e) { Toast.makeText(RangerFinderActivityChecking.this, "ШУ введен в некорректном виде", Toast.LENGTH_SHORT).show(); return; } ValueOfScale = Integer.parseInt(scaleET.getText().toString()); game.setValueOfScale(ValueOfScale); boolean iscorrect = game.checkPreparingResult(KU , WU); if (iscorrect) resulText.setText("Рассчеты верны, можно начинать"); if (!iscorrect) resulText.setText(String.format("Рассчитано неверно.\n Запиши, КУ - %.1f, ШУ - %s.\n Можем начинать.", game.getMainDistanceCoef(), ArtilleryGrids.getAngletoGrids(game.getMainProtractorStep()) )); isUsed = true; } }); startGameButton.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (!isUsed) Toast.makeText(RangerFinderActivityChecking.this, "Сперва проведи проверку", Toast.LENGTH_SHORT).show(); if (isUsed) { playingActivityIntent.putExtra(ArtilleryGameRangefinderType.class.getCanonicalName(), game); startActivity(playingActivityIntent); } } }); } } <file_sep>/app/src/main/java/com/rudolf/spottertest/Correcture.java package com.rudolf.spottertest; /** * Created by антон on 11.05.2016. 
*/ public class Correcture { private boolean isLower; private int distanceCorrection; private boolean isTotheLeft; private int angleCorrection; public Correcture(boolean isLower, int distanceCorrection, boolean isTotheLeft, int angleCorrection) { this.isLower = isLower; this.distanceCorrection = distanceCorrection; this.isTotheLeft = isTotheLeft; this.angleCorrection = angleCorrection; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Correcture that = (Correcture) o; if (isLower != that.isLower) return false; if (distanceCorrection != that.distanceCorrection) return false; if (isTotheLeft != that.isTotheLeft) return false; if (angleCorrection != that.angleCorrection) return false; return true; } @Override public int hashCode() { int result = (isLower ? 1 : 0); result = 31 * result + distanceCorrection; result = 31 * result + (isTotheLeft ? 1 : 0); result = 31 * result + angleCorrection; return result; } public boolean isLower() { return isLower; } public void setLower(boolean lower) { isLower = lower; } public int getDistanceCorrection() { return distanceCorrection; } public void setDistanceCorrection(int distanceCorrection) { this.distanceCorrection = distanceCorrection; } public boolean isTotheLeft() { return isTotheLeft; } public void setTotheLeft(boolean totheLeft) { isTotheLeft = totheLeft; } public int getAngleCorrection() { return angleCorrection; } public void setAngleCorrection(int angleCorrection) { this.angleCorrection = angleCorrection; } } <file_sep>/app/src/main/java/com/rudolf/spottertest/GridsGameAct.java package com.rudolf.spottertest; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.view.View; import android.widget.Button; import android.widget.EditText; import android.widget.TextView; import android.widget.Toast; public class GridsGameAct extends AppCompatActivity { Button distanceGameBtn, angleGameBtn, sizeGameBtn, 
checkingBtn; TextView taskInfo, answerInfo; EditText answerET; boolean isNotAnswered = false, isCorrect; GridsGame currentGame = new GridsGame(); int gameIndex, answer; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_grids_game); distanceGameBtn = (Button)findViewById(R.id.GGDistanceBtn); angleGameBtn = (Button)findViewById(R.id.GGAngleBtn); sizeGameBtn = (Button)findViewById(R.id.GGSizeBtn); checkingBtn = (Button)findViewById(R.id.GGCheckingBtn); taskInfo = (TextView)findViewById(R.id.TaskInfoText); answerInfo = (TextView)findViewById(R.id.GGanswerText); answerET = (EditText) findViewById(R.id.GGinputAnswer); distanceGameBtn.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (isNotAnswered) Toast.makeText(GridsGameAct.this, "Реши предыдущую задачу", Toast.LENGTH_SHORT).show(); else { answerET.setText(""); String message = currentGame.getDistanceTask(); gameIndex = 1; isNotAnswered = true; taskInfo.setText(message); answerInfo.setText(""); } } }); angleGameBtn.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (isNotAnswered) Toast.makeText(GridsGameAct.this, "Реши предыдущую задачу", Toast.LENGTH_SHORT).show(); else { answerET.setText(""); String message = currentGame.getAngleTask(); gameIndex = 2; isNotAnswered = true; taskInfo.setText(message); answerInfo.setText(""); } } }); sizeGameBtn.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (isNotAnswered) Toast.makeText(GridsGameAct.this, "Реши предыдущую задачу", Toast.LENGTH_SHORT).show(); else { answerET.setText(""); String message = currentGame.getSizeTask(); gameIndex = 3; isNotAnswered = true; taskInfo.setText(message); answerInfo.setText(""); } } }); checkingBtn.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (!isNotAnswered){ Toast.makeText(GridsGameAct.this, 
"Выбери задачу", Toast.LENGTH_SHORT).show(); return; } if (answerET.getText().toString().isEmpty()){ Toast.makeText(GridsGameAct.this, "Введите ответ", Toast.LENGTH_SHORT).show(); return; } else { try { switch (gameIndex) { case 1: answer = Integer.parseInt(answerET.getText().toString()); isCorrect = answer == currentGame.getDistance(); if (isCorrect) answerInfo.setText("Верный ответ. Получи новое задание"); else answerInfo.setText(String.format("Правильный ответ %d м", currentGame.getDistance())); isNotAnswered = false; break; case 2: try { answer = ArtilleryGrids.getGridToAngle(answerET.getText().toString()); isCorrect = answer == currentGame.getAngle(); if (isCorrect) answerInfo.setText("Верный ответ. Получи новое задание"); else answerInfo.setText("Правильный ответ " + ArtilleryGrids.getAngletoGrids(currentGame.getAngle())); isNotAnswered = false; break; } catch (WrongGridFormatException e) { Toast.makeText(GridsGameAct.this, "Неверный формат ввода", Toast.LENGTH_SHORT).show(); break; } case 3: answer = Integer.parseInt(answerET.getText().toString()); isCorrect = answer == currentGame.getSize(); if (isCorrect) answerInfo.setText("Верный ответ. Получи новое задание"); else answerInfo.setText(String.format("Правильный ответ %d м", (int) currentGame.getSize())); isNotAnswered = false; break; } } catch (NumberFormatException e){ Toast.makeText(GridsGameAct.this, "Неверный формат ввода", Toast.LENGTH_SHORT).show(); } } } }); } }
dfda8882331bd413d7d234aa32e90ccf328488f0
[ "Java" ]
7
Java
AntonyBlueboy/SpotterTest
7d623f063eaaf2ecb56fab4609ef8d03919ca2a4
380713c0fa8bed4bc78112be8c514efc6afa0f6b
refs/heads/master
<repo_name>marcelommarra/vox-alimentos<file_sep>/contato.php <!doctype html> <html> <head> <?php include'head.php';?> <!--::::::::: MENU MOBILE :::::::::--> <link rel="stylesheet" type="text/css" href="menu/1/default.css"> <link rel="stylesheet" type="text/css" href="menu/1/component.css"> <script type="text/javascript" async src="menu/1/ga.js"></script> <script src="menu/1/modernizr.custom.js"></script><style type="text/css"></style> </head> <body> <?php include'topo2.php';?> <div class="center"><div class="clearfix"> <div id="menu-empresa-titulo"> <div class="alinha6">QUER SABER MAIS? ENTRE EM CONTATO</div> <div id="grid12"><div class="line"></div></div> </div> <div id="grid5"> <label for="nome">Nome:</label> <input type= "text" name="nome" class="form1" required/> <br><br> <label for="email">E-mail:</label> <input type= "text" name="email" class="form1" required/> <br><br> <label for="telefone">Telefone:</label> <input type="text" name="telefone" class="form1"> <br><br> <label for="assunto">Assunto:</label> <input type="text" name="assunto" class="form1"> <br><br> <label for="assunto">Mensagem:</label> <textarea class='form2' name='mensagem'/></textarea> <div class="enviar"><input type="submit" class="enviar" value="ENVIAR"/></div> </div> <div id="grid6"> <strong>Endereço:</strong><br> Rua 2, n° 488, Setor Oeste <br> Goiânia/GO, CEP 74.110-130 <br><br><strong>Telefone:</strong><br> (62) 0000-0001 / (62) 0000-0002 <br><br><strong>Email:</strong><br> <EMAIL> <div id="grid12"><div class="menu-contato-icone-localizacao"><img src="img/loc.png"/></div></div> <div class="menu-contato-mapa"><img src="img/map.jpg" width="100%"/></div> </div></div></div> <?php include'footer.php';?> </body> </html><file_sep>/destaque.php <div id="institucional"> <div class="institucional-alto"></div> <div class="institucional-center"><div class="center"><div class="clearfix"> <div id="grid12"> <div id="titulo-institucional-center"><div class="alinha1">BEM-VINDOS A</div><div 
class="alinha2">VOX ALIMENTOS</div></div> <div class="titulo-institucional">BEM-VINDOS A VOX ALIMENTOS</div> </div> <div class="institucional-text"> Lorem Ipsum é simplesmente uma simulação de texto da indústria tipográfica e de impressos, e vem sendo utilizado desde o século XVI, quando um impressor desconhecido pegou uma bandeja de tipos e os embaralhou para fazer um livro de modelos de tipos. Lorem Ipsum sobreviveu não só a cinco séculos, como também ao salto para a editoração eletrônica, permanecendo essencialmente inalterado. Se popularizou na década de 60, quando a Letraset lançou decalques contendo passagens de Lorem Ipsum, e mais recentemente quando passou a ser integrado a softwares de editoração eletrônica <br><br> Porque nós o usamos? É um fato conhecido de todos que um leitor se distrairá com o conteúdo de texto legível de uma página quando estiver examinando sua diagramação. A vantagem de usar Lorem Ipsum é que ele tem uma distribuição normal de letras, ao contrário de "Conteúdo aqui, conteúdo aqui", fazendo com que ele tenha uma aparência similar a de um texto legível. Muitos softwares de publicação e editores de páginas na internet agora usam Lorem Ipsum como texto-modelo padrão, e uma rápida busca por 'lorem ipsum' mostra vários websites ainda em sua fase de construção. Várias versões novas surgiram ao longo dos anos, eventualmente por acidente, e às vezes de propósito (injetando humor, e coisas do gênero). 
</div> <div id="grid12"> <div class="institucional-plus"><a href="empresa"><img src="img/plus.png"/></a></div> </div></div></div></div> <div class="institucional-baixo"></div> </div> <div class="center"><div class="clearfix"> <div class="alinha3">PRODUTOS EM DESTAQUE</div> <div id="produtos-destaque"><?php include'produtos-destaque.php';?></div> <div id="grid12"><div class="produtos-destaque-flip"><a href="produtos"> <div class="cube"><div class="flippety"><h1>ACHOU POUCO?</h1></div><div class="flop"><h1>VER TODOS PRODUTOS</h1></div></div> </a></div></div></div></div><file_sep>/footer.php <div id="instagram-plugin"> <div id="grid12"><div class="instagram-plugin-icon"><img src="img/instagram-icon.png"/></div></div> <div id="instagram-fotos"><img src="img/plugin.jpg" width="100%"/></div> </div> <div id="infos"><div class="center"><div class="clearfix"><div class="cut1"> <div id="grid4"> <div id="grid12"><div class="infos-icon"><img src="img/telefone.png" width="100%"/></div></div> <div class="alinha4">(62) 0000-0000</div> </div> <div id="grid4"> <div id="grid12"><div class="infos-icon"><img src="img/whatsapp.png" width="100%"/></div></div> <div class="alinha4">(62) 0000-0000</div> </div> <div id="grid4"> <div id="grid12"><div class="infos-icon"><img src="img/localizacao.png" width="100%"/></div></div> <div class="alinha5">Rua 2, n° 488, Setor Oeste <br> Goiânia/GO, CEP 74.110-130</div> </div></div></div></div></div> <div id="footer"><div class="center"><div class="clearfix"> <div class="copyright">Todos direitos são reservados ao ... 
<?php $datahoje = date('y'); echo" 20$datahoje ";?></div> <div class="logoas"><a href='http://asweb.com.br' target='_blank' title='Desenvolvimento - AS Web'><img src="img/logoas.png"/></a></div></div></div></div></div><file_sep>/noticia-detalhe.php <!doctype html> <html> <head> <?php include'head.php';?> <!--::::::::: MENU MOBILE :::::::::--> <link rel="stylesheet" type="text/css" href="menu/1/default.css"> <link rel="stylesheet" type="text/css" href="menu/1/component.css"> <script type="text/javascript" async src="menu/1/ga.js"></script> <script src="menu/1/modernizr.custom.js"></script><style type="text/css"></style> </head> <body> <?php include'topo2.php';?> <div class="center"><div class="clearfix"> <div id="menu-empresa-titulo"> <div class="alinha6">DICAS E IDEIAS</div> <div class="alinha7">SUCOS</div> <div id="grid12"><div class="line"></div></div> </div> <div class="menu-noticias2-foto"><img src="img/noticia.jpg" width="100%"/></div> <div class="menu-noticias2-text"> It is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout. The point of using Lorem Ipsum is that it has a more-or-less normal distribution of letters, as opposed to using 'Content here, content here', making it look like readable English. Many desktop publishing packages and web page editors now use Lorem Ipsum as their default model text, and a search for 'lorem ipsum' will uncover many web sites still in their infancy. Various versions have evolved over the years, sometimes by accident, sometimes on purpose (injected humour and the like). 
</div></div></div> <?php include'footer.php';?> </body> </html><file_sep>/empresa.php <!doctype html> <html> <head> <?php include'head.php';?> <!--::::::::: MENU MOBILE :::::::::--> <link rel="stylesheet" type="text/css" href="menu/1/default.css"> <link rel="stylesheet" type="text/css" href="menu/1/component.css"> <script type="text/javascript" async src="menu/1/ga.js"></script> <script src="menu/1/modernizr.custom.js"></script><style type="text/css"></style> </head> <body> <?php include'topo2.php';?> <div class="center"><div class="clearfix"> <div id="menu-empresa-titulo"> <div class="alinha6">VOX ALIMENTOS NO SEU DIA A DIA </div> <div id="grid12"><div class="line"></div></div> </div> <div class="menu-empresa-produto"><img src="img/produto.png" width="100%"/></div> <div class="menu-empresa-text"> Lorem Ipsum é simplesmente uma simulação de texto da indústria tipográfica e de impressos, e vem sendo utilizado desde o século XVI, quando um impressor desconhecido pegou uma bandeja de tipos e os embaralhou para fazer um livro de modelos de tipos. Lorem Ipsum sobreviveu não só a cinco séculos, como também ao salto para a editoração eletrônica, permanecendo essencialmente inalterado. Se popularizou na década de 60, quando a Letraset lançou decalques contendo passagens de Lorem Ipsum, e mais recentemente quando passou a ser integrado a softwares de editoração eletrônica como Aldus PageMaker. <br><br> Porque nós o usamos? É um fato conhecido de todos que um leitor se distrairá com o conteúdo de texto legível de uma página quando estiver examinando sua diagramação. A vantagem de usar Lorem Ipsum é que ele tem uma distribuição normal de letras, ao contrário de "Conteúdo aqui, conteúdo aqui", fazendo com que ele tenha uma aparência similar a de um texto legível. Muitos softwares de publicação e editores de páginas na internet agora usam Lorem Ipsum como texto-modelo padrão, e uma rápida busca por 'lorem ipsum' mostra vários websites ainda em sua fase de construção. 
Várias versões novas surgiram ao longo dos anos, eventualmente por acidente, e às vezes de propósito (injetando humor, e coisas do gênero). </div></div></div> <div class="menu-empresa-banner"><img src="img/alimentos.jpg" width="100%"/></div> <div class="center"><div class="clearfix"><div class="cut1"> <div id="grid4A"><img src="img/sucos.jpg" width="100%"/></div> <div id="grid4A"><img src="img/sucos.jpg" width="100%"/></div> <div id="grid4A"><img src="img/sucos.jpg" width="100%"/></div> <div id="grid4A"><img src="img/sucos.jpg" width="100%"/></div> <div id="grid4A"><img src="img/sucos.jpg" width="100%"/></div> <div id="grid4A"><img src="img/sucos.jpg" width="100%"/></div> </div></div></div> <div id="menu-empresa-titulo"> <div class="alinha6">ONDE ESTAMOS</div> <div id="grid12"><div class="line"></div></div> </div> <div class="menu-empresa-mapa"><img src="img/mapa.jpg" width="100%"/></div> <?php include'footer.php';?> </body> </html><file_sep>/index.php <!doctype html> <html> <head> <?php include'head.php';?> <!--::::::::: MENU MOBILE :::::::::--> <link rel="stylesheet" type="text/css" href="menu/1/default.css"> <link rel="stylesheet" type="text/css" href="menu/1/component.css"> <script type="text/javascript" async src="menu/1/ga.js"></script> <script src="menu/1/modernizr.custom.js"></script><style type="text/css"></style> <!--::::::::: Bootstrap :::::::::--> <!-- jQuery (necessary for Bootstrap's JavaScript plugins) --> <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js"></script> <!-- Include all compiled plugins (below), or include individual files as needed --> <script src="bootstrap/js/bootstrap.min.js"></script> <link href="bootstrap/css/bootstrap.min.css" rel="stylesheet"> <!--::::::::: EFFECT :::::::::--> </head> <body> <?php include'topo.php';?> <?php include'destaque.php';?> <?php include'footer.php';?> </body> </html><file_sep>/produtos.php <!doctype html> <html> <head> <?php include'head.php';?> <!--::::::::: MENU MOBILE 
:::::::::--> <link rel="stylesheet" type="text/css" href="menu/1/default.css"> <link rel="stylesheet" type="text/css" href="menu/1/component.css"> <script type="text/javascript" async src="menu/1/ga.js"></script> <script src="menu/1/modernizr.custom.js"></script><style type="text/css"></style> </head> <body> <?php include'topo2.php';?> <div class="center"><div class="clearfix"> <div id="menu-empresa-titulo"> <div class="alinha6">PRODUTOS PARA SEU VAREJO</div> <div id="grid12"><div class="line"></div></div> </div></div></div> <div class="menu-verejo-banner"><img src="img/alimentos.jpg" width="100%"/></div> <div class="center"><div class="clearfix"><div class="cut1"> <div id="grid4A"> <div id="grid12"><div class="menu-verejo-foto"><img src="img/kabutia.jpg" width="100%"/></div></div> <div class="menu-varejo-nome">KABUTIA</div> </div> <div id="grid4A"> <div id="grid12"><div class="menu-verejo-foto"><img src="img/kabutia.jpg" width="100%"/></div></div> <div class="menu-varejo-nome">KABUTIA</div> </div> <div id="grid4A"> <div id="grid12"><div class="menu-verejo-foto"><img src="img/kabutia.jpg" width="100%"/></div></div> <div class="menu-varejo-nome">KABUTIA</div> </div> <div id="grid4A"> <div id="grid12"><div class="menu-verejo-foto"><img src="img/kabutia.jpg" width="100%"/></div></div> <div class="menu-varejo-nome">KABUTIA</div> </div> <div id="grid4A"> <div id="grid12"><div class="menu-verejo-foto"><img src="img/kabutia.jpg" width="100%"/></div></div> <div class="menu-varejo-nome">KABUTIA</div> </div> <div id="grid4A"> <div id="grid12"><div class="menu-verejo-foto"><img src="img/kabutia.jpg" width="100%"/></div></div> <div class="menu-varejo-nome">KABUTIA</div> </div> <div id="grid4A"> <div id="grid12"><div class="menu-verejo-foto"><img src="img/kabutia.jpg" width="100%"/></div></div> <div class="menu-varejo-nome">KABUTIA</div> </div> <div id="grid4A"> <div id="grid12"><div class="menu-verejo-foto"><img src="img/kabutia.jpg" width="100%"/></div></div> <div 
class="menu-varejo-nome">KABUTIA</div> </div> <div id="grid4A"> <div id="grid12"><div class="menu-verejo-foto"><img src="img/kabutia.jpg" width="100%"/></div></div> <div class="menu-varejo-nome">KABUTIA</div> </div> </div></div></div> <?php include'footer.php';?> </body> </html><file_sep>/noticias.php <!doctype html> <html> <head> <?php include'head.php';?> <!--::::::::: MENU MOBILE :::::::::--> <link rel="stylesheet" type="text/css" href="menu/1/default.css"> <link rel="stylesheet" type="text/css" href="menu/1/component.css"> <script type="text/javascript" async src="menu/1/ga.js"></script> <script src="menu/1/modernizr.custom.js"></script><style type="text/css"></style> </head> <body> <?php include'topo2.php';?> <div class="center"><div class="clearfix"> <div id="menu-empresa-titulo"> <div class="alinha6">DICAS E IDEIAS</div> <div id="grid12"><div class="line"></div></div> </div> <div class="cut1"> <div id="grid4B"> <div class="menu-noticias-foto"><a href="noticia-detalhe"><img src="img/sucos.jpg" width="100%"/></a></div> <div class="menu-varejo-nome">SUCOS</div> <div class="menu-noticias-text"> A ingestão de alimentos saudáveis também garante mais ene... </div><div id="grid12"><div class="menu-noticias-plus"><a href="noticia-detalhe"><img src="img/plus.png"/></a></div></div> </div> <div id="grid4B"> <div class="menu-noticias-foto"><a href="noticia-detalhe"><img src="img/sucos.jpg" width="100%"/></a></div> <div class="menu-varejo-nome">SUCOS</div> <div class="menu-noticias-text"> A ingestão de alimentos saudáveis também garante mais ene... </div><div id="grid12"><div class="menu-noticias-plus"><a href="noticia-detalhe"><img src="img/plus.png"/></a></div></div> </div> <div id="grid4B"> <div class="menu-noticias-foto"><a href="noticia-detalhe"><img src="img/sucos.jpg" width="100%"/></a></div> <div class="menu-varejo-nome">SUCOS</div> <div class="menu-noticias-text"> A ingestão de alimentos saudáveis também garante mais ene... 
</div><div id="grid12"><div class="menu-noticias-plus"><a href="noticia-detalhe"><img src="img/plus.png"/></a></div></div> </div> <div id="grid4B"> <div class="menu-noticias-foto"><a href="noticia-detalhe"><img src="img/sucos.jpg" width="100%"/></a></div> <div class="menu-varejo-nome">SUCOS</div> <div class="menu-noticias-text"> A ingestão de alimentos saudáveis também garante mais ene... </div><div id="grid12"><div class="menu-noticias-plus"><a href="noticia-detalhe"><img src="img/plus.png"/></a></div></div> </div> </div></div></div> <?php include'footer.php';?> </body> </html>
a3fe5b8499a92d91b0c75da35f36b082d61f5f16
[ "PHP" ]
8
PHP
marcelommarra/vox-alimentos
2ee83fb72c1caac9c047cbad5074fbd75d6bf4ae
3873a250520a9610b2f3d66f8c77b1bcb5cb2de1
refs/heads/master
<file_sep><?php /** * Created by PhpStorm. * User: yanni * Date: 11/7/2018 * Time: 11:47 PM */ ini_set("display_errors", "on"); error_reporting(E_ALL & ~E_NOTICE); header("Content-Type: text/json"); require_once '../../classes/PDO_Mysql.php'; //DB Anbindung require_once '../../classes/Official.php'; require_once '../../classes/Club.php'; require_once '../../classes/User.php'; $user = \rperv\User::checkSession(); $officialToEdit = \rperv\Official::fromOID(intval($_POST["id"])); if($officialToEdit->getFirstname() == '' or $officialToEdit->getFirstname() == null) { echo json_encode(["success" => false, "error" => "user not found"]); exit(); } if(isset($_POST["firstname"])) $officialToEdit->setFirstname($_POST["firstname"]); if(isset($_POST["lastname"])) $officialToEdit->setLastname($_POST["lastname"]); if(isset($_POST["gender"])) $officialToEdit->setGender($_POST["gender"]); if(isset($_POST["title"])) $officialToEdit->setTitle($_POST["title"]); if(isset($_POST["birthday"])) $officialToEdit->setBirthday($_POST["birthday"]); if(isset($_POST["function"])) $officialToEdit->setFunction($_POST["function"]); if(isset($_POST["clubID"])) $officialToEdit->setClubID($_POST["clubID"]); $officialToEdit->saveChanges(); echo json_encode(["success" => true]);<file_sep><?php /** * Created by PhpStorm. * User: yanni * Date: 11/7/2018 * Time: 11:24 PM */ namespace rperv; class Official { private $pdo; private $firstname, $lastname; private $gender, $title, $birthday, $function; private $oID, $clubID; /** * Official constructor. 
* * @param int $oID * @param string $firstname * @param string $lastname * @param string $gender * @param string $title * @param string $function * @param date $birthday * @param int $clubID */ public function __construct($oID, $firstname, $lastname, $gender, $title, $function, $birthday, $clubID) { $this->pdo = new PDO_MYSQL(); $this->oID = $oID; $this->firstname = $firstname; $this->lastname = $lastname; $this->gender = $gender; $this->title = $title; $this->birthday = $birthday; $this->function = $function; $this->clubID = $clubID; } /** * creates a new instance from a specific uID using dataO from db * * @param int $oID * @return Official */ public static function fromOID($oID) { $pdo = new PDO_MYSQL(); $res = $pdo->query("SELECT * FROM rperv_officials WHERE oID = :oid", [":oid" => $oID]); return new Official($res->oID, $res->firstname, $res->lastname, $res->gender, $res->title, $res->function, $res->birthday, $res->clubID); } /** * Deletes a athlete * * @return bool */ public function delete() { return $this->pdo->query("DELETE FROM rperv_officials WHERE oID = :oid", [":oid" => $this->oID]); } /** * Saves the Changes made to this object to the db */ public function saveChanges() { $this->pdo->queryUpdate("rperv_officials", ["firstname" => $this->firstname, "lastname" => $this->lastname, "gender" => $this->gender, "title" => $this->title, "birthday" => $this->birthday, "fnct" => $this->function, "clubID" => $this->clubID], "oID = :oid", ["oid" => $this->oID] ); } /** * Creates a new athlete from the give attribs * * @param $firstname * @param $lastname * @param $gender * @param $title * @param $birthday * @param $clubID */ public static function create($firstname, $lastname, $gender, $title, $function, $birthday, $clubID) { $pdo = new PDO_MYSQL(); $pdo->queryInsert("rperv_officials", ["firstname" => $firstname, "lastname" => $lastname, "gender" => $gender, "title" => $title, "birthday" => $birthday, "function" => $function, "clubID" => $clubID] ); } /** * 
@return mixed */ public function getFirstname() { return $this->firstname; } /** * @param mixed $firstname */ public function setFirstname($firstname) { $this->firstname = $firstname; } /** * @return mixed */ public function getLastname() { return $this->lastname; } /** * @param mixed $lastname */ public function setLastname($lastname) { $this->lastname = $lastname; } /** * @return mixed */ public function getGender() { return $this->gender; } /** * @param mixed $gender */ public function setGender($gender) { $this->gender = $gender; } /** * @return mixed */ public function getTitle() { return $this->title; } /** * @param mixed $title */ public function setTitle($title) { $this->title = $title; } /** * @return mixed */ public function getBirthday() { return $this->birthday; } /** * @param mixed $birthday */ public function setBirthday($birthday) { $this->birthday = $birthday; } /** * @return mixed */ public function getFunction() { return $this->function; } /** * @param mixed $function */ public function setFunction($function) { $this->function = $function; } /** * @return int */ public function getOID() { return $this->oID; } /** * @param int $oID */ public function setOID($oID) { $this->oID = $oID; } /** * @return int */ public function getClubID() { return $this->clubID; } /** * @param int $clubID */ public function setClubID($clubID) { $this->clubID = $clubID; } /** * Specify data which should be serialized to JSON * * @link https://php.net/manual/en/jsonserializable.jsonserialize.php * @return mixed data which can be serialized by <b>json_encode</b>, * which is a value of any type other than a resource. * @since 5.4.0 */ public function jsonSerialize() { return [ "oID" => $this->oID, "firstname" => $this->firstname, "lastname" => $this->lastname, "gender" => $this->gender, "title" => $this->title, "birthday" => $this->birthday, "function" => $this->function, "clubID" => Club::fromClubID($this->clubID) ]; } }<file_sep><?php /** * Created by PhpStorm. 
* User: yanni * Date: 11/7/2018 * Time: 11:15 PM */ namespace rperv; class Athlete implements \JsonSerializable { private $pdo; private $firstname, $lastname; private $gender, $title, $birthday; private $aID, $clubID; /** * Athlete constructor. * * @param int $aID * @param string $firstname * @param string $lastname * @param string $gender * @param string $title * @param date $birthday * @param int $clubID */ public function __construct($aID, $firstname, $lastname, $gender, $title, $birthday, $clubID) { $this->pdo = new PDO_MYSQL(); $this->aID = $aID; $this->firstname = $firstname; $this->lastname = $lastname; $this->gender = $gender; $this->title = $title; $this->birthday = $birthday; $this->clubID = $clubID; } /** * creates a new instance from a specific uID using dataO from db * * @param int $aID * @return Athlete */ public static function fromAID($aID) { $pdo = new PDO_MYSQL(); $res = $pdo->query("SELECT * FROM rperv_athletes WHERE aID = :aid", [":aid" => $aID]); return new Athlete($res->aID, $res->firstname, $res->lastname, $res->gender, $res->title, $res->birthday, $res->clubID); } /** * Deletes a athlete * * @return bool */ public function delete() { return $this->pdo->query("DELETE FROM rperv_athletes WHERE aID = :aid", [":aid" => $this->aID]); } /** * Saves the Changes made to this object to the db */ public function saveChanges() { $this->pdo->queryUpdate("rperv_athletes", ["firstname" => $this->firstname, "lastname" => $this->lastname, "gender" => $this->gender, "title" => $this->title, "birthday" => $this->birthday, "clubID" => $this->clubID], "aID = :aid", ["aid" => $this->aID] ); } /** * Creates a new athlete from the give attribs * * @param $firstname * @param $lastname * @param $gender * @param $title * @param $birthday * @param $clubID */ public static function create($firstname, $lastname, $gender, $title, $birthday, $clubID) { $pdo = new PDO_MYSQL(); $pdo->queryInsert("rperv_athletes", ["firstname" => $firstname, "lastname" => $lastname, "gender" 
=> $gender, "title" => $title, "birthday" => $birthday, "clubID" => $clubID] ); } /** * @return mixed */ public function getFirstname() { return $this->firstname; } /** * @param mixed $firstname */ public function setFirstname($firstname) { $this->firstname = $firstname; } /** * @return mixed */ public function getLastname() { return $this->lastname; } /** * @param mixed $lastname */ public function setLastname($lastname) { $this->lastname = $lastname; } /** * @return mixed */ public function getGender() { return $this->gender; } /** * @param mixed $gender */ public function setGender($gender) { $this->gender = $gender; } /** * @return mixed */ public function getTitle() { return $this->title; } /** * @param mixed $title */ public function setTitle($title) { $this->title = $title; } /** * @return mixed */ public function getBirthday() { return $this->birthday; } /** * @param mixed $birthday */ public function setBirthday($birthday) { $this->birthday = $birthday; } /** * @return int */ public function getAID() { return $this->aID; } /** * @param int $aID */ public function setAID($aID) { $this->aID = $aID; } /** * @return int */ public function getClubID() { return $this->clubID; } /** * @param int $clubID */ public function setClubID($clubID) { $this->clubID = $clubID; } /** * Specify data which should be serialized to JSON * * @link https://php.net/manual/en/jsonserializable.jsonserialize.php * @return mixed data which can be serialized by <b>json_encode</b>, * which is a value of any type other than a resource. * @since 5.4.0 */ public function jsonSerialize() { return [ "aID" => $this->aID, "firstname" => $this->firstname, "lastname" => $this->lastname, "gender" => $this->gender, "title" => $this->title, "birthday" => $this->birthday, "clubID" => Club::fromClubID($this->clubID) ]; } }<file_sep><?php /** * Created by PhpStorm. 
* User: yanni * Date: 11/7/2018 * Time: 11:06 PM */ namespace rperv; class Club implements \JsonSerializable { private $pdo; private $clubID, $clubCity; private $clubName, $clubNameShort; /** * Club constructor. * * @param $clubID * @param $clubCity * @param $clubName * @param $clubNameShort */ public function __construct($clubID, $clubCity, $clubName, $clubNameShort) { $this->pdo = new PDO_MYSQL(); $this->clubID = $clubID; $this->clubCity = $clubCity; $this->clubName = $clubName; $this->clubNameShort = $clubNameShort; } /** * creates a new instance from a specific uID using dataO from db * * @param int $clubID * @return Club */ public static function fromClubID($clubID) { $pdo = new PDO_MYSQL(); $res = $pdo->query("SELECT * FROM rperv_clubs WHERE clubID = :cid", [":cid" => $clubID]); return new Club($res->clubID, $res->clubCity, $res->clubName, $res->clubNameShort); } /** * Deletes a user * * @return bool */ public function delete() { return $this->pdo->query("DELETE FROM rperv_clubs WHERE clubID = :cid", [":cid" => $this->clubID]); } /** * Saves the Changes made to this object to the db */ public function saveChanges() { $this->pdo->queryUpdate("rperv_club", ["clubName" => $this->clubName, "clubNameShort" => $this->clubNameShort, "clubCity" => $this->clubCity], "clubID = :cid", ["cid" => $this->clubID] ); } /** * Creates a new user from the give attribs * * @param string $clubName * @param string $clubNameShort * @param string $clubCity */ public static function create($clubName, $clubNameShort, $clubCity) { $pdo = new PDO_MYSQL(); $pdo->queryInsert("rperv_user", ["clubName" => $clubName, "clubNameShort" => $clubNameShort, "clubCity" => $clubCity] ); } /** * @return mixed */ public function getClubID() { return $this->clubID; } /** * @param mixed $clubID */ public function setClubID($clubID) { $this->clubID = $clubID; } /** * @return mixed */ public function getClubCity() { return $this->clubCity; } /** * @param mixed $clubCity */ public function 
setClubCity($clubCity) { $this->clubCity = $clubCity; } /** * @return mixed */ public function getClubName() { return $this->clubName; } /** * @param mixed $clubName */ public function setClubName($clubName) { $this->clubName = $clubName; } /** * @return mixed */ public function getClubNameShort() { return $this->clubNameShort; } /** * @param mixed $clubNameShort */ public function setClubNameShort($clubNameShort) { $this->clubNameShort = $clubNameShort; } /** * Specify data which should be serialized to JSON * * @link https://php.net/manual/en/jsonserializable.jsonserialize.php * @return mixed data which can be serialized by <b>json_encode</b>, * which is a value of any type other than a resource. * @since 5.4.0 */ public function jsonSerialize() { return [ "clubID" => $this->clubID, "clubName" => $this->clubName, "clubNameShort" => $this->clubNameShort, "clubCity" => $this->clubCity ]; } }<file_sep><?php /** * Created by PhpStorm. * User: yanni * Date: 29.09.2016 * Time: 19:53 */ ini_set("display_errors", "on"); error_reporting(E_ALL & ~E_NOTICE); header("Content-Type: text/json"); require_once '../../classes/PDO_Mysql.php'; //DB Anbindung require_once '../../classes/User.php'; $user = \rperv\User::checkSession(); $users = \rperv\User::getList($_GET["page"], intval($_GET["pagesize"]), utf8_decode($_GET["search"]), $_GET["sortO"]); echo json_encode($users);<file_sep><?php /** * Created by PhpStorm. 
* User: yanni * Date: 04.10.2016 * Time: 22:36 */ ini_set("display_errors", "on"); error_reporting(E_ALL & ~E_NOTICE); header("Content-Type: text/json"); require_once '../../classes/PDO_Mysql.php'; //DB Anbindung require_once '../../classes/User.php'; $user = \rperv\User::checkSession(); $username = $_POST["username"]; $passhash = $_POST["passhash"]; $email = $_POST["email"]; if(!\rrshop\User::doesUserNameExist($username)) { if($username != "" && $passhash != "" && $email) { \rperv\User::createUser($username, $passhash, $email); echo json_encode(["success" => true]); } else echo json_encode(["success" => false, "error" => "missing fields"]); } else echo json_encode(["success" => false, "error" => "username exists"]);<file_sep><?php /** * Created by PhpStorm. * User: yanni * Date: 11/7/2018 * Time: 11:46 PM */ ini_set("display_errors", "on"); error_reporting(E_ALL & ~E_NOTICE); header("Content-Type: text/json"); require_once '../../classes/PDO_Mysql.php'; //DB Anbindung require_once '../../classes/Club.php'; require_once '../../classes/User.php'; $user = \rperv\User::checkSession(); $clubToView = \rperv\Club::fromClubID(intval($_GET["id"])); if($clubToView->getClubID() != null) echo json_encode($clubToView); else echo json_encode(["success" => false, "error" => "ID unknown"]);<file_sep><?php /** * Created by PhpStorm. 
* User: yanni * Date: 11/7/2018 * Time: 11:46 PM */ ini_set("display_errors", "on"); error_reporting(E_ALL & ~E_NOTICE); header("Content-Type: text/json"); require_once '../../classes/PDO_Mysql.php'; //DB Anbindung require_once '../../classes/Athlete.php'; require_once '../../classes/Club.php'; require_once '../../classes/User.php'; $user = \rperv\User::checkSession(); $firstname = $_POST["firstname"]; $lastname = $_POST["lastname"]; $gender = $_POST["gender"]; $title = $_POST["title"]; $birthday = $_POST["birthday"]; $clubID = $_POST["clubID"]; if($firstname != "" && $lastname != "" && $gender != "" && $title != "" && $birthday != "" && $clubID != "") { \rperv\Athlete::create($firstname, $lastname, $gender, $title, $birthday, $clubID); echo json_encode(["success" => true]); } else echo json_encode(["success" => false, "error" => "missing fields"]);<file_sep><?php /** * Created by PhpStorm. * User: yanni * Date: 11/7/2018 * Time: 11:46 PM */ ini_set("display_errors", "on"); error_reporting(E_ALL & ~E_NOTICE); header("Content-Type: text/json"); require_once '../../classes/PDO_Mysql.php'; //DB Anbindung require_once '../../classes/Club.php'; require_once '../../classes/User.php'; $user = \rperv\User::checkSession(); $clubName = $_POST["name"]; $clubNameShort = $_POST["shortName"]; $clubCity = $_POST["city"]; if($clubNameShort != "" && $clubName != "" && $clubCity != "") { \rperv\Club::create($clubName, $clubNameShort, $clubCity); echo json_encode(["success" => true]); } else echo json_encode(["success" => false, "error" => "missing fields"]);<file_sep><?php /** * Created by PhpStorm. 
* User: yanni * Date: 11/7/2018 * Time: 11:47 PM */ ini_set("display_errors", "on"); error_reporting(E_ALL & ~E_NOTICE); header("Content-Type: text/json"); require_once '../../classes/PDO_Mysql.php'; //DB Anbindung require_once '../../classes/Club.php'; require_once '../../classes/User.php'; $user = \rperv\User::checkSession(); $clubToEdit = \rperv\Club::fromClubID(intval($_POST["id"])); if($clubToEdit->getClubName() == '' or $clubToEdit->getClubName() == null) { echo json_encode(["success" => false, "error" => "club not found"]); exit(); } if(isset($_POST["name"])) $clubToEdit->setClubName($_POST["name"]); if(isset($_POST["nameShort"])) $clubToEdit->setClubNameShort($_POST["nameShort"]); if(isset($_POST["city"])) $clubToEdit->setClubCity($_POST["city"]); $clubToEdit->saveChanges(); echo json_encode(["success" => true]);<file_sep><?php /** * Created by PhpStorm. * User: yanni * Date: 11/7/2018 * Time: 11:47 PM */ ini_set("display_errors", "on"); error_reporting(E_ALL & ~E_NOTICE); header("Content-Type: text/json"); require_once '../../classes/PDO_Mysql.php'; //DB Anbindung require_once '../../classes/Athlete.php'; require_once '../../classes/Club.php'; require_once '../../classes/User.php'; $user = \rperv\User::checkSession(); $athleteToEdit = \rperv\Athlete::fromAID(intval($_POST["id"])); if($athleteToEdit->getFirstname() == '' or $athleteToEdit->getFirstname() == null) { echo json_encode(["success" => false, "error" => "user not found"]); exit(); } if(isset($_POST["firstname"])) $athleteToEdit->setFirstname($_POST["firstname"]); if(isset($_POST["lastname"])) $athleteToEdit->setLastname($_POST["lastname"]); if(isset($_POST["gender"])) $athleteToEdit->setGender($_POST["gender"]); if(isset($_POST["title"])) $athleteToEdit->setTitle($_POST["title"]); if(isset($_POST["birthday"])) $athleteToEdit->setBirthday($_POST["birthday"]); if(isset($_POST["clubID"])) $athleteToEdit->setClubID($_POST["clubID"]); $athleteToEdit->saveChanges(); echo json_encode(["success" => true]);
f703ad00865f71320ac244f0da375747872de8c9
[ "PHP" ]
11
PHP
yannick9906/rperv-tool
b71e83880e3af19ac37292a13028fc23df50ddac
446e455eab3e1a9ac73e4ddeed6546967ac3fda0
refs/heads/master
<repo_name>tlystad24/nodeuino<file_sep>/LED Control/arduino.js // Import Johnny Five library var five = require('johnny-five'); var board = new five.Board; // Import express var express = require('express'); var app = express(); // Run codeblock when board is ready board.on('ready', function() { console.log('Ready'); var led = new five.Led(13); var state = 'off'; app.get('/', function(req, res) { res.send('<a href="/toggle">Toggle LED</a><br><p>The led is currently ' + state); }); app.get('/toggle', function(req, res) { res.redirect('/'); if (state == 'off') { led.on(); state = 'on'; } else { led.off(); state = 'off'; } }); app.listen('1337', () => { console.log('Listening!')}); }); <file_sep>/LED Control/README.md # Arduino + NodeJS ## Requirements - Arduino - LED `(pin 13)` - NODE.JS + Express + Johnny-Five ## Deployment How to run the program ### Arduino The arduino is using the firmata protocol to recieve instructions from the NodeJS app. 1. Upload `StandardFirmata` to Arduino 2. Connect the Arduino to your computer ### Server The server is serving a simple website with a button and a status indicator. 1. After downloading, run `npm install` to install the dependencies. 2. Run the server using `npm start` or `node arduino.js`. 3. The server will run on post `1337` unless another port is specified. 4. Open up a browser and navigate to `http://localhost:1337` and click the button to toggle between the led states.
252e86d0053258bfbf67a7d8b683a73d4fdaddcb
[ "JavaScript", "Markdown" ]
2
JavaScript
tlystad24/nodeuino
98c7b88fb6159eb0deccc8032070a5952a1fc9f9
17eebdc48205ba3d88d0cffafec2100032caef32
refs/heads/master
<repo_name>MasyafXV/Labyrinth<file_sep>/assets/scripts/tiles.py import itertools import bpy walls = ['WallRight', 'WallUp', 'WallLeft', 'WallDown'] for bools in itertools.product([True, False], repeat=4): walls_to_hide = [wall for (i,wall) in enumerate(walls) if not bools[i]] for wall in walls: bpy.data.objects[wall].hide_render = False for wall in walls_to_hide: bpy.data.objects[wall].hide_render = True suffix = ''.join(str(int(b)) for b in bools) bpy.context.scene.render.filepath = '../maze-tile%s' % suffix bpy.ops.render.render(write_still=True) <file_sep>/src/constants.h // General constants #define SCREEN_WIDTH 900 #define SCREEN_HEIGHT (900 * 4 / 5) // Menu constants #define TITLE_FILENAME "../assets/maze.png" #define TITLE_WIDTH 800 #define TITLE_X ((SCREEN_WIDTH - TITLE_WIDTH) / 2) #define TITLE_Y 0 #define PLAY_FILENAME "../assets/play.png" #define PLAY_WIDTH 400 #define PLAY_X ((SCREEN_WIDTH - PLAY_WIDTH) / 2) #define PLAY_Y 300 #define QUIT_FILENAME "../assets/quit.png" #define QUIT_WIDTH 400 #define QUIT_X ((SCREEN_WIDTH - QUIT_WIDTH) / 2) #define QUIT_Y 400 // Maze #define MAZE_FILENAME "../assets/level1.tmx" #define MAZE_INITIAL_X 0 #define MAZE_INITIAL_Y 0 #define MAZE_NUM_ROWS 8 #define MAZE_NUM_COLS 10 // Character #define CHARACTER_SPRITESHEET "../assets/character/walking.png" #define CHARACTER_SCALE ((SCREEN_WIDTH) / 2560.0) #define MOVE_DURATION 200 #define CHARACTER_WALKING_UP_ROW 0 #define CHARACTER_WALKING_LEFT_ROW 1 #define CHARACTER_WALKING_DOWN_ROW 2 #define CHARACTER_WALKING_RIGHT_ROW 3 #define CHARACTER_BETWEEN_FRAME (MOVE_DURATION / 20) #define CHARACTER_HORIZONTAL_STEP (SCREEN_WIDTH / 10) #define CHARACTER_VERTICAL_STEP (SCREEN_HEIGHT / 8) <file_sep>/src/utils.h #ifndef UTILS_H #define UTILS_H // -------------- // // Data structure // // -------------- // enum Direction { // Elementary directions DIRECTION_RIGHT, // Right DIRECTION_UP, // Up direction DIRECTION_LEFT, // Left direction DIRECTION_DOWN, // Down direction }; struct Point { 
// Representing a 2D point int x; // The x-coordinate int y; // The y-coordinate }; struct Position { // Representing a position in the maze int row; // The row number int col; // The column number }; #endif <file_sep>/src/game.h #ifndef GAME_H #define GAME_H #include "maze.h" #include "character.h" // --------------- // // Data structures // // --------------- // enum GameState { GAME_PLAY, // The player is playing GAME_MENU, // The player is returning to the menu GAME_QUIT // The player is quitting }; struct Game { struct Maze *maze; // The current maze struct Character *character; // The character SDL_Renderer *renderer; // The renderer enum GameState state; // The state of the game }; // --------- // // Functions // // --------- // /** * Creates a new game. * * @param The renderer for the game * @return A pointer to the game, NULL if there was an error */ struct Game *Game_initialize(SDL_Renderer *renderer); /** * Delete the game. * * @param game The game to delete */ void Game_delete(struct Game *game); /** * Start running the game. * * @param game The game to run */ void Game_run(struct Game *game); #endif <file_sep>/README.md # Maze Game A small maze game used to show the basic usage of the SDL2 library in C. ## Dependencies - SDL2 - SDL2_image - [TMX](https://github.com/baylej/tmx) - [libXML2](http://www.xmlsoft.org/) The program depends on SDL2 and SDL2_image, as well as a C parser for TMX files. TMX is a XML-format used by the [Tiled map editor](http://www.mapeditor.org/), a free and open-source software used to create maps. In particular, the TMX parser uses LibXML2. ## Supported platforms Tested on Mac OS 10.10.5 (Yosemite). In principle, it should work on any UNIX system provided that the dependencies are all installed. ## Compilation Make sure you have installed all dependencies. The compilation relies on the `sdl2-config` program that is shipped when installing `libsdl2-dev`, and the `xml2-config` program shipped with `libxml2-dev`. 
You also need to install a TMX parser for C. Move to the `src` folder and then simply type ~~~ make ~~~ followed by ~~~ ./maze ~~~ to run. ## Content There are two folders: 1. `assets` contain all assets used in the game. This includes 1. A TMX file (`level1.tmx`) storing the single maze in this game; 1. Three text images (`play.png`, `quit.png` and `maze.png`) used in the menu; 1. Two Blender files (`assets.blend` and `character.blend`) used to render the spritesheets; 1. A texture used by Blender for the character (`character-texture.png`); 1. The tiles generated by Blender (`maze-tile*.png`); 1. An animable sprite sheet (`character/walking.png`) for animating the character. 1. `src` contain all header and source files (`*.h`, `*.c`). ## Status There is currently no known bug. <file_sep>/src/maze.c #include "maze.h" /** * Generate the texture of the whole maze. * * @param maze The maze whose texture is generated */ void Maze_generateTexture(struct Maze *maze); static SDL_Renderer *Maze_renderer; void* Maze_imageLoader(const char *path) { return IMG_LoadTexture(Maze_renderer, path); } // ---------------- // // Public functions // // ---------------- // struct Maze *Maze_create(char *filename, SDL_Renderer *renderer) { // Warning: modifying global variables of TMX for loading map Maze_renderer = renderer; tmx_img_load_func = (void* (*)(const char*))Maze_imageLoader; tmx_img_free_func = (void (*)(void*)) SDL_DestroyTexture; tmx_map *map = tmx_load(filename); if (!map) { return NULL; } else { struct Maze *maze; maze = (struct Maze*)malloc(sizeof(struct Maze)); maze->map = map; maze->renderer = renderer; Maze_generateTexture(maze); return maze; } } void Maze_render(struct Maze *maze) { SDL_RenderCopy(maze->renderer, maze->texture, NULL, NULL); } bool Maze_hasWall(struct Maze *maze, struct Position position, enum Direction direction) { tmx_layer *layer = maze->map->ly_head; unsigned int gid = layer->content.gids[(position.row * maze->map->width) + position.col]; switch 
(direction) { case DIRECTION_RIGHT: return (gid - 1) % 16 < 8; break; case DIRECTION_UP: return (gid - 1) % 8 < 4; break; case DIRECTION_LEFT: return (gid - 1) % 4 < 2; break; case DIRECTION_DOWN: return (gid - 1) % 2 < 1; break; } } void Maze_delete(struct Maze *maze) { if (maze != NULL) { if (maze->texture != NULL) { SDL_DestroyTexture(maze->texture); maze->texture = NULL; } tmx_map_free(maze->map); free(maze); } } // ----------------- // // Private functions // // ----------------- // void Maze_generateTexture(struct Maze *maze) { tmx_map *map = maze->map; int textureWidth = map->width * map->tile_width; int textureHeight = map->height * map->tile_height; maze->texture = SDL_CreateTexture(maze->renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_TARGET, textureWidth, textureHeight); SDL_SetRenderTarget(maze->renderer, maze->texture); SDL_RenderClear(maze->renderer); tmx_layer *layer; for (layer = map->ly_head; layer; layer = layer->next) { unsigned int i, j; for (i = 0; i < map->height; i++) { for (j = 0; j < map->width; j++) { unsigned int gid; SDL_Rect srcrect, dstrect; SDL_Texture* texture; gid = layer->content.gids[(i * map->width) + j]; tmx_image *image = map->tiles[gid]->image; image = map->tiles[gid]->image; srcrect.x = map->tiles[gid]->ul_x; srcrect.y = map->tiles[gid]->ul_y; srcrect.w = image->width; srcrect.h = image->height; dstrect.w = image->width; dstrect.h = image->height; dstrect.x = j * image->width; dstrect.y = i * image->height; texture = (SDL_Texture*)image->resource_image; SDL_RenderCopy(maze->renderer, texture, &srcrect, &dstrect); } } } SDL_SetRenderTarget(maze->renderer, NULL); } <file_sep>/src/maze.h #ifndef MAZE_H #define MAZE_H #include "utils.h" #include "sdl2.h" #include <stdbool.h> #include <tmx.h> // --------------- // // Data structures // // --------------- // struct Maze { tmx_map *map; // The TMX map struct Position start; // The starting position struct Position end; // The end position SDL_Texture *texture; // The texture 
(image) SDL_Renderer *renderer; // The renderer }; // --------- // // Functions // // --------- // /** * Creates a maze from a TMX file. * * @param filename The path of the TMX file * @param renderer The renderer * @return A pointer to the create maze, NULL if there was an error; * Call IMG_GetError() for more information. */ struct Maze *Maze_create(char *filename, SDL_Renderer *renderer); /** * Delete the given maze. * * @param maze The maze to be deleted */ void Maze_delete(struct Maze *maze); /** * Renders the maze with a renderer. * * @param maze The maze to be rendered */ void Maze_render(struct Maze *maze); /** * Returns true if and only if there is a wall in the given direction. * * Warning: Assumes that the tiles' gid are enumerated in lexicographic * order with respect to the presence (1) or absence (0) of the right, * upper, left and lower wall respectively. * * @param maze The maze * @param position The position * @param direction The direction to look at * @return true if and only if there is a wall */ bool Maze_hasWall(struct Maze *maze, struct Position position, enum Direction direction); #endif <file_sep>/src/sdl2.h #include <SDL.h> #include <SDL_image.h> <file_sep>/src/character.h #ifndef CHARACTER_H #define CHARACTER_H #include "utils.h" #include "maze.h" #include "sdl2.h" #include "animated_spritesheet.h" // --------------- // // Data structures // // --------------- // struct CharacterMove { struct Point source; // The source position struct Point target; // The target position enum Direction direction; // The direction of the move int startTime; // The start time int endTime; // The end time int duration; // The total duration of the move }; struct Character { struct AnimatedSpritesheet *animatedSpritesheet; // His spritesheet struct Point screenPosition; // His position in the screen struct Position mazePosition; // His position in the maze bool moving; // True if currently moving struct CharacterMove currentMove; // The current move 
SDL_Renderer *renderer; // The renderer }; // --------- // // Functions // // --------- // /** * Creates the character. * * @param renderer The renderer * @return A pointer to the character, NULL if there was an error; * Call IMG_GetError() for more information. */ struct Character *Character_create(SDL_Renderer *renderer); /** * Deletes the character. * * @param character The character to delete */ void Character_delete(struct Character *character); /** * Renders the character. * * @param character The character to render */ void Character_render(struct Character *character); /** * Moves the character. * * If the character is already moving, the present move is ignored. * * @param character The character to move * @param direction The direction of the move * @param duration The time (in ms) of the whole move */ void Character_move(struct Character *character, enum Direction direction, int duration); #endif <file_sep>/src/game.c #include "game.h" #include "sdl2.h" #include "constants.h" // --------------------------- // // Private function prototypes // // --------------------------- // bool Game_validMove(struct Game *game, enum Direction direction); struct Game *Game_initialize(SDL_Renderer *renderer) { struct Game *game; game = (struct Game*)malloc(sizeof(struct Game)); game->renderer = renderer; game->maze = Maze_create(MAZE_FILENAME, renderer); game->character = Character_create(renderer); game->state = GAME_PLAY; return game; } void Game_delete(struct Game *game) { if (game != NULL) { Maze_delete(game->maze); Character_delete(game->character); free(game); } } void Game_run(struct Game *game) { SDL_Event e; while (game->state == GAME_PLAY) { while (SDL_PollEvent(&e) != 0) { if (e.type == SDL_QUIT) { game->state = GAME_QUIT; } else if (e.type == SDL_KEYDOWN) { switch (e.key.keysym.sym) { case SDLK_LEFT: if (Game_validMove(game, DIRECTION_LEFT)) { Character_move(game->character, DIRECTION_LEFT, MOVE_DURATION); } break; case SDLK_RIGHT: if (Game_validMove(game, 
DIRECTION_RIGHT)) { Character_move(game->character, DIRECTION_RIGHT, MOVE_DURATION); } break; case SDLK_UP: if (Game_validMove(game, DIRECTION_UP)) { Character_move(game->character, DIRECTION_UP, MOVE_DURATION); } break; case SDLK_DOWN: if (Game_validMove(game, DIRECTION_DOWN)) { Character_move(game->character, DIRECTION_DOWN, MOVE_DURATION); } break; } } } if (game->character->mazePosition.row == game->maze->map->height - 1 && game->character->mazePosition.col == game->maze->map->width - 1) { game->state = GAME_MENU; } SDL_SetRenderDrawColor(game->renderer, 0x00, 0x00, 0x00, 0x00 ); SDL_RenderClear(game->renderer); Maze_render(game->maze); Character_render(game->character); SDL_RenderPresent(game->renderer); } } bool Game_validMove(struct Game *game, enum Direction direction) { struct Position currentPosition = game->character->mazePosition; struct Position targetPosition = currentPosition; switch (direction) { case DIRECTION_RIGHT: ++targetPosition.col; break; case DIRECTION_UP: --targetPosition.row; break; case DIRECTION_LEFT: --targetPosition.col; break; case DIRECTION_DOWN: ++targetPosition.row; break; } return Maze_hasWall(game->maze, game->character->mazePosition, direction); } <file_sep>/src/character.c #include "constants.h" #include "character.h" struct Character *Character_create(SDL_Renderer *renderer) { struct Character *character; character = (struct Character*)malloc(sizeof(struct Character)); character->renderer = renderer; character->moving = false; character->screenPosition.x = MAZE_INITIAL_X; character->screenPosition.y = MAZE_INITIAL_Y; character->mazePosition.row = 0; character->mazePosition.col = 0; character->animatedSpritesheet = AnimatedSpritesheet_create(CHARACTER_SPRITESHEET, 4, 20, 80, CHARACTER_BETWEEN_FRAME, renderer); character->animatedSpritesheet->spritesheet->scale = CHARACTER_SCALE; return character; } void Character_delete(struct Character *character) { if (character != NULL) { 
AnimatedSpritesheet_delete(character->animatedSpritesheet); free(character); } } void Character_render(struct Character *character) { if (character->moving) { int now = SDL_GetTicks(); if (now > character->currentMove.endTime) { character->moving = false; switch (character->currentMove.direction) { case DIRECTION_RIGHT: character->mazePosition.col += 1; break; case DIRECTION_UP: character->mazePosition.row -= 1; break; case DIRECTION_LEFT: character->mazePosition.col -= 1; break; case DIRECTION_DOWN: character->mazePosition.row += 1; break; } AnimatedSpritesheet_stop(character->animatedSpritesheet); character->screenPosition = character->currentMove.target; } else { float t = (now - character->currentMove.startTime) / (float)character->currentMove.duration; character->screenPosition.x = (1 - t) * character->currentMove.source.x + t * character->currentMove.target.x; character->screenPosition.y = (1 - t) * character->currentMove.source.y + t * character->currentMove.target.y; } } AnimatedSpritesheet_render(character->animatedSpritesheet, character->screenPosition.x, character->screenPosition.y); } void Character_move(struct Character *character, enum Direction direction, int duration) { if (!character->moving) { character->moving = true; int now = SDL_GetTicks(); character->currentMove.startTime = now; character->currentMove.duration = duration; character->currentMove.endTime = now + duration; character->currentMove.direction = direction; character->currentMove.source = character->screenPosition; character->currentMove.target = character->screenPosition; switch (direction) { case DIRECTION_RIGHT: character->currentMove.target.x += CHARACTER_HORIZONTAL_STEP; AnimatedSpritesheet_setRow(character->animatedSpritesheet, CHARACTER_WALKING_RIGHT_ROW); break; case DIRECTION_UP: character->currentMove.target.y -= CHARACTER_VERTICAL_STEP; AnimatedSpritesheet_setRow(character->animatedSpritesheet, CHARACTER_WALKING_UP_ROW); break; case DIRECTION_LEFT: 
character->currentMove.target.x -= CHARACTER_HORIZONTAL_STEP; AnimatedSpritesheet_setRow(character->animatedSpritesheet, CHARACTER_WALKING_LEFT_ROW); break; case DIRECTION_DOWN: character->currentMove.target.y += CHARACTER_VERTICAL_STEP; AnimatedSpritesheet_setRow(character->animatedSpritesheet, CHARACTER_WALKING_DOWN_ROW); break; } AnimatedSpritesheet_run(character->animatedSpritesheet); } }
f634bd4fed8d3aac01343729251a25349d613845
[ "Markdown", "C", "Python" ]
11
Python
MasyafXV/Labyrinth
3e70cfa4b1d38402232a9d4ce86f2784b02d33c5
38bff298fdff4d9c81e729e3490ac3dde587f4c4
refs/heads/master
<repo_name>msyamsula/chat<file_sep>/deploy-chat-aws.sh source .env-chat sudo docker stop $IMAGE sudo docker rm $IMAGE sudo docker rmi $DOCKER_USERNAME/$IMAGE sudo docker pull $DOCKER_USERNAME/$IMAGE sudo docker run -itd --name $IMAGE -p 5000:5000 $DOCKER_USERNAME/$IMAGE <file_sep>/io.js // server side io const socketio = require("socket.io"); let appServer = require("./app"); const httpServer = require("http").createServer(appServer); httpServer.listen(5000); const io = socketio(httpServer); const namespaces = require("./network"); io.on("connect", (socket) => { socket.emit("server-send-namespace", { data: namespaces }); }); namespaces.forEach((ns) => { io.of(ns.endpoint).on("connect", (nsSocket) => { nsSocket.emit(`welcome-to-${ns.name}`, `welcome-to-${ns.name}`); nsSocket.on("join-room", (message, joinCallback) => { // join room nsSocket.join(message.data); // find current room object/model const currentRoom = ns.rooms.filter((room) => { return room.name == message.data; }); io.of(ns.endpoint) .in(message.data) .clients((err, clients) => { // joinCallback(clients.length); let totalMember = clients.length; io.of(ns.endpoint) .to(message.data) .emit("new-user", { data: totalMember }); }); nsSocket.emit("catch-history", { data: currentRoom[0].history }); }); nsSocket.on("message-to-server", (message) => { // find room name from socket.rooms const roomName = Object.keys(nsSocket.rooms)[1]; // find current room object/model const currentRoom = ns.rooms.filter((room) => { return room.name == roomName; }); // push data to currentRoom history currentRoom[0].addMessage(message.data); // send to room io.of(ns.endpoint) .to(roomName) .emit("message-from-server", { data: message.data }); }); nsSocket.on("leave-room", (message) => { const currentRoom = Object.keys(nsSocket.rooms)[1]; // leave room nsSocket.leave(currentRoom); io.of(ns.endpoint) .in(currentRoom) .clients((err, clients) => { // enter room and publish to all room about user left io.of(ns.endpoint) 
.to(currentRoom) .emit("leave-room", { data: clients.length }); }); }); }); }); <file_sep>/frontend/main.js // client side io const io = require("socket.io-client"); import writeChat from "./module/writeChat"; const url = "http://localhost:5000"; const socket = io(url); let nsSocket; socket.on("server-send-namespace", (message) => { // namespace tab let nsHTML = document.querySelector(".namespaces"); nsHTML.innerHTML = ""; let namespaces = message.data; console.log(namespaces); // insert html to namespace tab namespaces.forEach((ns) => { nsHTML.insertAdjacentHTML( "beforeend", `<li id=ns-${ns.id}>${ns.name}</li>` ); // add event listener let nsId = document.getElementById(`ns-${ns.id}`); nsId.addEventListener("click", (event) => { event.preventDefault(); // room tab let roomHTML = document.querySelector(".rooms"); roomHTML.innerHTML = ""; const nsRoomList = ns.rooms; // insert html to room tab based on clicked namespace nsRoomList.forEach((room) => { roomHTML.insertAdjacentHTML( "beforeend", `<li id=room-${room.id}>${room.name}</li>` ); // add event listener to each room let roomId = document.getElementById(`room-${room.id}`); roomId.addEventListener("click", (event) => { event.preventDefault(); // grab chat tab and add current room to HTML let chatHTML = document.querySelector(".chat"); let totalMember = 1; chatHTML.innerHTML = `<div class="room-header ${totalMember}">Current room: ${room.name}</div> <div class="room-member">${totalMember} active user(s)</div>`; // add input text chatHTML.insertAdjacentHTML( "beforeend", `<input type="text" class="text-box" />` ); // disconnect from previous namespace if (nsSocket) { nsSocket.emit("leave-room", {}); nsSocket.close(); } // connect to current namespace nsSocket = io(`${url}${ns.endpoint}`); nsSocket.emit("join-room", { data: room.name }); nsSocket.on("catch-history", (message) => { const history = message.data; history.forEach((text) => { chatHTML.insertAdjacentHTML("beforeend", `<div>${text}</div>`); }); }); 
nsSocket.on("new-user", (message) => { let roomHeaderHTML = document.querySelector(".room-header"); let roomMemberHTML = document.querySelector(".room-member"); totalMember = message.data; roomHeaderHTML.innerHTML = `Current room: ${room.name}`; roomMemberHTML.innerHTML = `${totalMember} active user(s)`; }); nsSocket.on("leave-room", (message) => { let roomMemberHTML = document.querySelector(".room-member"); totalMember = message.data; roomMemberHTML.innerHTML = `${totalMember} active user(s)`; }); // add textBox object let textBox = new writeChat(nsSocket); // nsSocket.on(`welcome-to-${ns.name}`, (data) => { // // join room // }); nsSocket.on("message-from-server", (message) => { console.log(message.data); let chatdiv = document.querySelector(".chat"); chatdiv.insertAdjacentHTML( "beforeend", `<div>${message.data}</div>` ); }); }); }); }); }); }); <file_sep>/frontend/module/writeChat.js export default class writeChat { constructor(nsSocket) { this.nsSocket = nsSocket; this.textBox = document.querySelector(".text-box"); this.textBox.focus(); this.events(); } events() { this.textBox.addEventListener("keypress", (e) => { if (e.key === "Enter") { this.sendMessage(this.textBox.value); } }); } sendMessage(text) { this.textBox.value = ""; this.textBox.focus(); this.nsSocket.emit("message-to-server", { data: text }); } } <file_sep>/network.js // Bring in the room class const Namespace = require("./model/namespace"); const Room = require("./model/room"); // Set up the namespaces let namespaces = []; let footballNs = new Namespace(0, "football", "/football"); let formula1Ns = new Namespace(1, "formula1", "/formula1"); let musicNs = new Namespace(2, "music", "/music"); namespaces.push(footballNs, formula1Ns, musicNs); // Make the main room and add it to rooms. 
it will ALWAYS be 0 footballNs.addRoom(new Room(0, "New Articles", "football")); footballNs.addRoom(new Room(1, "Editors", "football")); footballNs.addRoom(new Room(2, "Other", "football")); formula1Ns.addRoom(new Room(0, "Firefox", "formula1")); formula1Ns.addRoom(new Room(1, "SeaMonkey", "formula1")); formula1Ns.addRoom(new Room(2, "SpiderMonkey", "formula1")); formula1Ns.addRoom(new Room(3, "Rust", "formula1")); musicNs.addRoom(new Room(0, "Debian", "music")); musicNs.addRoom(new Room(1, "Red Hat", "music")); musicNs.addRoom(new Room(2, "MacOs", "music")); musicNs.addRoom(new Room(3, "Kernal Development", "music")); module.exports = namespaces; <file_sep>/Dockerfile from node:latest workdir /app copy . . run ["npm", "install"] cmd ["npm", "run", "watch"]
2d0fe01c1db4d37ec02ed1b0d546d5abd6329e56
[ "JavaScript", "Dockerfile", "Shell" ]
6
Shell
msyamsula/chat
f80fa99d4474673823c568a9ec7a2344a5d9a30c
fc30e27ef00aa1e4a437659d97d7699793389282
refs/heads/master
<repo_name>LiuChuang0059/RobustGCN<file_sep>/README.md # RobustGCN This is a sample implementation of "[Robust Graph Convolutional Networks Against Adversarial Attacks](https://zw-zhang.github.io/files/2019_KDD_RGCN.pdf)", KDD 2019. ### Requirements ``` tensorflow >= 1.12 numpy >= 1.14.2 scipy >= 1.1.0 networkx >= 2.0.0 gcn (note that you need to follow https://github.com/tkipf/gcn to correctly install gcn instead of using pip) ``` ### Example Usage ``` python src/train.py --dataset cora ``` ### Full Command List ``` optional arguments: --dataset Dataset string. --learning_rate Initial learning rate. --epochs Number of epochs to train. --hidden Number of units in hidden layer. --dropout Dropout rate (1 - keep probability). --para_var Parameter of variance-based attention. --para_kl Parameter of kl regularization. --para_l2 Parameter for l2 loss. --early_stopping Tolerance for early stopping (# of epochs). ``` ### Cite If you find this code useful, please cite our paper: ``` @inproceedings{zhu2019robust, title={Robust graph convolutional networks against adversarial attacks}, author={<NAME> and <NAME> and <NAME> and <NAME>}, booktitle={Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery \& Data Mining}, pages={1399--1407}, year={2019} } ``` ### Acknowledgement Our code is adapted from the Tensorflow implementation of GCN by <NAME> (https://github.com/tkipf/gcn). 
<file_sep>/src/train.py import time import tensorflow as tf from utils import * from models import RGCN import random # Set random seed seed = 123 np.random.seed(seed) tf.set_random_seed(seed) random.seed(seed) # Settings flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora', 'citeseer', 'pubmed' flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.') flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.') flags.DEFINE_integer('hidden', 32, 'Number of units in hidden layer.') flags.DEFINE_float('dropout', 0.6, 'Dropout rate (1 - keep probability).') flags.DEFINE_float('para_var', 1, 'Parameter of variance-based attention') flags.DEFINE_float('para_kl', 5e-4, 'Parameter of kl regularization') flags.DEFINE_float('para_l2', 5e-4, 'Parameter for l2 loss.') flags.DEFINE_integer('early_stopping', 20, 'Tolerance for early stopping (# of epochs).') # Load data adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, label = load_data(FLAGS.dataset) features = preprocess_features(features) support = [preprocess_adj(adj, -0.5), preprocess_adj(adj, -1.0)] placeholders = { 'support': [tf.sparse_placeholder(tf.float32) for _ in range(2)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), 'num_features_nonzero': tf.placeholder(tf.int32), } model = RGCN(placeholders, input_dim=features[2][1], logging=True) sess = tf.Session() def evaluate(features, support, labels, mask, placeholders, adj): t_test = time.time() feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders, adj) outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val) return outs_val[0], outs_val[1], (time.time() - t_test) sess.run(tf.global_variables_initializer()) cost_val = [] 
for epoch in range(FLAGS.epochs): t = time.time() feed_dict = construct_feed_dict(features, support, y_train, train_mask, placeholders, adj) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) outs = sess.run([model.opt_op, model.loss, model.accuracy, model.vars], feed_dict=feed_dict) cost, _, duration = evaluate(features, support, y_val, val_mask, placeholders, adj) cost_val.append(cost) # Print results print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]), "train_acc=", "{:.5f}".format(outs[2]), "time=", "{:.5f}".format(time.time() - t)) if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]): print("Early stopping...") break print("Optimization Finished!") # Testing test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, placeholders, adj) print("Test set results:", "cost=", "{:.5f}".format(test_cost), "accuracy=", "{:.5f}".format(test_acc), "time=", "{:.5f}".format(test_duration)) <file_sep>/src/models.py from layers import * from utils import * flags = tf.app.flags FLAGS = flags.FLAGS class Model(object): def __init__(self, **kwargs): allowed_kwargs = {'name', 'logging'} for kwarg in kwargs.keys(): assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg name = kwargs.get('name') if not name: name = self.__class__.__name__.lower() self.name = name logging = kwargs.get('logging', False) self.logging = logging self.vars = {} self.placeholders = {} self.layers = [] self.activations = [] self.inputs = None self.outputs = None self.loss = 0 self.accuracy = 0 self.optimizer = None self.opt_op = None def _build(self): raise NotImplementedError def build(self): with tf.variable_scope(self.name): self._build() self.activations.append(self.inputs) for layer in self.layers: hidden = layer(self.activations[-1]) self.activations.append(hidden) self.outputs = self.activations[-1] variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name) 
self.vars = {var.name: var for var in variables} self._loss() self._accuracy() self.opt_op = self.optimizer.minimize(self.loss) def predict(self): pass def _loss(self): raise NotImplementedError def _accuracy(self): raise NotImplementedError def save(self, sess=None): if not sess: raise AttributeError("TensorFlow session not provided.") saver = tf.train.Saver(self.vars) save_path = saver.save(sess, "tmp/%s.ckpt" % self.name) print("Model saved in file: %s" % save_path) def load(self, sess=None): if not sess: raise AttributeError("TensorFlow session not provided.") saver = tf.train.Saver(self.vars) save_path = "tmp/%s.ckpt" % self.name saver.restore(sess, save_path) print("Model restored from file: %s" % save_path) class RGCN(Model): def __init__(self, placeholders, input_dim, **kwargs): super(RGCN, self).__init__(**kwargs) self.inputs = placeholders['features'] self.input_dim = input_dim self.output_dim = placeholders['labels'].get_shape().as_list()[1] self.placeholders = placeholders self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) self.build() def _loss(self): self.loss += FLAGS.para_l2 * (tf.nn.l2_loss(self.layers[0].vars['weights_0']) ) mean = self.layers[0].vars['mean'] var = self.layers[0].vars['var'] KL_divergence = 0.5 * tf.reduce_mean(tf.square(mean) + var - tf.log(1e-8 + var) - 1, 1) KL_divergence = tf.reduce_sum(KL_divergence) self.loss += FLAGS.para_kl * KL_divergence self.vars = self.layers[1].vars['var'] self.mean = self.layers[1].vars['mean'] self.loss += masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'], self.placeholders['labels_mask']) def _accuracy(self): mean_vector = tf.slice(self.outputs, [0, 0], [-1, self.output_dim]) self.accuracy = masked_accuracy(mean_vector, self.placeholders['labels'], self.placeholders['labels_mask']) def _build(self): self.layers.append(GGCL_F(input_dim=self.input_dim, output_dim=FLAGS.hidden, placeholders=self.placeholders, dropout=True, sparse_inputs=True, 
logging=self.logging)) self.layers.append(GGCL_D(input_dim=FLAGS.hidden, output_dim=self.output_dim, placeholders=self.placeholders, dropout=True, logging=self.logging)) def predict(self): return tf.nn.softmax(self.outputs) <file_sep>/src/layers.py from gcn.inits import * import tensorflow as tf flags = tf.app.flags FLAGS = flags.FLAGS _LAYER_UIDS = {} def get_layer_uid(layer_name=''): if layer_name not in _LAYER_UIDS: _LAYER_UIDS[layer_name] = 1 return 1 else: _LAYER_UIDS[layer_name] += 1 return _LAYER_UIDS[layer_name] def sparse_dropout(x, keep_prob, noise_shape): """Dropout for sparse tensors.""" random_tensor = keep_prob random_tensor += tf.random_uniform(noise_shape) dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool) pre_out = tf.sparse_retain(x, dropout_mask) return pre_out * (1./keep_prob) def dot(x, y, sparse=False): if sparse: res = tf.sparse_tensor_dense_matmul(x, y) else: res = tf.matmul(x, y) return res class Layer(object): def __init__(self, **kwargs): allowed_kwargs = {'name', 'logging'} for kwarg in kwargs.keys(): assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg name = kwargs.get('name') if not name: layer = self.__class__.__name__.lower() name = layer + '_' + str(get_layer_uid(layer)) self.name = name self.vars = {} logging = kwargs.get('logging', False) self.logging = logging self.sparse_inputs = False def _call(self, inputs): return inputs def __call__(self, inputs): with tf.name_scope(self.name): if self.logging and not self.sparse_inputs: tf.summary.histogram(self.name + '/inputs', inputs) outputs = self._call(inputs) if self.logging: tf.summary.histogram(self.name + '/outputs', outputs) return outputs def _log_vars(self): for var in self.vars: tf.summary.histogram(self.name + '/vars/' + var, self.vars[var]) class GGCL_F(Layer): """GGCL: the input is feature""" def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False, bias=False, featureless=False, **kwargs): super(GGCL_F, 
self).__init__(**kwargs) if dropout: self.dropout = placeholders['dropout'] else: self.dropout = 0. self.support = placeholders['support'] self.sparse_inputs = sparse_inputs self.featureless = featureless self.bias = bias self.output_dim = output_dim self.num_features_nonzero = placeholders['num_features_nonzero'] with tf.variable_scope(self.name + '_vars'): self.vars['weights_0'] = glorot([input_dim, output_dim], name='weights_0') if self.bias: self.vars['bias'] = zeros([output_dim], name='bias') if self.logging: self._log_vars() def _call(self, inputs): x = inputs if self.sparse_inputs: x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero) else: x = tf.nn.dropout(x, 1-self.dropout) supports = list() i = 0 if not self.featureless: pre_sup = dot(x, self.vars['weights_' + str(i)], sparse=self.sparse_inputs) else: pre_sup = self.vars['weights_' + str(i)] support = dot(self.support[i], pre_sup, sparse=True) supports.append(support) dim = int(self.output_dim / 2) mean_vector = tf.nn.elu(tf.slice(pre_sup, [0, 0], [-1, dim])) var_vector = tf.nn.relu(tf.slice(pre_sup, [0, dim], [-1, dim])) self.vars['mean'] = mean_vector self.vars['var'] = var_vector node_weight = tf.exp(-var_vector*FLAGS.para_var) mean_out = dot(self.support[0], mean_vector * node_weight, sparse=True) var_out = dot(self.support[1], var_vector * node_weight * node_weight, sparse=True) output = tf.concat([mean_out, var_out], axis=1) return output class GGCL_D(Layer): """GGCL: the input is distribution""" def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False, bias=False, featureless=False, **kwargs): super(GGCL_D, self).__init__(**kwargs) if dropout: self.dropout = placeholders['dropout'] else: self.dropout = 0. 
self.support = placeholders['support'] self.sparse_inputs = sparse_inputs self.featureless = featureless self.bias = bias self.dim = int(input_dim / 2) self.num_features_nonzero = placeholders['num_features_nonzero'] with tf.variable_scope(self.name + '_vars'): self.vars['weights_mean'] = glorot([self.dim, output_dim], name='weights_mean') self.vars['weights_var'] = glorot([self.dim, output_dim], name='weights_var') if self.bias: self.vars['bias'] = zeros([output_dim], name='bias') if self.logging: self._log_vars() def _call(self, inputs): x = inputs if self.sparse_inputs: x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero) else: x = tf.nn.dropout(x, 1-self.dropout) mean_vector = tf.slice(x, [0, 0], [-1, self.dim]) var_vector = tf.slice(x, [0, self.dim], [-1, self.dim]) mean_vector = tf.nn.elu(dot(mean_vector, self.vars['weights_mean'])) var_vector = tf.nn.relu(dot(var_vector, self.vars['weights_var'])) node_weight = tf.exp(-var_vector*FLAGS.para_var) mean_out = dot(self.support[0], mean_vector * node_weight, sparse=True) var_out = dot(self.support[1], var_vector * node_weight * node_weight, sparse=True) self.vars['var'] = var_out sample_v = tf.random_normal(tf.shape(var_out), 0, 1, dtype=tf.float32) mean_out = mean_out + (tf.math.sqrt(var_out + 1e-8) * sample_v) self.vars['mean'] = tf.nn.softmax(mean_out) output = mean_out return output
54562357088317383d95100610bb8374305fb307
[ "Markdown", "Python" ]
4
Markdown
LiuChuang0059/RobustGCN
d4cb3346b438cbe207c1b9e7c4499bd29e65febe
f1a7f6abeab3c82661e6497e601c02e9d55b0184
refs/heads/master
<repo_name>JakubRaban/project-manager<file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/aggregations/ParticipantList.java package pl.edu.agh.gastronomiastosowana.model.aggregations; import pl.edu.agh.gastronomiastosowana.model.Participant; public class ParticipantList extends AbstractAggregatedList<Participant> { public ParticipantList() { super(); } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/exceptions/NonPresentParticipantRemovalException.java package pl.edu.agh.gastronomiastosowana.model.exceptions; public class NonPresentParticipantRemovalException extends Exception { } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/Participant.java package pl.edu.agh.gastronomiastosowana.model; import javafx.beans.property.*; import javafx.collections.FXCollections; import javax.persistence.*; import java.time.LocalDate; import java.util.HashSet; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @Entity public class Participant { @Id @GeneratedValue(strategy = GenerationType.AUTO) private int participantID; @Transient private StringProperty name; @Transient private StringProperty surname; @Transient private StringProperty indexNumber; @Transient private StringProperty email; @Transient private BooleanProperty subscribed; @Transient private ObjectProperty<LocalDate> registrationDate; @Transient private SetProperty<ProjectGroup> participatesIn; @Transient private SetProperty<ProjectGroup> leaderIn; @Transient private SetProperty<Rating> ratings; public Participant(String name, String surname, String age, String email) { this(); setName(name); setSurname(surname); setIndexNumber(age); setEmail(email); } public Participant() { participatesIn = new SimpleSetProperty<>(this, "worksFor"); leaderIn = new SimpleSetProperty<>(this, "managedProjectGroup"); name = new SimpleStringProperty(this, "name"); surname = new SimpleStringProperty(this, "surname"); indexNumber = new SimpleStringProperty(this, 
"indexNumber"); email = new SimpleStringProperty(this, "email"); subscribed = new SimpleBooleanProperty(this, "subscribed"); registrationDate = new SimpleObjectProperty<LocalDate>(this, "registrationDate"); ratings = new SimpleSetProperty<>(this, "ratings"); setParticipatesIn(new HashSet<>()); setLeaderIn(new HashSet<>()); setRegistrationDate(LocalDate.now()); } @Access(AccessType.PROPERTY) @Column(nullable = false) public String getName() { return name.get(); } public void setName(String name) { this.name.set(name); } public StringProperty nameProperty() { return name; } @Access(AccessType.PROPERTY) @Column(nullable = false) public String getSurname() { return surname.get(); } public void setSurname(String surname) { this.surname.set(surname); } public StringProperty surnameProperty() { return surname; } @Access(AccessType.PROPERTY) @Column(nullable = false) public String getIndexNumber() { return indexNumber.get(); } public void setIndexNumber(String age) { this.indexNumber.set(age); } public StringProperty indexNumberProperty() { return indexNumber; } @Access(AccessType.PROPERTY) @Column(nullable = false, unique = true) public String getEmail() { return email.get(); } public void setEmail(String email) { this.email.set(email); } public StringProperty emailProperty() { return email; } @Access(AccessType.PROPERTY) @Column(nullable = false) public LocalDate getRegistrationDate() { return registrationDate.get(); } public void setRegistrationDate(LocalDate registrationDateProperty) { this.registrationDate.set(registrationDateProperty); } public ObjectProperty<LocalDate> registrationDateProperty() { return registrationDate; } @Access(AccessType.PROPERTY) @Column(nullable = false) public boolean isSubscribed() { return subscribed.get(); } public void setSubscribed(boolean subscribed) { this.subscribed.set(subscribed); } public BooleanProperty subscribedProperty() { return subscribed; } @Access(AccessType.PROPERTY) @ManyToMany public Set<ProjectGroup> 
getParticipatesIn() { return participatesIn.get(); } public void setParticipatesIn(Set<ProjectGroup> projectGroups) { this.participatesIn.set(FXCollections.observableSet(projectGroups)); } public SetProperty<ProjectGroup> worksForProperty() { return participatesIn; } @Access(AccessType.PROPERTY) @OneToMany(mappedBy = "leader") public Set<ProjectGroup> getLeaderIn() { return leaderIn.get(); } public void setLeaderIn(Set<ProjectGroup> projectGroups) { this.leaderIn.set(FXCollections.observableSet(projectGroups)); } public SetProperty<ProjectGroup> managedProjectGroupsProperty() { return leaderIn; } @Access(AccessType.PROPERTY) @OneToMany(mappedBy = "participant") public Set<Rating> getRating() { return ratings.getValue(); } public void setRating(Set<Rating> ratings) { if (ratings == null) this.ratings.setValue(FXCollections.emptyObservableSet()); else this.ratings.setValue(FXCollections.observableSet(ratings)); } public SetProperty<Rating> ratingProperty() { return ratings; } public void addRating(Rating rating) { this.ratings.add(rating); } public boolean isParticipantIn(ProjectGroup projectGroup) { return getParticipatesIn().contains(projectGroup); } public String getFullName() { return getName() + " " + getSurname(); } @SuppressWarnings("unused") public String getNameEmailLabel() { return getName() + " " + getSurname() + " (" + getEmail() + ")\n" + getIndexNumber(); } //Leaderboard specific methods private Set<Rating> getRatingsAssociatedWithGroup(Optional projectGroup){ if (projectGroup.isPresent()){ return ratings.stream(). filter(rating -> rating.getAssessedGroup() == projectGroup.get()). collect(Collectors.toSet()); } else{ return ratings; } } public double getAverageRating(Optional projectGroup){ return getRatingsAssociatedWithGroup(projectGroup).stream(). mapToDouble(participant -> ( participant.getRatingDetails().getRatingValue() / participant.getRatingDetails().getMaxRatingValue())). average(). 
orElse(Double.NEGATIVE_INFINITY); } public long getRatingCount(Optional projectGroup){ return getRatingsAssociatedWithGroup(projectGroup).size(); } public double getRatingSum(Optional projectGroup){ return getRatingsAssociatedWithGroup(projectGroup).stream(). mapToDouble(participant -> participant.getRatingDetails().getRatingValue()). sum(); } public double getMaxRatingSum(Optional projectGroup){ return getRatingsAssociatedWithGroup(projectGroup).stream(). mapToDouble(participant -> participant.getRatingDetails().getMaxRatingValue()). sum(); } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/interactions/ItemInputType.java package pl.edu.agh.gastronomiastosowana.model.interactions; public enum ItemInputType { NEW_ITEM, EDIT_ITEM } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/presenter/TaskEditPanePresenter.java package pl.edu.agh.gastronomiastosowana.presenter; import javafx.event.ActionEvent; import javafx.fxml.FXML; import javafx.scene.control.DatePicker; import javafx.scene.control.TextField; import pl.edu.agh.gastronomiastosowana.model.Project; import pl.edu.agh.gastronomiastosowana.model.Task; import java.time.LocalDate; import java.util.Optional; public class TaskEditPanePresenter extends AbstractPresenter{ Task task; @FXML private TextField taskTitleInput; @FXML private TextField taskDetailsInput; @FXML private DatePicker deadlineInput; @FXML private void initialize() { super.initialize("Task"); setTask(new Task()); } @Override public Optional<String> validateInput(){ String title = Optional.ofNullable(taskTitleInput.getText()).orElse("").trim(); LocalDate deadline = deadlineInput.getValue(); if (title.isEmpty()) return Optional.of("Task title cannot be empty"); if (deadline != null && deadline.compareTo(LocalDate.now()) < 0) return Optional.of("Deadline cannot be in the past!"); return Optional.empty(); } public void update() { task.setTitle(taskTitleInput.getText().trim()); task.setDetails(taskDetailsInput.getText().trim()); if 
(deadlineInput.getValue() != null) { task.setDeadline(deadlineInput.getValue()); } } @FXML void clearDeadlineInput(ActionEvent event) { deadlineInput.setValue(null); } public void setTask(Task task){ this.task = task; if(task == null){ taskTitleInput.clear(); taskDetailsInput.clear(); deadlineInput.setValue(null); } else{ taskTitleInput.setText(task.getTitle()); taskDetailsInput.setText(task.getDetails()); deadlineInput.setValue(task.getDeadline()); } } public Task getTask(){ return this.task; } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/ApplicationRunner.java package pl.edu.agh.gastronomiastosowana; import javafx.application.Application; public class ApplicationRunner { public static void main(String[] args) { Application.launch(MainWindow.class, args); } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/exceptions/InvalidRatingValueException.java package pl.edu.agh.gastronomiastosowana.model.exceptions; public class InvalidRatingValueException extends Exception { } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/ratings/RatingAggregator.java package pl.edu.agh.gastronomiastosowana.model.ratings; import pl.edu.agh.gastronomiastosowana.model.Rating; import pl.edu.agh.gastronomiastosowana.model.RatingDetails; import java.util.List; import java.util.function.ToDoubleFunction; public class RatingAggregator { private List<Rating> ratings; public RatingAggregator(List<Rating> ratings) { this.ratings = ratings; } public double sumOfValues() { return this.sumRatingDetailsWithMappingFunction(RatingDetails::getRatingValue); } public double sumOfMaxValues() { return this.sumRatingDetailsWithMappingFunction(RatingDetails::getMaxRatingValue); } public double averageValueToMaxRatio() { return sumOfValues() / sumOfMaxValues(); } public double averageValueToMaxRatioPercent() { return averageValueToMaxRatio() * 100; } private double sumRatingDetailsWithMappingFunction(ToDoubleFunction<RatingDetails> function) { return 
this.ratings.stream() .map(Rating::getRatingDetails) .mapToDouble(function) .sum(); } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/exceptions/LeaderRemovalException.java package pl.edu.agh.gastronomiastosowana.model.exceptions; public class LeaderRemovalException extends Exception { } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/Task.java package pl.edu.agh.gastronomiastosowana.model; import javafx.beans.property.ObjectProperty; import javafx.beans.property.SimpleObjectProperty; import javafx.beans.property.SimpleStringProperty; import javafx.beans.property.StringProperty; import net.bytebuddy.asm.Advice; import javax.persistence.*; import java.time.LocalDate; import java.util.Date; @Entity public class Task { @Id @GeneratedValue(strategy = GenerationType.AUTO) private int taskID; @Transient private StringProperty title; @Transient private StringProperty details; @Transient private ObjectProperty<LocalDate> creationDate; @Transient private ObjectProperty<LocalDate> deadline; @ManyToOne private Project assessedProject; public Task(){ title = new SimpleStringProperty(this, "title"); details = new SimpleStringProperty(this, "details"); creationDate = new SimpleObjectProperty<>(this, "creationDate"); deadline = new SimpleObjectProperty<>(this, "deadline"); setCreationDate(LocalDate.now()); } public Task(String title, String details, LocalDate creationDate, LocalDate deadline, Project assessedProject){ this(); setTitle(title); setDetails(details); setCreationDate(creationDate); setDeadline(deadline); setAssessedProject(assessedProject); } @Access(AccessType.PROPERTY) @Column(nullable = false) public String getTitle(){ return title.get(); } public void setTitle(String title){ this.title.set(title); } public StringProperty titleProperty(){ return this.title; } @Access(AccessType.PROPERTY) public String getDetails(){ return details.get(); } public void setDetails(String details){ this.details.set(details); } public StringProperty 
detailsProperty(){ return this.details; } @Access(AccessType.PROPERTY) @Column(nullable = false) public LocalDate getCreationDate(){ return creationDate.get(); } public void setCreationDate(LocalDate creationDate){ this.creationDate.set(creationDate); } public ObjectProperty<LocalDate> creationDateProperty(){ return this.creationDate; } @Access(AccessType.PROPERTY) public LocalDate getDeadline(){ return deadline.get(); } public void setDeadline(LocalDate deadline){ this.deadline.set(deadline); } public ObjectProperty<LocalDate> deadlineProperty(){ return this.deadline; } public void setAssessedProject(Project project) { this.assessedProject = project; } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/dao/ProjectDao.java package pl.edu.agh.gastronomiastosowana.dao; import org.hibernate.Session; import pl.edu.agh.gastronomiastosowana.model.Project; import pl.edu.agh.gastronomiastosowana.session.SessionService; import javax.persistence.NoResultException; import javax.persistence.TypedQuery; import java.time.LocalDate; import java.util.List; import java.util.Objects; import java.util.Optional; public class ProjectDao extends GenericDao<Project> { public ProjectDao() { super(); } public Optional<Project> findProjectByName(String name) { Objects.requireNonNull(name); Session session = SessionService.getSession(); TypedQuery<Project> projectQuery = session.createQuery( "from Project p where p.name = :name", Project.class ); projectQuery.setParameter("name", name); try { return Optional.of(projectQuery.getSingleResult()); } catch (NoResultException e) { return Optional.empty(); } } public Optional<Project> findProjectByProjectGroupName(String name) { Objects.requireNonNull(name); Session session = SessionService.getSession(); TypedQuery<Project> projectQuery = session.createQuery( "from Project p where p.projectGroup.groupName = :name", Project.class ); projectQuery.setParameter("name", name); try { return Optional.of(projectQuery.getSingleResult()); } catch 
(NoResultException e) { return Optional.empty(); } } public List<Project> findActiveProjects() { Session session = SessionService.getSession(); TypedQuery<Project> projectQuery = session.createQuery( "from Project p where p.startDate <= :today and p.endDate >= :today", Project.class ); projectQuery.setParameter("today", LocalDate.now()); return projectQuery.getResultList(); } public List<Project> findArchivalProjects() { Session session = SessionService.getSession(); TypedQuery<Project> projectQuery = session.createQuery( "from Project p where p.endDate < :today", Project.class ); projectQuery.setParameter("today", LocalDate.now()); return projectQuery.getResultList(); } public List<Project> findFutureProjects() { Session session = SessionService.getSession(); TypedQuery<Project> projectQuery = session.createQuery( "from Project p where p.startDate > :today", Project.class ); projectQuery.setParameter("today", LocalDate.now()); return projectQuery.getResultList(); } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/aggregations/ProjectGroupList.java package pl.edu.agh.gastronomiastosowana.model.aggregations; import pl.edu.agh.gastronomiastosowana.model.ProjectGroup; public class ProjectGroupList extends AbstractAggregatedList<ProjectGroup> { public ProjectGroupList() { super(); } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/dao/ParticipantDao.java package pl.edu.agh.gastronomiastosowana.dao; import org.hibernate.Session; import pl.edu.agh.gastronomiastosowana.model.Participant; import pl.edu.agh.gastronomiastosowana.model.ProjectGroup; import pl.edu.agh.gastronomiastosowana.session.SessionService; import javax.persistence.TypedQuery; import java.util.List; import java.util.Optional; import java.util.stream.Collectors; public class ParticipantDao extends GenericDao<Participant> { public ParticipantDao() { super(); } public Optional<Participant> findParticipantByEmail(String email){ final Session session = SessionService.getSession(); 
TypedQuery<Participant> participantsByEmailQuery = session.createQuery("from Participant as participant " + "where participant.email = :email", Participant.class); participantsByEmailQuery.setParameter("email", email); return participantsByEmailQuery.getResultStream().findFirst(); } public List<Participant> findParticipantsAssignedTo(ProjectGroup projectGroup) { return findAll().stream() .filter(participant -> participant.isParticipantIn(projectGroup)) .collect(Collectors.toList()); } public List<Participant> findParticipantsNotAssignedTo(ProjectGroup projectGroup) { return findAll().stream() .filter(participant -> !participant.isParticipantIn(projectGroup)) .collect(Collectors.toList()); } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/presenter/ProjectEditPanePresenter.java package pl.edu.agh.gastronomiastosowana.presenter; import javafx.event.ActionEvent; import javafx.fxml.FXML; import javafx.scene.control.Button; import javafx.scene.control.DatePicker; import javafx.scene.control.Label; import javafx.scene.control.TextField; import pl.edu.agh.gastronomiastosowana.model.Project; import java.time.LocalDate; import java.util.Optional; public class ProjectEditPanePresenter extends AbstractPresenter { private Project project; @FXML private TextField projectNameInput; @FXML private DatePicker startDateInput; @FXML private DatePicker endDateInput; @FXML private TextField projectGroupInput; @FXML private Label projectGroupLabel; @FXML private Button projectGroupCancelButton; @FXML private void initialize() { super.initialize("Project"); setProject(new Project()); } public Optional<String> validateInput() { String name = Optional.ofNullable(projectNameInput.getText()).orElse("").trim(); LocalDate startDate = startDateInput.getValue(); LocalDate endDate = endDateInput.getValue(); if (name.isEmpty()) return Optional.of("Project name cannot be empty"); if (startDate == null) return Optional.of("Start date cannot be empty"); if (endDate != null && 
startDate.compareTo(endDate) > 0) return Optional.of("Start date is greater than end date"); return Optional.empty(); } public void update() { project.setName(projectNameInput.getText().trim()); project.setStartDate(startDateInput.getValue()); if (endDateInput.getValue() != null) { project.setEndDate(endDateInput.getValue()); } if ( ! projectGroupInput.getText().equals("")) { project.setProjectGroup(projectGroupInput.getText().trim()); } } @FXML void clearStartDateInput(ActionEvent event) { startDateInput.setValue(null); } @FXML void clearEndDateInput(ActionEvent event) { endDateInput.setValue(null); } public Project getProject() { return project; } public void setProject(Project project) { this.project = project; if (project == null) { projectNameInput.clear(); startDateInput.setValue(null); endDateInput.setValue(null); projectGroupInput.clear(); } else { projectNameInput.setText(project.getName()); startDateInput.setValue(project.getStartDate()); endDateInput.setValue(project.getEndDate()); if (project.getProjectGroup() != null) { projectGroupInput.setVisible(false); projectGroupLabel.setVisible(true); projectGroupLabel.setText(project.getProjectGroup().getGroupName()); projectGroupCancelButton.setVisible(true); //projectGroupInput.setText(project.getProjectGroup().getGroupName()); } else { projectGroupInput.setVisible(true); projectGroupLabel.setVisible(false); projectGroupCancelButton.setVisible(false); projectGroupInput.setText(""); } //if project has project group assigned - show project group label //else show text box with project group input } } public void cancelGroupAssignment(){ project.cancelProjectGroupAssignment(); projectGroupInput.setVisible(true); projectGroupLabel.setVisible(false); projectGroupCancelButton.setVisible(false); } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/ProjectGroup.java package pl.edu.agh.gastronomiastosowana.model; import javafx.beans.property.*; import 
pl.edu.agh.gastronomiastosowana.model.exceptions.LeaderRemovalException; import pl.edu.agh.gastronomiastosowana.model.exceptions.NonPresentParticipantRemovalException; import javax.persistence.*; import java.time.LocalDate; import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; @Entity public class ProjectGroup { @Id @GeneratedValue(strategy = GenerationType.AUTO) private int projectGroupID; @Transient private StringProperty groupName; @Transient private BooleanProperty active; @Transient private ObjectProperty<LocalDate> creationDate; @Transient private ObjectProperty<Project> project; @ManyToOne private Participant leader; @ManyToMany(mappedBy = "participatesIn") private Set<Participant> participants; @OneToMany(mappedBy = "assessedGroup") private Set<Rating> ratings; public ProjectGroup(String groupName) { this(); setGroupName(groupName); setCreationDate(LocalDate.now()); setActive(true); } public ProjectGroup() { groupName = new SimpleStringProperty(this, "groupName"); active = new SimpleBooleanProperty(this, "active"); creationDate = new SimpleObjectProperty<>(this, "creationDate"); project = new SimpleObjectProperty<>(this, "project"); participants = new HashSet<>(); } @Access(AccessType.PROPERTY) @Column(unique = true, nullable = false) public String getGroupName() { return groupName.get(); } public void setGroupName(String groupName) { this.groupName.set(groupName); } public StringProperty groupNameProperty() { return groupName; } @Access(AccessType.PROPERTY) @Column(nullable = false) public boolean isActive() { return active.get(); } public void setActive(boolean active) { this.active.set(active); } public BooleanProperty activeProperty() { return active; } @Access(AccessType.PROPERTY) @Column(nullable = false) public LocalDate getCreationDate() { return creationDate.get(); } public void setCreationDate(LocalDate creationDate) { this.creationDate.set(creationDate); } public ObjectProperty<LocalDate> creationDateProperty() { 
return creationDate; } @Access(AccessType.PROPERTY) @OneToOne(mappedBy = "projectGroup") public Project getProject() { return project.get(); } public void setProject(Project project) { this.project.setValue(project); } public ObjectProperty<Project> projectProperty() { return project; } public Participant getLeader() { return leader; } public void setLeader(Participant newLeader) { if (this.leader != null) { this.leader.getLeaderIn().remove(this); } this.leader = newLeader; newLeader.getLeaderIn().add(this); addParticipant(leader); } public void addParticipant(Participant participant) { participants.add(participant); participant.getParticipatesIn().add(this); } public void removeParticipant(Participant participant) throws NonPresentParticipantRemovalException, LeaderRemovalException { if (!participants.contains(participant)) { throw new NonPresentParticipantRemovalException(); } if (participant == this.leader) { throw new LeaderRemovalException(); } participants.remove(participant); participant.getParticipatesIn().remove(this); } public Set<Participant> getParticipants() { return participants; } public int getParticipantCount() { return this.participants.size(); } public Participant getParticipantByFullName(String fullName) { return participants.stream() .filter(p -> p.getFullName().equals(fullName)) .collect(Collectors.toList()) .get(0); } public void setParticipants(Set<Participant> participants) { this.participants = participants; } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/presenter/ProjectGroupRatingEditPanePresenter.java package pl.edu.agh.gastronomiastosowana.presenter; import javafx.beans.binding.BooleanBinding; import javafx.collections.FXCollections; import javafx.fxml.FXML; import javafx.scene.control.*; import pl.edu.agh.gastronomiastosowana.dao.ParticipantDao; import pl.edu.agh.gastronomiastosowana.dao.RatingDao; import pl.edu.agh.gastronomiastosowana.model.Participant; import pl.edu.agh.gastronomiastosowana.model.ProjectGroup; import 
pl.edu.agh.gastronomiastosowana.model.Rating; import pl.edu.agh.gastronomiastosowana.model.RatingDetails; import pl.edu.agh.gastronomiastosowana.model.exceptions.InvalidRatingValueException; import pl.edu.agh.gastronomiastosowana.model.interactions.ItemInputType; import java.util.ArrayList; import java.util.List; import java.util.Optional; import java.util.stream.Collectors; public class ProjectGroupRatingEditPanePresenter extends AbstractPresenter { private ProjectGroup projectGroup; private Rating rating; private List<Rating> newRatings = new ArrayList<>(); private RatingDao ratingDao = new RatingDao(); private ParticipantDao participantDao = new ParticipantDao(); @FXML private RadioButton singleMemberRatingRadioButton; @FXML private RadioButton allMembersRatingRadioButton; @FXML private TextField titleTextField; @FXML private ComboBox<String> participantNameBox; @FXML private TextField gradeTextField; @FXML private TextField maxGradeTextField; @FXML private TextArea commentTextArea; public void initialize() { super.initialize("Rating"); setProjectGroup(new ProjectGroup()); bindComboBox(); } public void setProjectGroup(ProjectGroup projectGroup) { this.projectGroup = projectGroup; setParticipantsListItems(); } public void setRating(Rating rating) { this.rating = rating; if(rating != null) { this.setItemInputType(ItemInputType.EDIT_ITEM); this.singleMemberRatingRadioButton.setSelected(true); this.allMembersRatingRadioButton.setDisable(true); this.titleTextField.setText(rating.getRatingTitle()); this.participantNameBox.setValue(rating.getParticipant().getFullName()); this.participantNameBox.disableProperty().unbind(); this.participantNameBox.setDisable(true); this.gradeTextField.setText(Double.toString(rating.getRatingDetails().getRatingValue())); this.maxGradeTextField.setText(Double.toString(rating.getRatingDetails().getMaxRatingValue())); this.commentTextArea.setText(rating.getComment()); } } private void bindComboBox() { BooleanBinding 
disableParticipantSelectionProperty = singleMemberRatingRadioButton.selectedProperty().not(); participantNameBox.disableProperty().bind(disableParticipantSelectionProperty); } private void setParticipantsListItems() { var groupParticipants = projectGroup.getParticipants(); var groupParticipantsNames = groupParticipants .stream() .map(Participant::getFullName) .collect(Collectors.toList()); groupParticipantsNames.add(0, ""); participantNameBox.setItems(FXCollections.observableList(groupParticipantsNames)); } @Override public Optional<String> validateInput() { String participantName = Optional.ofNullable(participantNameBox.getValue()).orElse(""); String comment = Optional.ofNullable(commentTextArea.getText()).orElse(""); String gradeText = gradeTextField.getText().replace(",", "."); String maxGradeText = maxGradeTextField.getText().replace(",", "."); String title = Optional.ofNullable(titleTextField.getText()).orElse(""); if(participantName.isEmpty() && singleMemberRatingRadioButton.isSelected()) { return Optional.of("No participant to rate was chosen"); } try { double ratingValue = Double.parseDouble(gradeText); double maxRatingValue = Double.parseDouble(maxGradeText); if (getItemInputType() == ItemInputType.NEW_ITEM) { if (singleMemberRatingRadioButton.isSelected()) { var ratedParticipant = projectGroup.getParticipantByFullName(participantName); this.newRatings.add(new Rating(title, projectGroup, ratedParticipant, new RatingDetails(ratingValue, maxRatingValue), comment)); } else { for (Participant participant : this.projectGroup.getParticipants()) { this.newRatings.add(new Rating(title, projectGroup, participant, new RatingDetails(ratingValue, maxRatingValue), comment)); } } } else { rating.setRatingTitle(title); rating.setRatingDetails(new RatingDetails(ratingValue, maxRatingValue)); rating.setComment(comment); } } catch (NumberFormatException e) { if (gradeText.isEmpty()) return Optional.of("No grade was given"); return Optional.of("Incorrect number format"); } 
catch (InvalidRatingValueException e) { return Optional.of("Wrong rating value (rating smaller than 0 or greater than max)"); } return Optional.empty(); } @Override public void update() { if (getItemInputType() == ItemInputType.NEW_ITEM) { ratingDao.save(newRatings.toArray(new Rating[0])); newRatings.get(0).getParticipant().addRating(newRatings.get(0)); participantDao.update(newRatings.get(0).getParticipant()); } else { rating.getParticipant().addRating(rating); participantDao.update(rating.getParticipant()); ratingDao.update(rating); } } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/aggregations/ParticipantProjectGroupList.java package pl.edu.agh.gastronomiastosowana.model.aggregations; import javafx.collections.FXCollections; import javafx.collections.ObservableList; import pl.edu.agh.gastronomiastosowana.model.Participant; import pl.edu.agh.gastronomiastosowana.model.ProjectGroup; import pl.edu.agh.gastronomiastosowana.model.aggregations.model_wrapper.ParticipantProjectGroup; import java.util.LinkedList; import java.util.List; import java.util.Optional; public class ParticipantProjectGroupList extends AbstractAggregatedList<ParticipantProjectGroup>{ public ParticipantProjectGroupList(){ super(); } public void setWithParticipants(ParticipantList participantList){ List<ParticipantProjectGroup> participantProjectGroups = new LinkedList<ParticipantProjectGroup>(); for (Participant p : participantList.getElements()){ participantProjectGroups.add(new ParticipantProjectGroup(p)); } System.out.println(participantProjectGroups); this.setElements(FXCollections.observableList(participantProjectGroups)); } public void setSelectedProjectGroup(Optional<ProjectGroup> selectedProjectGroup){ for (ParticipantProjectGroup p : this.elements){ System.out.println("OK"); p.setSelectedProjectGroup(selectedProjectGroup); } } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/Critic.java package pl.edu.agh.gastronomiastosowana.model; import 
javafx.beans.property.SimpleStringProperty; import javafx.beans.property.StringProperty; import javax.persistence.*; @Entity public class Critic { @Id @GeneratedValue(strategy = GenerationType.AUTO) private int criticID; @Transient private StringProperty name; @Transient private StringProperty surname; @Transient private StringProperty email; public Critic(String name, String surname, String email) { this(); setName(name); setSurname(surname); setEmail(email); } public Critic() { this.name = new SimpleStringProperty(this, "name"); this.surname = new SimpleStringProperty(this, "surname"); this.email = new SimpleStringProperty(this, "email"); } @Access(AccessType.PROPERTY) @Column(nullable = false) public String getName() { return name.get(); } public void setName(String name) { this.name.set(name); } public StringProperty nameProperty() { return name; } @Access(AccessType.PROPERTY) @Column(nullable = false) public String getSurname(String surname) { return this.surname.get(); } public void setSurname(String surname) { this.surname.set(surname); } public StringProperty surnameProperty() { return surname; } @Access(AccessType.PROPERTY) @Column(nullable = false, unique = true) public String getEmail() { return this.email.get(); } public void setEmail(String email) { this.email.set(email); } public StringProperty emailProperty() { return email; } public int getCriticID() { return criticID; } } <file_sep>/settings.gradle rootProject.name = 'gastromanager' <file_sep>/README.md # Project Manager A JavaFX application made for teachers who want to have control over the projects they supervise. Allows creating projects, assigning students to those groups and giving them ratings. Those ratings are later presented in a form of leaderboard which can be exported to a text file. ## Tech stack - Java 11 - JavaFX (for window GUI and MVC model) - Hibernate - SQL database (MySQL was used) ## How it works - Teacher creates new project and assigns start and end dates. 
A project group is automatically created ![Create project](src/main/resources/images/add_project.png) - Teacher adds any number of participants and assigns them to a specific project (one participant can take part in many projects) ![Add participants](src/main/resources/images/new_participant.png) ![Assign participants](src/main/resources/images/assign_participants.png) - Participants who are assigned to a project can be given ratings. Teacher can rate a single member or bulk rate every member. ![Rate participant](src/main/resources/images/rate_participant.png) - Every group's ratings can be later seen and exported to a text file. Each participant's combined rating can be seen in a leaderboard ![Group ratings](src/main/resources/images/group_ratings.png) ![Leaderboard](src/main/resources/images/leaderboard.png) ## Internals - Project uses Hibernate which maps Java objects to relational database entities. Because JavaFX is used, the fields are actually of JavaFX Property type, and values returned by getters are mapped to database fields - Data Access Objects (DAO) are used to perform database queries. 
Those are written in HQL (Hibernate Query Language) <file_sep>/build.gradle plugins { id 'java' id 'application' id 'org.openjfx.javafxplugin' version '0.0.8' } javafx { version = "12" modules = [ 'javafx.controls', 'javafx.graphics', "javafx.fxml" ] } mainClassName = "gastromanager/pl.edu.agh.gastronomiastosowana.Main" group 'pl.edu.agh.gastronomiastosowana' version '1.0-SNAPSHOT' sourceCompatibility = 1.12 repositories { mavenCentral() } dependencies { testCompile group: 'junit', name: 'junit', version: '4.12' implementation 'org.hibernate:hibernate-core:5.4.9.Final' implementation 'mysql:mysql-connector-java:5.1.6' compile group: 'javax.mail', name: 'mail', version: '1.4.1' } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/dao/RatingDao.java package pl.edu.agh.gastronomiastosowana.dao; import org.hibernate.Session; import pl.edu.agh.gastronomiastosowana.model.Critic; import pl.edu.agh.gastronomiastosowana.model.ProjectGroup; import pl.edu.agh.gastronomiastosowana.model.Rating; import pl.edu.agh.gastronomiastosowana.session.SessionService; import javax.persistence.TypedQuery; import java.util.List; import java.util.Optional; public class RatingDao extends GenericDao<Rating> { public RatingDao() { super(); } public List<Rating> findByProjectGroup(ProjectGroup projectGroup) { final Session session = SessionService.getSession(); TypedQuery<Rating> ratingQuery = session.createQuery( "from Rating r where r.assessedGroup = :projectGroup", Rating.class ); ratingQuery.setParameter("projectGroup", projectGroup); return ratingQuery.getResultList(); } } <file_sep>/src/main/java/pl/edu/agh/gastronomiastosowana/model/aggregations/model_wrapper/ParticipantProjectGroup.java package pl.edu.agh.gastronomiastosowana.model.aggregations.model_wrapper; import pl.edu.agh.gastronomiastosowana.model.Participant; import pl.edu.agh.gastronomiastosowana.model.ProjectGroup; import pl.edu.agh.gastronomiastosowana.model.Rating; import java.time.LocalDate; import java.util.Optional; 
import java.util.Set; public class ParticipantProjectGroup { //Class used in purpose of saving in object state, which project group was selected to perform binding //Wraps Participant //selectedProjectGroup is changed every time user clicks "Selected project group only" private Participant participant; private Optional<ProjectGroup> selectedProjectGroup; public ParticipantProjectGroup(Participant participant){ this.participant = participant; this.selectedProjectGroup = Optional.empty(); } public void setSelectedProjectGroup(Optional<ProjectGroup> selectedProjectGroup){ this.selectedProjectGroup = selectedProjectGroup; } public String getName(){ return participant.getName(); } public String getSurname(){ return participant.getSurname(); } public String getIndexNumber(){ return participant.getIndexNumber(); } public String getEmail(){ return participant.getEmail(); } public LocalDate getRegistrationDate(){ return participant.getRegistrationDate(); } public Set<ProjectGroup> getParticipatesIn(){ return participant.getParticipatesIn(); } public Set<ProjectGroup> getLeaderIn(){ return participant.getLeaderIn(); } public Set<Rating> getRating(){ return participant.getRating(); } public String getFullName(){ return participant.getFullName(); } public String getNameEmailLabel(){ return participant.getNameEmailLabel(); } public Double getAverageRating(){ return participant.getAverageRating(selectedProjectGroup); } public String getAverageRatingTableCell(){ Double averageRating = participant.getAverageRating(selectedProjectGroup); if (averageRating == Double.NEGATIVE_INFINITY){ return "NO RATINGS"; } return String.format("%.2f", averageRating * 100) + " %"; } public long getRatingCount(){ return participant.getRatingCount(selectedProjectGroup); } public String getRatingCountTableCell(){ Long ratingCount = participant.getRatingCount(selectedProjectGroup); if (ratingCount == 0){ return "NO RATING"; } return String.valueOf(ratingCount); } public double getRatingSum(){ return 
participant.getRatingSum(selectedProjectGroup); } public String getRatingSumTableCell(){ Double ratingSum = participant.getRatingSum(selectedProjectGroup); if (ratingSum == Double.NEGATIVE_INFINITY){ return "NO RATING"; } return String.valueOf(ratingSum); } public double getMaxRatingSum(){ return participant.getMaxRatingSum(selectedProjectGroup); } public String getMaxRatingSumTableCell(){ Double maxRatingSum = participant.getMaxRatingSum(selectedProjectGroup); if (maxRatingSum == Double.NEGATIVE_INFINITY){ return "NO RATING"; } return String.valueOf(maxRatingSum); } }
abc9ad3a3eea6fc5bfc99f777d378d7a22d9315e
[ "Markdown", "Java", "Gradle" ]
23
Java
JakubRaban/project-manager
4a8c6b9fe7d13143223d9ed3336d96ec8cdbccea
5d2580b59f56f9c7525ff130ba4012d41a331458
refs/heads/master
<file_sep>package com.io.ikaonigiri.service; import com.io.ikaonigiri.repository.BoardRepository; import com.io.ikaonigiri.repository.LoginRepository; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import java.util.List; import java.util.Map; @Service public class LoginServiceImpl implements LoginService { @Autowired private LoginRepository loginRepository; @Override public Map<String, Object> checkLogIn(String input_id) { return loginRepository.checkLogIn(input_id); } @Override public Map<String, String> checkSignUp(String input_text, String input_value) { return loginRepository.checkSignUp(input_text,input_value); } @Override public int signUp(Map<String, String> input_value) { return loginRepository.signUp(input_value); } } <file_sep>package com.io.ikaonigiri.controller; import com.io.ikaonigiri.service.BoardService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Controller; import org.springframework.ui.Model; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PostMapping; import javax.servlet.http.HttpSession; @Controller public class HeaderController { @Autowired private BoardService boardService; @GetMapping("/ikaroom") public String callIkaRoom(Model model){ if(boardService.callBoards(1)!=null) model.addAttribute("lists",boardService.callBoards(1)); return "room"; } @GetMapping("/onigiriroom") public String callOnigiriRoom(Model model){ if(boardService.callBoards(2)!=null) model.addAttribute("lists",boardService.callBoards(2)); return "room"; } @GetMapping("/login") public String callLogin(){ return "login"; } @GetMapping("/logout") public String callLogOut(HttpSession session){ session.invalidate(); return "redirect:/"; } } <file_sep>plugins { id 'org.springframework.boot' version '2.4.6' id 'io.spring.dependency-management' version '1.0.11.RELEASE' id 'java' } group = 'com.example' 
version = '0.0.1-SNAPSHOT' sourceCompatibility = '11' configurations { compileOnly { extendsFrom annotationProcessor } } repositories { mavenCentral() } dependencies { implementation 'org.springframework.boot:spring-boot-starter-web' implementation 'org.mybatis.spring.boot:mybatis-spring-boot-starter:2.1.4' compileOnly 'org.projectlombok:lombok' runtimeOnly 'mysql:mysql-connector-java' annotationProcessor 'org.projectlombok:lombok' testImplementation 'org.springframework.boot:spring-boot-starter-test' implementation 'org.springframework.boot:spring-boot-starter-thymeleaf' implementation group: 'com.google.code.gson', name: 'gson', version: '2.8.5' implementation group: 'commons-io', name: 'commons-io', version: '2.4' // https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-websocket implementation group: 'org.springframework.boot', name: 'spring-boot-starter-websocket', version: '2.4.0' // https://mvnrepository.com/artifact/com.googlecode.json-simple/json-simple implementation group: 'com.googlecode.json-simple', name: 'json-simple', version: '1.1.1' } test { useJUnitPlatform() } <file_sep>package com.io.ikaonigiri.controller; import com.google.gson.JsonObject; import org.apache.commons.io.FileUtils; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import org.springframework.web.multipart.MultipartFile; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.util.UUID; @RestController public class ImageUploadController { @PostMapping(value = "/uploadImage", produces = "application/json") public JsonObject uploadSummernoteImageFile(@RequestParam("file") MultipartFile multipartFile){ JsonObject jsonObject = new JsonObject(); String fileRoot = "C:\\summernote_image\\"; //저장될 경로 String originalFileName = multipartFile.getOriginalFilename(); String extension = 
originalFileName.substring(originalFileName.lastIndexOf('.')); // 랜덤 UUID+ 확장자로 저장될 파일이름 String savedFileName = UUID.randomUUID() + extension; File targetFile = new File(fileRoot + savedFileName); try { InputStream fileStream = multipartFile.getInputStream(); FileUtils.copyInputStreamToFile(fileStream, targetFile); //파일 저장 jsonObject.addProperty("url","/summernoteImage/"+savedFileName); jsonObject.addProperty("responseCode","success"); }catch (IOException e){ FileUtils.deleteQuietly(targetFile); // 실패시 저장된 파일 삭제 jsonObject.addProperty("responseCode", "error"); e.printStackTrace(); } return jsonObject; } } <file_sep>package com.io.ikaonigiri.controller; import com.io.ikaonigiri.service.LoginService; import org.springframework.stereotype.Controller; import org.springframework.web.bind.annotation.GetMapping; @Controller public class MainController { private LoginService loginService; @GetMapping("/") public String callMain(){ System.out.println("여기는 인덱스입니다."); return "index"; } } <file_sep>rootProject.name = 'ikaonigiri' <file_sep>package com.io.ikaonigiri.controller; import com.io.ikaonigiri.service.BoardService; import com.io.ikaonigiri.service.LoginService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Controller; import org.springframework.web.bind.annotation.*; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpSession; import java.util.HashMap; import java.util.Map; @RestController public class LogInController { @Autowired private LoginService loginService; @PostMapping("/login") public String checkLogIn(@RequestBody Map<String, String> login_values, HttpServletRequest request) { System.out.println(login_values); String id = login_values.get("id"); String password = login_values.get("password"); System.out.println(loginService.checkLogIn(id)); if (loginService.checkLogIn(id) == null) { return "id"; } else if (!loginService.checkLogIn(id).get("password").equals(password)) { return 
"password"; } else { HttpSession session = request.getSession(); String nickname = loginService.checkLogIn(id).get("nickname").toString(); String role = loginService.checkLogIn(id).get("role").toString(); HashMap<String,String> map = new HashMap(); map.put("nickname",nickname); map.put("role",role); session.setAttribute("id", map); return "login"; } } @PostMapping("/check-signup") public String checkSignUP(@RequestBody Map<String, String> signUpValues) { String id = signUpValues.get("id"); String nickname = signUpValues.get("nickname"); String email = signUpValues.get("email"); if (loginService.checkSignUp("user_id", id) != null) { return "id"; } else if (loginService.checkSignUp("nickname", nickname) != null) { return "nickname"; } else if (loginService.checkSignUp("email", email) != null) { return "email"; } else { loginService.signUp(signUpValues); return "ok"; } } } <file_sep>package com.io.ikaonigiri.controller; import com.io.ikaonigiri.service.BoardService; import com.io.ikaonigiri.service.LoginService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Controller; import org.springframework.ui.Model; import org.springframework.web.bind.annotation.*; import java.util.Map; @Controller public class BoardController { @Autowired private BoardService boardService; @GetMapping("/signup") public String signUp(){ return "signup"; } @GetMapping("/write") public String write(){ return "writeBoard"; } @PostMapping("/write") public String writeBoard(@RequestParam Map<String,String> formValue){ boardService.insertBoard(formValue); return "redirect:/ikaroom"; } @GetMapping("/detail/{id}") public String callBoard(@PathVariable("id") int id,Model model){ System.out.println(boardService.callBoard(id)); model.addAttribute("list",boardService.callBoard(id)); return "boardDetail"; } } <file_sep>spring.datasource.url=jdbc:mysql://localhost:3306/ikaoni spring.datasource.username=root spring.datasource.password=<PASSWORD> 
spring.datasource.driver-class-name=com.mysql.cj.jdbc.Driver spring.mvc.converters.preferred-json-mapper=gson mybatis.mapper-locations:classpath:mybatis/mapper/*.xml <file_sep>package com.io.ikaonigiri.repository; import org.apache.ibatis.annotations.Mapper; import java.util.List; import java.util.Map; @Mapper public interface BoardRepository { List<Map<String,String>> callBoards(int board_no); Map<String,String> callBoard(int id); int insertBoard(Map<String,String> value); int deleteBoard(int id); int updateBoard(Map<String, String> value); }
3ced7660ad524f5d94ccde42f3ed64ef6fd458e9
[ "Java", "INI", "Gradle" ]
10
Java
diqksk/ikaonigiri
2189bfd6429bcf57d5b41a7fdb5e736c1e21b469
43246539d24609665127f7f8e24a161f07361fcb
refs/heads/master
<repo_name>parthgandhi7/express-sequelize<file_sep>/migrations/migrations.js 'use strict'; module.exports = { up: function(queryInterface, Sequelize) { return queryInterface.renameColumn('user', 'address', 'add'); }, down: function(queryInterface, Sequelize) { return queryInterface.removeColumn('user', 'address'); } }; <file_sep>/api/modules/users/UserController.js /* globals exp */ "use strict" const router = exp.Router(); const Users = exp.models.Users; const Addresses = exp.models.Addresses; const Projects = exp.models.Projects; const Photos = exp.models.Photos; const userAddressAssociation = exp.associations.userAddress; const userPhotosAssociation = exp.associations.userPhotos; const logger = require('../../../config/logger.js'); const q = require('q'); router.post('/', (req, res) => { let usersObj = req.body; Users.create(usersObj, { include: [{ model: Addresses, as: 'address' }, { model: Projects, as: 'projects' }, userPhotosAssociation] }).then((user) => { res.status(200).send(user); }).catch((err) => { res.status(500).send(err); }); }); router.get('/', (req, res) => { Users.findAll().then((users) => { logger.info('All users retreived %s', users); res.status(200).send(users); }).catch((err) => { logger.error('Some error occured: ', err); res.status(500).send(err); }); }); router.get('/:userid', (req, res) => { let responseObj; let usersObj; Users.findById(req.params.userid).then((user) => { responseObj = user.dataValues; usersObj = user; return usersObj.getAddress(); }).then((address) => { if (address) { responseObj.address = address.dataValues; } return usersObj.getProjects(); }).then((projects) => { if (projects) { responseObj.projects = JSON.parse(JSON.stringify(projects)); } return usersObj.getPics(); }).then((photos) => { if (photos) { responseObj.pics = JSON.parse(JSON.stringify(photos)); } res.status(200).send(responseObj); }).catch((err) => { console.log(err); res.status(500).send(err); }); }); router.post('/:userid', (req, res) => { let usersObj = 
req.body; usersObj.id = req.params.userid; Users.upsert(usersObj).then((doc) => { res.status(200).send(usersObj); }).catch((err) => { res.status(500).send(err); }); }); router.delete('/:userid', (req, res) => { Users.destroy({ where: { id: req.params.userid }, limit: 1 }).then((doc) => { res.status(200).send("deleted"); }).catch((err) => { res.status(500).send(err); }); }); module.exports = router; <file_sep>/api/modules/address/Address.js "use strict" const Sequelize = require('sequelize'); const sequelize = exp.sequelize; const Address = sequelize.define('address', { line1: { type: Sequelize.STRING }, city: { type: Sequelize.STRING }, state: { type: Sequelize.STRING, allowNull: false } }, { freezeTableName: true // Model tableName will be the same as the model name }); // Address.sync({force: true}) global.exp.models.Addresses = Address; <file_sep>/README.md Small Express boiler plate with [sequelize ORM](http://docs.sequelizejs.com/en/v3/).
22d1a52f5da8e4c27514501661b1a5b1d8e77951
[ "JavaScript", "Markdown" ]
4
JavaScript
parthgandhi7/express-sequelize
b9f8cd0a5f4669e5848c5d89f320ba0de13b4b41
3debf915f27e9f6360c75235c754c7fe9b403266
refs/heads/master
<repo_name>davebattles/liri-node-app<file_sep>/app.js // Node module imports needed to run the functions var fs = require("fs"); var request = require("request"); var dotenv = require("dotenv").config(); var Spotify = require('node-spotify-api'); var keys = require("./keys.js"); require('console-wrap')(); var liriInput = process.argv[2]; var userInput = process.argv[3]; var indent = "\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0"; var hr = "\n==============================================================\n"; var nextLine = "\n"; var redstart = "\x1b[31m"; var redend = "\x1b[0m"; console.clear(); process.stdout.write(nextLine); process.stdout.write("\x1b[31m ██▓ ██▓ ██▀███ ██▓\x1b[0m " + nextLine); process.stdout.write("\x1b[31m ▓██▒ ▓██▒▓██ ▒ ██▒▓██▒\x1b[0m -t for twitter user search " + nextLine); process.stdout.write("\x1b[31m ▒██░ ▒██▒▓██ ░▄█ ▒▒██▒\x1b[0m -ts for twitter tag streaming " + nextLine); process.stdout.write("\x1b[31m ▒██░ ░██░▒██▀▀█▄ ░██░\x1b[0m -tp to tweet " + nextLine); process.stdout.write("\x1b[31m ░██████▒░██░░██▓ ▒██▒░██░\x1b[0m -s for spotify " + nextLine); process.stdout.write("\x1b[31m ░ ▒░▓ ░░▓ ░ ▒▓ ░▒▓░░▓ \x1b[0m -o for omdb " + nextLine); process.stdout.write("\x1b[31m ░ ░ ▒ ░ ▒ ░ ░▒ ░ ▒░ ▒ ░\x1b[0m -do for manual entry " + nextLine); process.stdout.write("\x1b[31m ░ ░ ▒ ░ ░░ ░ ▒ ░\x1b[0m -h for help " + nextLine); process.stdout.write("\x1b[31m ░ ░ ░ ░ ░ \x1b[0m " + nextLine); switch (liriInput) { case "-t": myTweets(); break; case "-tp": tweetPost(); break; case "-s": spotifyThisSong(); break; case "-o": movieThis(); break; case "-ts": tweetStream(); break; case "-do": doWhatItSays(); break; case "-h": help(); break; } function help() { console.clear(); process.stdout.write(nextLine); process.stdout.write(redstart + " ██░ ██ ▓█████ ██▓ ██▓███ " + redend + nextLine); process.stdout.write(redstart + "▓██░ ██▒▓█ ▀ ▓██▒ ▓██░ ██▒" + redend + nextLine); process.stdout.write(redstart + "▒██▀▀██░▒███ ▒██░ ▓██░ ██▓▒" + redend + nextLine); 
process.stdout.write(redstart + "░▓█ ░██ ▒▓█ ▄ ▒██░ ▒██▄█▓▒ ▒" + redend + nextLine); process.stdout.write(redstart + "░▓█▒░██▓░▒████▒░██████▒▒██▒ ░ ░" + redend + nextLine); process.stdout.write(redstart + " ▒ ░░▒░▒░░ ▒░ ░░ ▒░▓ ░▒▓▒░ ░ ░" + redend + nextLine); process.stdout.write(redstart + " ▒ ░▒░ ░ ░ ░ ░░ ░ ▒ ░░▒ ░ " + redend + nextLine); process.stdout.write(redstart + " ░ ░░ ░ ░ ░ ░ ░░ " + redend + nextLine); process.stdout.write(redstart + " ░ ░ ░ ░ ░ ░ ░ " + redend + nextLine); process.stdout.write(redstart + " " + redend + nextLine); process.stdout.write(redstart + "Liri" + redend + " assists you with Twitter, Spotify, and Omdb Movie Lookup" + nextLine); process.stdout.write(redstart + "Liri" + redend + " takes an operator and an argument during execution" + nextLine); process.stdout.write("i.e: node app.js -tp 'This is a test'" + nextLine); process.stdout.write("The example above will post This is a test without quotations to the twitter account linked in your .env file" + nextLine); process.exit(); } function noEntry() { console.error(" You did not make a valid query! 
"); process.exit(); } // -s function spotifyThisSong() { var spotify = new Spotify(keys.spotify); var fullSong = ""; var fullArtists = ""; for (i = 3; i < process.argv.length; i++) { fullSong = fullSong + " " + process.argv[i]; } if (!userInput) { noEntry(); } if (doQuery == true){ fullSong = userInput; } spotify.search({ type: "track", query: fullSong }, function (err, data) { if (err) { console.log(err); } var songs = data.tracks.items; // Finds the full list of artists and formats them // removing the comma if theyre the last artist // The rest is the format for the results // for (var i = 0; i < songs.length; i++) { reduced amount of results var artistLog = []; for (var i = 0; i < 1; i++) { if (songs[i].artists.length != 1) { for (var a = 0; a < songs[i].artists.length; a++) { if (a == songs[i].artists.length - 1) { fullArtists = fullArtists + songs[i].artists[a].name; } else { fullArtists = fullArtists + songs[i].artists[a].name + ", "; } } } else { fullArtists = songs[i].artists[0].name; } process.stdout.write(hr + nextLine + redstart + "Artist(s): " + redend + fullArtists); process.stdout.write(nextLine + redstart + "album: " + redend + songs[i].album.name); process.stdout.write(nextLine + redstart + "title: " + redend + songs[i].name); if (songs[i].preview_url !== null) { process.stdout.write(nextLine + redstart + "url: " + redend + songs[i].preview_url + nextLine); } artistLog = { Artist: fullArtists, Album: songs[i].album.name, Title: songs[i].name, URL: songs[i].preview_url }; } fs.appendFile("log.txt", hr + nextLine + " Artist: " + fullArtists + " Album: " + artistLog.Album + " Title: " + artistLog.Title + " URL: " + artistLog.URL + nextLine + hr, function (error) { if (error) throw error; console.log(" *** results have been logged to log.txt ***"); }); process.stdout.write(hr + nextLine); }); } function tweetPost() { var Twitter = require('twitter'); var client = new Twitter(keys.twitter); var fullTweet = ""; for (i = 3; i < process.argv.length; i++) { 
fullTweet = fullTweet + " " + process.argv[i]; } var params = { status: fullTweet }; if (!userInput) { noEntry(); } client.post('statuses/update', params, function (error, tweet, response) { if (!error) { process.stdout.write("You tweeted: " + fullTweet + nextLine + nextLine); } }); } // -do function doWhatItSays() { fs.readFile("random.txt", "utf8", function (error, data) { if (error) throw error; var fileInput = ""; var cmd = data.split(" ", 1); data = data.replace(cmd, ""); fileInput = data.replace('"', ""); fileInput = fileInput.replace('"', ""); switch (cmd[0]) { case "-t": userInput = fileInput; myTweets(); break; case "-tp": userInput = fileInput; tweetPost(); break; case "-s": userInput = fileInput; doQuery = true; spotifyThisSong(); break; case "-o": userInput = fileInput; movieThis(); break; case "-ts": userInput = fileInput; tweetStream(); break; case "-do": userInput = fileInput; doWhatItSays(); break; case "-h": userInput = fileInput; help(); break; } }); } // -t userInput function myTweets() { var Twitter = require('twitter'); var client = new Twitter(keys.twitter); var text = "text"; var params = { screen_name: userInput, count: 20 }; if (!userInput) { noEntry(); } client.get('statuses/user_timeline', params, function (error, tweets, response) { if (!error) { for (var i = 0; i < tweets.length; i++) { var tweetNum = i + 1; var time = tweets[i].created_at; var timeArr = time.split(' '); var output = tweetNum + nextLine + tweets[i].text + nextLine + timeArr.slice(0, 4).join('- ') + nextLine + nextLine; process.stdout.write(output); fs.appendFile("log.txt", +nextLine + output, function (error) { if (error) throw error; }); } console.log(" *** results have been logged to log.txt ***"); } }); } // -ts function tweetStream() { var Twitter = require('twitter'); var client = new Twitter(keys.twitter); var fullTweet = ""; for (i = 3; i < process.argv.length; i++) { fullTweet = fullTweet + " " + process.argv[i]; } var params = { track: fullTweet }; 
client.stream('statuses/filter', params, function (stream) { stream.on('data', function (tweet) { console.log(tweet.text); }); stream.on('error', function (error) { console.log(error); }); }); } // -o userInput function movieThis() { var movie = userInput; if (!movie) { noEntry(); } movieName = movie; request("http://www.omdbapi.com/?t=" + movieName + "&y=&plot=short&apikey=trilogy", function (error, response, body) { if (!error && response.statusCode == 200) { var movieObject = JSON.parse(body); var movieResults = hr + redstart + "Title: " + redend + movieObject.Title + nextLine + redstart + "Year: " + redend + movieObject.Year + nextLine + redstart + "Country: " + redend + movieObject.Country + nextLine + redstart + "Director: " + redend + movieObject.Director + nextLine + redstart + "Writer: " + redend + movieObject.Writer + nextLine + redstart + "Production: " + redend + movieObject.Production + nextLine + redstart + "Language:" + redend + movieObject.Language + nextLine + redstart + "Awards:" + redend + movieObject.Awards + hr + redstart + "Imdb Rating: " + redend + movieObject.imdbRating + nextLine + redstart + movieObject.Ratings[0].Source + ": " + redend + movieObject.Ratings[0].Value + nextLine + redstart + movieObject.Ratings[1].Source + ": " + redend + movieObject.Ratings[1].Value + nextLine + redstart + movieObject.Ratings[2].Source + ": " + redend + movieObject.Ratings[2].Value + nextLine + redstart + "Metascore: " + redend + movieObject.Metascore + hr + "Actors:" + nextLine + indent + movieObject.Actors + hr + "Plot: " + nextLine + indent + movieObject.Plot + nextLine + nextLine + nextLine; process.stdout.write(movieResults); fs.appendFile("log.txt", movieResults, function (error) { if (error) throw error; console.log(" *** results have been logged to log.txt ***"); }); // console.log(movieObject); } else { console.error("Error :" + error); return; } }); }<file_sep>/README.md # liri-node-app ![trainlogo](https://my.mixtape.moe/vplbeg.png) -t for 
twitter user search -ts for twitter tag streaming -tp to tweet -s for spotify -o for omdb -do for manual entry -h for help
57b717dfba555b43dc81ca62c42d4c7ce952af87
[ "JavaScript", "Markdown" ]
2
JavaScript
davebattles/liri-node-app
0481b3627ba4188ac93a2afbfc09e14bbdbdd8dd
2bdee61c61c69bc24ac0ee1d1883643156a79baa
refs/heads/master
<repo_name>carlocorradini/happypuppy-server<file_sep>/src/util/EntityUtil.ts // eslint-disable-next-line no-unused-vars import { getMetadataArgsStorage, ObjectType } from 'typeorm'; // eslint-disable-next-line no-unused-vars import { ColumnMetadataArgs } from 'typeorm/metadata-args/ColumnMetadataArgs'; // eslint-disable-next-line no-unused-vars import { MetadataArgsStorage } from 'typeorm/metadata-args/MetadataArgsStorage'; export default class EntityUtil { private static storage(): MetadataArgsStorage { return getMetadataArgsStorage(); } private static getPropertyName(column: ColumnMetadataArgs): string { return column.propertyName; } public static columns<Entity>(entity: ObjectType<Entity>): ColumnMetadataArgs[] { return this.storage().filterColumns(entity); } public static selectableColumns<Entity>( entity: ObjectType<Entity>, addColumns?: (keyof Entity)[] ) { return this.columns(entity) .filter((column) => { return column.options.select === undefined || column.options.select === true; }) .map(this.getPropertyName) .concat(Array.isArray(addColumns) ? 
(addColumns as string[]) : []) as (keyof Entity)[]; } public static uniqueColumns<Entity>(entity: ObjectType<Entity>): (keyof Entity)[] { return this.columns(entity) .filter((column) => { return column.options.unique === true; }) .map(this.getPropertyName) as (keyof Entity)[]; } } <file_sep>/.env.example # NODE NODE_ENV=development PORT=8080 # DATABASE DATABASE_URL=postgres://username:password@localhost:5432/happypuppy DATABASE_SSL=false DATABASE_SYNCHRONIZE=true DATABASE_LOGGING=false # SERVICE SERVICE_EMAIL_HOST=smtp.example.com SERVICE_EMAIL_PORT=465 SERVICE_EMAIL_SECURE=true SERVICE_EMAIL_USERNAME=username SERVICE_EMAIL_PASSWORD=<PASSWORD> SERVICE_PHONE_SID=BU6bf7452dgycdwkw839fskaue492mc92j SERVICE_PHONE_TOKEN=<PASSWORD> SERVICE_PHONE_NUMBER=+15005550006 SERVICE_IMAGE_CLOUD=mns95k29c SERVICE_IMAGE_KEY=205472098260441 SERVICE_IMAGE_SECRET=HDPVHWChvksw74Hc923lcHSPd29 # SECURITY SECURITY_BCRYPT_SALT_ROUNS=12 SECURITY_JWT_SECRET=MySuperSecretPrivateKey SECURITY_JWT_EXPIRES_IN=32d<file_sep>/src/db/entity/UserPasswordReset.ts /* eslint-disable camelcase */ import { Entity, OneToOne, JoinColumn, Column, CreateDateColumn, UpdateDateColumn } from 'typeorm'; import { IsEmpty, IsString, Length } from 'class-validator'; import config from '@app/config'; import User from './User'; @Entity('user_password_reset') export default class UserPasswordReset { @OneToOne(() => User, { primary: true, onDelete: 'CASCADE' }) @JoinColumn({ name: 'user_id' }) @IsEmpty({ always: true }) user!: User; @Column({ name: 'token', length: config.SECURITY.TOKEN.PASSWORD.LENGTH }) @IsString() @Length(config.SECURITY.TOKEN.PASSWORD.LENGTH, config.SECURITY.TOKEN.PASSWORD.LENGTH) token!: string; @Column({ name: 'used' }) @IsEmpty({ always: true }) used!: boolean; @IsString() @Length(8, 64) password!: string; @CreateDateColumn({ name: 'created_at', select: false, update: false }) @IsEmpty({ always: true }) created_at!: Date; @UpdateDateColumn({ name: 'updated_at' }) @IsEmpty({ always: true }) 
updated_at!: Date; } <file_sep>/src/util/ArrayUtil.ts export default class ArrayUtil { public static contains(from: any[], elements: any[]): boolean { return elements.every((e) => from.includes(e)); } } <file_sep>/src/controller/AnimalPlaceController.ts /* eslint-disable camelcase */ // eslint-disable-next-line no-unused-vars import { Request, Response } from 'express'; import { getManager, Between } from 'typeorm'; import logger from '@app/logger'; import AnimalPark from '@app/db/entity/AnimalPlace'; import { ResponseHelper, HttpStatusCode } from '@app/helper'; // eslint-disable-next-line no-unused-vars import GisUtil, { BoundingBox } from '@app/util/GisUtil'; export default class AnimalPlaceController { public static find(req: Request, res: Response): void { const { limit, offset, sort, sort_order, id, name, type, latitude, longitude, radius, } = req.query; const boundingBox: BoundingBox | undefined = latitude !== undefined && longitude !== undefined && radius !== undefined ? GisUtil.calculateBoundingBox( GisUtil.toCoordinates(latitude as string, longitude as string), radius as string ) : undefined; getManager() .find(AnimalPark, { ...(limit !== undefined && { take: (limit as unknown) as number }), ...(offset !== undefined && { skip: (offset as unknown) as number }), ...(sort !== undefined && sort_order !== undefined && { order: { [sort as keyof AnimalPark]: sort_order, }, }), loadRelationIds: true, where: { ...(id !== undefined && { id }), ...(name !== undefined && { name }), ...(type !== undefined && { type }), ...(boundingBox !== undefined && { latitude: Between(boundingBox.min.latitude, boundingBox.max.latitude), longitude: Between(boundingBox.min.longitude, boundingBox.max.longitude), }), }, }) .then((parks) => { if (boundingBox !== undefined) { // eslint-disable-next-line no-param-reassign parks = parks.filter( (park) => GisUtil.distance( boundingBox.pivot, GisUtil.toCoordinates(park.latitude, park.longitude) ) < boundingBox.radius ); } logger.info(`Found 
${parks.length} Animal Parks`); ResponseHelper.send(res, HttpStatusCode.OK, parks); }) .catch((ex) => { logger.warn(`Failed to find Animal Parks due to ${ex.message}`); ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static findById(req: Request, res: Response): void { const { id } = req.params; getManager() .findOneOrFail(AnimalPark, id) .then((park) => { logger.info(`Found Animal Park ${park.id}`); ResponseHelper.send(res, HttpStatusCode.OK, park); }) .catch((ex) => { logger.warn(`Failed to find Animal Park ${id} due to ${ex.message}`); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } } <file_sep>/src/typings/express/index.d.ts // eslint-disable-next-line no-unused-vars import { types } from '@app/common'; declare global { namespace Express { export interface Request { user?: types.JWT.Payload; } } } <file_sep>/src/controller/UserController.ts /* eslint-disable camelcase */ // eslint-disable-next-line no-unused-vars import { Request, Response } from 'express'; import { getCustomRepository, getManager, Between, Like } from 'typeorm'; import moment from 'moment'; import logger from '@app/logger'; import User from '@app/db/entity/User'; import UserRepository from '@app/db/repository/UserRepository'; import UserPasswordResetRepository from '@app/db/repository/UserPasswordResetRepository'; import { DuplicateEntityError, UserNotVerifiedError, InvalidTokenException, } from '@app/common/error'; import { ResponseHelper, HttpStatusCode, JWTHelper } from '@app/helper'; // eslint-disable-next-line no-unused-vars import UserPasswordReset from '@app/db/entity/UserPasswordReset'; // eslint-disable-next-line no-unused-vars import UserFriend from '@app/db/entity/UserFriend'; export default class UserController { public static find(req: Request, res: Response): void { const { limit, offset, sort, sort_order, id, username, role, verified, 
name, surname, gender, date_of_birth, created_at, } = req.query; getManager() .find(User, { ...(limit !== undefined && { take: (limit as unknown) as number }), ...(offset !== undefined && { skip: (offset as unknown) as number }), ...(sort !== undefined && sort_order !== undefined && { order: { [sort as keyof User]: sort_order, }, }), loadRelationIds: true, where: { ...(id !== undefined && { id }), ...(username !== undefined && { username: Like(`%${username}%`) }), ...(role !== undefined && { role }), ...(verified !== undefined ? { verified } : { verified: true }), ...(name !== undefined && { name: Like(`%${name}%`) }), ...(surname !== undefined && { surname: Like(`%${surname}%`) }), ...(gender !== undefined && { gender }), ...(date_of_birth !== undefined && { date_of_birth }), ...(created_at !== undefined && { created_at: Between( moment(`${created_at}T00:00:00.000`), moment(`${created_at}T23:59:59.999`) ), }), }, }) .then((users) => { // eslint-disable-next-line no-param-reassign users = users.map((user) => { // eslint-disable-next-line no-param-reassign user.friends = (user.friends.length as unknown) as UserFriend[]; return user; }); logger.info(`Found ${users.length} Users`); ResponseHelper.send(res, HttpStatusCode.OK, users); }) .catch((ex) => { logger.warn(`Failed to find Users due to ${ex.message}`); ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static findById(req: Request, res: Response): void { const { id } = req.params; getCustomRepository(UserRepository) .findOneAndVerifiedOrFail(id, { loadRelationIds: true }) .then((user) => { // eslint-disable-next-line no-param-reassign user.friends = (user.friends.length as unknown) as UserFriend[]; logger.info(`Found User ${user.id}`); ResponseHelper.send(res, HttpStatusCode.OK, user); }) .catch((ex) => { logger.warn(`Failed to find User ${id} due to ${ex.message}`); if (ex.name === 'EntityNotFound' || ex instanceof UserNotVerifiedError) ResponseHelper.send(res, 
HttpStatusCode.NOT_FOUND); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static me(req: Request, res: Response): void { const id = req.user?.id ? req.user.id : ''; getCustomRepository(UserRepository) .findOneAndVerifiedOrFail(id, { loadRelationIds: true }) .then((user) => { // eslint-disable-next-line no-param-reassign user.friends = (user.friends.length as unknown) as UserFriend[]; logger.info(`Found User me ${user.id}`); ResponseHelper.send(res, HttpStatusCode.OK, user); }) .catch((ex) => { logger.warn(`Failed to find User me ${id} due to ${ex.message}`); if (ex.name === 'EntityNotFound' || ex instanceof UserNotVerifiedError) ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static create(req: Request, res: Response): void { const user: User = req.app.locals.User; getCustomRepository(UserRepository) .saveOrFail(user) .then((newUser) => { logger.info(`Created User ${newUser.id}`); ResponseHelper.send(res, HttpStatusCode.CREATED, newUser.id); }) .catch((ex) => { logger.warn(`Failed to create User due to ${ex.message}`); if (ex instanceof DuplicateEntityError) ResponseHelper.send(res, HttpStatusCode.CONFLICT, ex.errors); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static signIn(req: Request, res: Response): void { const user: User = getManager().create(User, { username: req.body.username, password: <PASSWORD>, }); getCustomRepository(UserRepository) .signInOrFail(user) .then(async (token) => { logger.info( `Authentication with credentials succeeded for User ${(await JWTHelper.verify(token)).id}` ); ResponseHelper.send(res, HttpStatusCode.OK, token); }) .catch((ex) => { logger.warn(`Failed to authenticate User with credentials due to ${ex.message}`); if (ex instanceof UserNotVerifiedError) ResponseHelper.send(res, HttpStatusCode.FORBIDDEN, ex.id); else ResponseHelper.send(res, HttpStatusCode.UNAUTHORIZED); }); } 
public static passwordResetRequest(req: Request, res: Response): void { const { email } = req.params; getManager() .findOneOrFail(User, { where: { email }, select: ['id', 'username', 'email'] }) .then((user) => getCustomRepository(UserPasswordResetRepository).request(user)) .then((userPasswordReset) => { logger.info(`Request reset password sended for User ${userPasswordReset.user.id}`); ResponseHelper.send(res, HttpStatusCode.OK); }) .catch((ex) => { logger.warn(`Failed sending reset password request for User ${email} due to ${ex.message}`); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static passwordReset(req: Request, res: Response): void { const userPasswordReset: UserPasswordReset = req.app.locals.UserPasswordReset; getCustomRepository(UserPasswordResetRepository) .change(userPasswordReset) .then((user) => { logger.info(`Updated User ${user.id} password`); ResponseHelper.send(res, HttpStatusCode.OK); }) .catch((ex) => { logger.warn( `Failed to update User ${userPasswordReset.token} password due to ${ex.message}` ); if (ex.name === 'EntityNotFound' || ex instanceof InvalidTokenException) ResponseHelper.send(res, HttpStatusCode.UNAUTHORIZED); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static update(req: Request, res: Response): void { const user: User = req.app.locals.User; user.id = req.user?.id ? 
req.user.id : ''; getCustomRepository(UserRepository) .updateOrFail(user) .then((upUser) => { logger.info(`Updated User ${upUser.id}`); ResponseHelper.send(res, HttpStatusCode.OK); }) .catch((ex) => { logger.warn(`Failed to update User ${user.id} due to ${ex.message}`); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else if (ex instanceof DuplicateEntityError) ResponseHelper.send(res, HttpStatusCode.CONFLICT, ex.errors); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static updateAvatar(req: Request, res: Response): void { const id: string = req.user?.id ? req.user.id : ''; getCustomRepository(UserRepository) .updateAvataOrFail(getManager().create(User, { id }), req.file) .then((user) => { logger.info(`Changed avatar for User ${user.id} to ${user.avatar}`); ResponseHelper.send(res, HttpStatusCode.OK, user.avatar); }) .catch((ex) => { logger.error(`Failed to change avatar for User ${id} due to ${ex.message}`); ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static delete(req: Request, res: Response): void { const id: string = req.user?.id ? 
req.user.id : ''; getCustomRepository(UserRepository) .deleteOrFail(getManager().create(User, { id })) .then(() => { logger.info(`Deleted User ${id}`); ResponseHelper.send(res, HttpStatusCode.OK); }) .catch((ex) => { logger.warn(`Failed to delete User ${id} due to ${ex.message}`); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } } <file_sep>/src/route/api/v1/auth/index.ts import { Router } from 'express'; import user from './user'; // eslint-disable-next-line camelcase import user_friend from './user_friend'; import puppy from './puppy'; // eslint-disable-next-line camelcase import animal_personality from './animal_personality'; // eslint-disable-next-line camelcase import animal_specie from './animal_specie'; // eslint-disable-next-line camelcase import animal_breed from './animal_breed'; // eslint-disable-next-line camelcase import animal_place from './animal_place'; const router = Router(); router.use('/user', user); router.use('/user_friend', user_friend); router.use('/puppy', puppy); router.use('/animal_personality', animal_personality); router.use('/animal_specie', animal_specie); router.use('/animal_breed', animal_breed); router.use('/animal_place', animal_place); export default router; <file_sep>/src/common/validator/index.ts export { default as HasNoWhitespace } from './HasNoWhitespace'; export { default as IsValidAnimalSpecie } from './IsValidAnimalSpecie'; export { default as IsValidAnimalPersonalityArray } from './IsValidAnimalPersonalityArray'; export { default as IsValidAnimalBreedArray } from './IsValidAnimalBreedArray'; export { default as IsAnimalBreedArrayBelongToAnimalSpecie } from './IsAnimalBreedArrayBelongToAnimalSpecie'; <file_sep>/src/route/api/status.ts // eslint-disable-next-line no-unused-vars import { Router, Request, Response } from 'express'; import { HttpStatusCode } from '@app/helper'; const router = Router(); const status 
= (_req: Request, res: Response) => { res.status(HttpStatusCode.OK.code).end(); }; router.get('/', status); router.head('/', status); export default router; <file_sep>/src/common/types.ts // eslint-disable-next-line no-unused-vars import { UserRole } from '@app/db/entity/User'; export namespace UTIL { export type ValueOf<T> = T[keyof T]; } export namespace JWT { export interface Payload { id: string; role: UserRole; } } <file_sep>/src/controller/UserFriendController.ts /* eslint-disable camelcase */ // eslint-disable-next-line no-unused-vars import { Request, Response } from 'express'; import { getManager, getCustomRepository, Between } from 'typeorm'; import moment from 'moment'; import UserFriend from '@app/db/entity/UserFriend'; import User from '@app/db/entity/User'; import logger from '@app/logger'; import { ResponseHelper, HttpStatusCode } from '@app/helper'; import UserFriendRepository from '@app/db/repository/UserFriendRepository'; import { DuplicateEntityError } from '@app/common/error'; export default class UserFriendController { public static find(req: Request, res: Response): void { const { limit, offset, sort, sort_order, type, created_at } = req.query; const user: User = getManager().create(User, { id: req.user?.id ? req.user.id : '' }); const friend: User | undefined = req.query.friend !== undefined ? 
getManager().create(User, { id: req.query.friend as string }) : undefined; getManager() .find(UserFriend, { ...(limit !== undefined && { take: (limit as unknown) as number }), ...(offset !== undefined && { skip: (offset as unknown) as number }), ...(sort !== undefined && sort_order !== undefined && { order: { [sort as keyof UserFriend]: sort_order, }, }), loadRelationIds: true, where: { user, ...(friend !== undefined && { friend }), ...(type !== undefined && { type }), ...(created_at !== undefined && { created_at: Between( moment(`${created_at}T00:00:00.000`), moment(`${created_at}T23:59:59.999`) ), }), }, }) .then((userFriends) => { logger.info(`Found ${userFriends.length} User Friends of ${user.id}`); ResponseHelper.send(res, HttpStatusCode.OK, userFriends); }) .catch((ex) => { logger.warn(`Failed to find User Friends of ${user.id} due to ${ex.message}`); ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static findById(req: Request, res: Response): void { const user: User = getManager().create(User, { id: req.user?.id ? req.user.id : '' }); const friend: User = getManager().create(User, { id: req.params.id }); getManager() .findOneOrFail( UserFriend, { user, friend, }, { loadRelationIds: true, } ) .then((userFriend) => { logger.info(`Found User Friend ${userFriend.friend} of ${user.id}`); ResponseHelper.send(res, HttpStatusCode.OK, userFriend); }) .catch((ex) => { logger.warn(`Failed to find User Friend ${friend.id} of ${user.id} due to ${ex.message}`); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static create(req: Request, res: Response): void { const userFriend: UserFriend = req.app.locals.UserFriend; userFriend.user = getManager().create(User, { id: req.user?.id ? 
req.user.id : '' }); userFriend.friend = getManager().create(User, { id: (userFriend.friend as unknown) as string }); getCustomRepository(UserFriendRepository) .saveOrFail(userFriend) .then((newUserFriend) => { logger.info(`Created User Friend ${newUserFriend.friend.id} of ${newUserFriend.user.id}`); ResponseHelper.send(res, HttpStatusCode.CREATED, newUserFriend.friend.id); }) .catch((ex) => { logger.warn( `Failed to create User Friend ${userFriend.friend.id} of ${userFriend.user.id} due to ${ex.message}` ); if (ex instanceof DuplicateEntityError) ResponseHelper.send(res, HttpStatusCode.CONFLICT, ex.errors); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static update(req: Request, res: Response): void { const userFriend: UserFriend = req.app.locals.UserFriend; userFriend.user = getManager().create(User, { id: req.user?.id ? req.user.id : '' }); userFriend.friend = getManager().create(User, { id: req.params.id }); getCustomRepository(UserFriendRepository) .updateOrFail(userFriend) .then((upUserFriend) => { logger.info(`Updated User Friend ${upUserFriend.friend.id} of ${upUserFriend.user.id}`); ResponseHelper.send(res, HttpStatusCode.OK); }) .catch((ex) => { logger.warn( `Failed to update User Friend ${userFriend.friend.id} of ${userFriend.user.id} due to ${ex.message}` ); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else if (ex instanceof DuplicateEntityError) ResponseHelper.send(res, HttpStatusCode.CONFLICT, ex.errors); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static delete(req: Request, res: Response): void { const userFriend: UserFriend = getManager().create(UserFriend, { user: getManager().create(User, { id: req.user?.id ? 
req.user.id : '' }), friend: getManager().create(User, { id: req.params.id }), }); getCustomRepository(UserFriendRepository) .deleteOrFail(userFriend) .then(() => { logger.info(`Deleted User Friend ${userFriend.friend.id} of ${userFriend.user.id}`); ResponseHelper.send(res, HttpStatusCode.OK); }) .catch((ex) => { logger.warn( `Failed to delete User Friend ${userFriend.friend.id} of ${userFriend.user.id} due to ${ex.message}` ); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } } <file_sep>/src/helper/responseHelper.ts // eslint-disable-next-line no-unused-vars import { Response as ExpressResponse } from 'express'; // eslint-disable-next-line no-unused-vars import HttpStatusCode, { Status } from './httpStatusCode'; export interface Response { status: Status; // eslint-disable-next-line camelcase is_success: boolean; // eslint-disable-next-line camelcase status_code: number; // eslint-disable-next-line camelcase status_code_name: string; data: object; } export default class ResponseHelper { public static send( res: ExpressResponse, httpStatusCode: HttpStatusCode, data: any = undefined ): void { res .status(httpStatusCode.code) .json(<Response>{ status: httpStatusCode.status(), is_success: httpStatusCode.isSuccess(), status_code: httpStatusCode.code, status_code_name: httpStatusCode.name, data, }) .end(); } } <file_sep>/src/db/entity/Puppy.ts /* eslint-disable camelcase */ import { Entity, PrimaryGeneratedColumn, Index, Column, CreateDateColumn, UpdateDateColumn, ManyToOne, JoinColumn, BeforeInsert, ManyToMany, JoinTable, Check, BeforeUpdate, } from 'typeorm'; import { IsString, Length, IsEmpty, IsOptional, IsEnum, IsInt, IsPositive, Min, Max, IsISO8601, IsArray, ArrayUnique, } from 'class-validator'; import { IsValidAnimalSpecie, IsValidAnimalPersonalityArray, IsValidAnimalBreedArray, IsAnimalBreedArrayBelongToAnimalSpecie, } from '@app/common/validator'; 
import User from './User'; import AnimalSpecie from './AnimalSpecie'; import AnimalBreed from './AnimalBreed'; import AnimalPersonality from './AnimalPersonality'; export enum PuppyValidationGroup { // eslint-disable-next-line no-unused-vars CREATION = 'creation', // eslint-disable-next-line no-unused-vars UPDATE = 'update', } export enum PuppyGender { // eslint-disable-next-line no-unused-vars MALE = 'male', // eslint-disable-next-line no-unused-vars FEMALE = 'female', } /** * Mininum puppy weight in grams */ export const PUPPY_MIN_WEIGHT: number = 0; /** * Maximum puppy weight in grams * Weight of a Blue Whale */ export const PUPPY_MAX_WEIGHT: number = 190000000; @Entity('puppy') @Check(`weight >= ${PUPPY_MIN_WEIGHT} AND weight <= ${PUPPY_MAX_WEIGHT}`) export default class Puppy { @PrimaryGeneratedColumn('increment', { name: 'id' }) @Index() @IsEmpty({ always: true }) id!: number; @Column({ name: 'name', length: 64 }) @IsString({ groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) @Length(1, 64, { groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) @IsOptional({ groups: [PuppyValidationGroup.UPDATE] }) name!: string; @Column({ name: 'gender', type: 'enum', enum: PuppyGender, update: false, }) @IsEnum(PuppyGender, { groups: [PuppyValidationGroup.CREATION] }) @IsEmpty({ groups: [PuppyValidationGroup.UPDATE] }) gender!: PuppyGender; @Column({ name: 'date_of_birth', type: 'date', nullable: true, default: undefined }) @IsISO8601( { strict: true }, { groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] } ) @IsOptional({ groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) date_of_birth!: Date; @Column({ name: 'weight', type: 'integer', nullable: true, default: undefined }) @IsInt({ groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) @Min(PUPPY_MIN_WEIGHT, { groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE], }) @Max(PUPPY_MAX_WEIGHT, { groups: 
[PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE], }) @IsOptional({ groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) weight?: number | null; @Column({ name: 'avatar', length: 256 }) @IsEmpty({ always: true }) avatar!: string; @ManyToOne(() => User, (user) => user.puppies, { nullable: false, onDelete: 'CASCADE' }) @JoinColumn({ name: 'user_id' }) @IsEmpty({ always: true }) user!: User; @ManyToOne(() => AnimalSpecie, undefined, { nullable: false }) @JoinColumn({ name: 'specie_id' }) @IsInt({ groups: [PuppyValidationGroup.CREATION] }) @IsPositive({ groups: [PuppyValidationGroup.CREATION] }) @IsValidAnimalSpecie({ groups: [PuppyValidationGroup.CREATION] }) @IsEmpty({ groups: [PuppyValidationGroup.UPDATE] }) specie!: AnimalSpecie; @ManyToMany(() => AnimalBreed) @JoinTable({ name: 'puppy_animal_breed', joinColumn: { name: 'puppy_id', }, inverseJoinColumn: { name: 'breed_id', }, }) @IsArray({ groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) @ArrayUnique({ groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) @IsInt({ each: true, groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) @IsPositive({ each: true, groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) @IsValidAnimalBreedArray({ groups: [PuppyValidationGroup.CREATION] }) @IsAnimalBreedArrayBelongToAnimalSpecie({ groups: [PuppyValidationGroup.CREATION] }) @IsOptional({ groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) breeds!: AnimalBreed[]; @ManyToMany(() => AnimalPersonality) @JoinTable({ name: 'puppy_animal_personality', joinColumn: { name: 'puppy_id', }, inverseJoinColumn: { name: 'personality_id', }, }) @IsArray({ groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) @ArrayUnique({ groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) @IsInt({ each: true, groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) @IsPositive({ 
each: true, groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE], }) @IsValidAnimalPersonalityArray({ groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE], }) @IsOptional({ groups: [PuppyValidationGroup.CREATION, PuppyValidationGroup.UPDATE] }) personalities!: AnimalPersonality[]; @CreateDateColumn({ name: 'created_at', update: false }) @IsEmpty({ always: true }) created_at!: Date; @UpdateDateColumn({ name: 'updated_at', select: false }) @IsEmpty({ always: true }) updated_at!: Date; @BeforeInsert() @BeforeUpdate() toUndefinedIfEmpty() { if (typeof this.weight === 'number' && this.weight === 0) this.weight = 0; } @BeforeInsert() defaultAvatar() { this.avatar = `https://res.cloudinary.com/dxiqa0xwa/image/upload/v1586709310/happypuppy/upload/puppy/avatar/${ !this.breeds || this.breeds.length === 0 ? this.specie.name.toLowerCase() : `${this.specie.name}_${this.breeds[0].name.replace(/ /g, '_')}`.toLowerCase() }.png`; } } <file_sep>/src/db/entity/User.ts /* eslint-disable camelcase */ import { Entity, PrimaryGeneratedColumn, Index, Column, CreateDateColumn, UpdateDateColumn, BeforeInsert, BeforeUpdate, OneToMany, } from 'typeorm'; import { IsString, IsEmail, IsEnum, Length, IsEmpty, IsOptional, IsMobilePhone, IsISO8601, isEmpty, } from 'class-validator'; import { CryptUtil } from '@app/util'; import { HasNoWhitespace } from '@app/common/validator'; import Puppy from './Puppy'; import UserFriend from './UserFriend'; export enum UserValidationGroup { // eslint-disable-next-line no-unused-vars CREATION = 'creation', // eslint-disable-next-line no-unused-vars SIGN_IN = 'sign_in', // eslint-disable-next-line no-unused-vars UPDATE = 'update', } export enum UserGender { // eslint-disable-next-line no-unused-vars MALE = 'male', // eslint-disable-next-line no-unused-vars FEMALE = 'female', // eslint-disable-next-line no-unused-vars UNKNOWN = 'unknown', } export enum UserRole { // eslint-disable-next-line no-unused-vars ADMIN = 'admin', // 
eslint-disable-next-line no-unused-vars STANDARD = 'standard', } @Entity('user') export default class User { @PrimaryGeneratedColumn('uuid', { name: 'id' }) @Index() @IsEmpty({ always: true }) id!: string; @Column({ name: 'username', length: 128, unique: true, update: false }) @IsString({ groups: [UserValidationGroup.CREATION, UserValidationGroup.SIGN_IN] }) @Length(1, 128, { groups: [UserValidationGroup.CREATION, UserValidationGroup.SIGN_IN] }) @HasNoWhitespace({ groups: [UserValidationGroup.CREATION, UserValidationGroup.SIGN_IN] }) @IsEmpty({ groups: [UserValidationGroup.UPDATE] }) username!: string; @Column({ name: 'password', length: 72, select: false }) @IsString({ groups: [UserValidationGroup.CREATION, UserValidationGroup.SIGN_IN, UserValidationGroup.UPDATE], }) @Length(8, 64, { groups: [UserValidationGroup.CREATION, UserValidationGroup.SIGN_IN, UserValidationGroup.UPDATE], }) @IsOptional({ groups: [UserValidationGroup.UPDATE] }) password!: string; @Column({ name: 'role', type: 'enum', enum: UserRole, default: UserRole.STANDARD, }) @IsEnum(UserRole, { groups: [UserValidationGroup.UPDATE] }) @IsEmpty({ groups: [UserValidationGroup.CREATION] }) @IsOptional({ groups: [UserValidationGroup.UPDATE] }) role!: UserRole; @Column({ name: 'verified', default: false, select: false }) @IsEmpty({ always: true }) verified!: boolean; @Column({ name: 'name', type: 'varchar', length: 64, nullable: true, default: undefined }) @IsString({ groups: [UserValidationGroup.CREATION, UserValidationGroup.UPDATE] }) @Length(0, 64, { groups: [UserValidationGroup.CREATION, UserValidationGroup.UPDATE] }) @IsOptional({ groups: [UserValidationGroup.CREATION, UserValidationGroup.UPDATE] }) name?: string | null; @Column({ name: 'surname', type: 'varchar', length: 64, nullable: true, default: undefined }) @IsString({ groups: [UserValidationGroup.CREATION, UserValidationGroup.UPDATE] }) @Length(0, 64, { groups: [UserValidationGroup.CREATION, UserValidationGroup.UPDATE] }) @IsOptional({ groups: 
[UserValidationGroup.CREATION, UserValidationGroup.UPDATE] }) surname?: string | null; @Column({ name: 'gender', type: 'enum', enum: UserGender, default: UserGender.UNKNOWN, }) @IsEnum(UserGender, { groups: [UserValidationGroup.CREATION, UserValidationGroup.UPDATE] }) @IsOptional({ groups: [UserValidationGroup.CREATION, UserValidationGroup.UPDATE] }) gender!: UserGender; @Column({ name: 'date_of_birth', type: 'date', nullable: true, default: undefined }) @IsISO8601( { strict: true }, { groups: [UserValidationGroup.CREATION, UserValidationGroup.UPDATE] } ) @IsOptional({ groups: [UserValidationGroup.CREATION, UserValidationGroup.UPDATE] }) date_of_birth!: Date; @Column({ name: 'email', length: 128, unique: true, select: false, update: false }) @IsEmail(undefined, { groups: [UserValidationGroup.CREATION] }) @Length(3, 128, { groups: [UserValidationGroup.CREATION] }) @IsEmpty({ groups: [UserValidationGroup.UPDATE] }) email!: string; @Column({ name: 'phone', length: 15, unique: true, select: false, update: false }) @IsMobilePhone(undefined, { strictMode: true }, { groups: [UserValidationGroup.CREATION] }) @Length(8, 15, { groups: [UserValidationGroup.CREATION] }) @IsEmpty({ groups: [UserValidationGroup.UPDATE] }) phone!: string; @Column({ name: 'avatar', length: 256 }) @IsEmpty({ always: true }) avatar!: string; @OneToMany(() => Puppy, (puppy) => puppy.user) @IsEmpty({ always: true }) puppies!: Puppy[]; @OneToMany(() => UserFriend, (userFriend) => userFriend.user) @IsEmpty({ always: true }) friends!: UserFriend[]; @CreateDateColumn({ name: 'created_at', update: false }) @IsEmpty({ always: true }) created_at!: Date; @UpdateDateColumn({ name: 'updated_at', select: false }) @IsEmpty({ always: true }) updated_at!: Date; @BeforeInsert() @BeforeUpdate() toUndefinedIfEmpty() { if (typeof this.name === 'string' && isEmpty(this.name)) this.name = null; if (typeof this.surname === 'string' && isEmpty(this.surname)) this.surname = null; } @BeforeInsert() @BeforeUpdate() 
capitalizeName() { if (this.name) this.name = this.name.replace(/^\w/, (c) => c.toUpperCase()); } @BeforeInsert() @BeforeUpdate() capitalizeSurname() { if (this.surname) this.surname = this.surname.replace(/^\w/, (c) => c.toUpperCase()); } @BeforeInsert() @BeforeUpdate() async hashPassword() { if (this.password) this.password = await CryptUtil.hash(this.password); } @BeforeInsert() defaultAvatar() { if (!this.gender) this.gender = UserGender.UNKNOWN; this.avatar = `https://res.cloudinary.com/dxiqa0xwa/image/upload/v1586709310/happypuppy/upload/user/avatar/${this.gender}.png`; } } <file_sep>/src/service/ImageService.ts import cloudinary from 'cloudinary'; // eslint-disable-next-line no-unused-vars import { types } from '@app/common'; import logger from '@app/logger'; import config from '@app/config'; import { ConfigurationError } from '@app/common/error'; export enum ImageType { // eslint-disable-next-line no-unused-vars AVATAR, } export interface ImageTypeFolders extends Record<ImageType, string[]> { [ImageType.AVATAR]: ['user/avatar', 'puppy/avatar']; } export const ImageTypeOptions: Record<ImageType, cloudinary.UploadApiOptions> = { [ImageType.AVATAR]: { width: 512, height: 512, crop: 'fill', quality: 'auto', }, }; export interface ImageOptions<T extends ImageType> { type: T; folder: types.UTIL.ValueOf<ImageTypeFolders[T]>; uploadOptions?: cloudinary.UploadApiOptions; } export default class ImageService { private static options: cloudinary.UploadApiOptions; private static configured: boolean = false; public static configure(): void { if (this.configured) return; cloudinary.v2.config({ cloud_name: config.SERVICE.IMAGE.CLOUD, api_key: config.SERVICE.IMAGE.KEY, api_secret: config.SERVICE.IMAGE.SECRET, }); this.options = { resource_type: 'image', format: 'png', unique_filename: true, discard_original_filename: true, folder: `happypuppy/upload/`, }; this.configured = true; logger.info('Image service configured'); } public static upload<T extends ImageType>( image: 
Express.Multer.File, options: ImageOptions<T> ): Promise<cloudinary.UploadApiResponse> { if (!this.configured) throw new ConfigurationError('Image Service is not configured'); return new Promise((resolve, reject) => { cloudinary.v2.uploader .upload_stream(this.transformOptions(options), (err, result) => { if (err) { logger.error(`Error uploading image due to ${err.message}`); reject(err); } else { logger.info(`Uploaded image ${JSON.stringify(result)}`); resolve(result); } }) .end(image.buffer); }); } private static transformOptions<T extends ImageType>( options: ImageOptions<T> ): cloudinary.UploadApiOptions { // Construct correct folder options object const folder: cloudinary.UploadApiOptions = { folder: (this.options.folder as string) + (options.folder as string), }; // Delete possibly forced options Object.keys({ ...this.options, ...ImageTypeOptions[options.type] }).forEach((key) => { // eslint-disable-next-line no-param-reassign if (options?.uploadOptions && options.uploadOptions[key]) delete options.uploadOptions[key]; }); // Add correct folder to upload options // eslint-disable-next-line no-param-reassign options.uploadOptions = options.uploadOptions !== undefined ? 
Object.assign(options.uploadOptions, folder) : folder; return { ...this.options, ...ImageTypeOptions[options.type], ...options.uploadOptions }; } } <file_sep>/src/middleware/ErrorMiddleware.ts // eslint-disable-next-line no-unused-vars import { Response, Request, NextFunction } from 'express'; import { UnauthorizedError } from 'express-jwt'; import multer from 'multer'; import logger from '@app/logger'; import { EnvUtil } from '@app/util'; import { ResponseHelper, HttpStatusCode } from '@app/helper'; import { EmptyFileError } from '@app/common/error'; export default class ErrorMiddleware { // eslint-disable-next-line no-unused-vars public static handle(err: Error, _req: Request, res: Response, _next: NextFunction): void { if (err instanceof UnauthorizedError) { logger.warn(`Authentication with JWT failed due to ${err.message}`); ResponseHelper.send(res, HttpStatusCode.UNAUTHORIZED); } else if (err instanceof SyntaxError) { logger.warn(`Malformed JSON due to ${err.message}`); ResponseHelper.send(res, HttpStatusCode.BAD_REQUEST, [err.message]); } else if (err instanceof multer.MulterError) { logger.warn(`File uploading error due to ${err.message}`); switch (err.code) { case 'LIMIT_PART_COUNT': case 'LIMIT_FILE_SIZE': case 'LIMIT_FILE_COUNT': case 'LIMIT_FIELD_KEY': case 'LIMIT_FIELD_VALUE': case 'LIMIT_FIELD_COUNT': case 'LIMIT_UNEXPECTED_FILE': { ResponseHelper.send(res, HttpStatusCode.BAD_REQUEST, err); break; } default: { ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); break; } } } else if (err instanceof EmptyFileError) { logger.warn(`File uploading error due to ${err.message}`); ResponseHelper.send(res, HttpStatusCode.BAD_REQUEST, err); } else { logger.error(`Internal Server error due to ${err.message}`); ResponseHelper.send( res, HttpStatusCode.INTERNAL_SERVER_ERROR, EnvUtil.isDevelopment() ? 
err.stack : undefined ); } } } <file_sep>/README.md # Happy Puppy Server [![Build Status](https://github.com/carlocorradini/happypuppy-server/workflows/build/badge.svg)](https://github.com/carlocorradini/happypuppy-server/actions) ## Members | Name | Surname | Username | MAT | | :-----: | :-------: | :--------------: | :--------: | | Carlo | Corradini | `carlocorradini` | **192451** | | Anthony | Farina | `shinally97` | **194083** | <file_sep>/src/common/index.ts export * as types from './types'; export * as error from './error'; <file_sep>/src/config/index.ts export { default } from './config'; export * as config from './config'; <file_sep>/src/common/validator/chain/IsStringArray.ts // eslint-disable-next-line no-unused-vars import { CustomValidator } from 'express-validator'; const IsStringArray: CustomValidator = (input) => { return typeof input === 'string' && /^\w+(,\w+)*$/.test(input); }; export default IsStringArray; <file_sep>/src/db/repository/UserVerificationRepository.ts // eslint-disable-next-line no-unused-vars import { AbstractRepository, EntityRepository, EntityManager, getCustomRepository } from 'typeorm'; // eslint-disable-next-line no-unused-vars import UserVerification from '@app/db/entity/UserVerification'; import User from '@app/db/entity/User'; import { OTPUtil } from '@app/util'; import config from '@app/config'; import { EntityNotFoundError, DataMismatchError, UserAlreadyVerifiedError, } from '@app/common/error'; import { JWTHelper } from '@app/helper'; import { EmailService, PhoneService } from '@app/service'; import UserRepository from './UserRepository'; @EntityRepository(UserVerification) export default class UserVerificationRepository extends AbstractRepository<UserVerification> { public findOneOrFail(user: User, entityManager: EntityManager): Promise<UserVerification> { const callback = async (em: EntityManager) => { const foundVerication = await em .createQueryBuilder(UserVerification, 'uv') .leftJoinAndSelect('uv.user', 'user') 
.addSelect('user.verified') .addSelect('user.email') .addSelect('user.phone') .where('uv.user.id = :user_id', { user_id: user.id }) .getOne(); if (foundVerication === undefined) throw new EntityNotFoundError('No verification was found'); if (foundVerication.user.verified) throw new UserAlreadyVerifiedError('User was already verified'); return foundVerication; }; return entityManager === undefined ? this.manager.transaction(callback) : callback(entityManager); } public saveOrFail(user: User, entityManager?: EntityManager): Promise<UserVerification> { const callback = async (em: EntityManager) => { const userVerification: UserVerification = await em.save( UserVerification, em.create(UserVerification, { user, otp_email: await OTPUtil.digits(config.SECURITY.OTP.EMAIL.DIGITS), otp_phone: await OTPUtil.digits(config.SECURITY.OTP.PHONE.DIGITS), }) ); // TODO Change from await EmailService.send({ from: '"<NAME>" <<EMAIL>>', to: user.email, subject: 'Happy Puppy OTP code', // TODO Remove Typescript ignore // @ts-ignore template: 'otp', context: { username: user.username, otp_code: userVerification.otp_email, }, }); await PhoneService.send({ from: config.SERVICE.PHONE.NUMBER, to: user.phone, body: `${user.username} OTP code: ${userVerification.otp_phone}`, }); return Promise.resolve(userVerification); }; return entityManager === undefined ? this.manager.transaction(callback) : callback(entityManager); } public verifyResendOrFail(user: User, entityManager?: EntityManager): Promise<UserVerification> { const callback = async (em: EntityManager) => { return this.saveOrFail((await this.findOneOrFail(user, em)).user, em); }; return entityManager === undefined ? 
this.manager.transaction(callback) : callback(entityManager); } public verifyOrFail( userVerification: UserVerification, entityManager?: EntityManager ): Promise<string> { const callback = async (em: EntityManager) => { const foundVerication = await this.findOneOrFail(userVerification.user, em); if ( foundVerication.otp_email !== userVerification.otp_email || foundVerication.otp_phone !== userVerification.otp_phone ) { throw new DataMismatchError('OTP codes does not match'); } const user: User = await getCustomRepository(UserRepository).updateOrFail( em.create(User, { id: foundVerication.user.id, verified: true }) ); return JWTHelper.sign({ id: user.id, role: user.role, }); }; return entityManager === undefined ? this.manager.transaction(callback) : callback(entityManager); } } <file_sep>/src/route/api/v1/index.ts import { Router } from 'express'; import jwt from 'express-jwt'; import config from '@app/config'; // eslint-disable-next-line camelcase import user_verification from './user_verification'; import auth from './auth'; const router = Router(); router.use('/user_verification', user_verification); router.use( '/auth', jwt({ secret: config.SECURITY.JWT.SECRET, }).unless({ path: [ { url: '/api/v1/auth/user', methods: ['POST'] }, { url: '/api/v1/auth/user/sign_in', methods: ['POST'], }, { url: /\/api\/v1\/auth\/user\/password_reset\/*/, methods: ['POST'], }, ], }), auth ); export default router; <file_sep>/src/middleware/NotFoundMiddleware.ts // eslint-disable-next-line no-unused-vars import { Response, Request, NextFunction } from 'express'; import { ResponseHelper, HttpStatusCode } from '@app/helper'; export default class NotFoundMiddleware { // eslint-disable-next-line no-unused-vars public static handle(_req: Request, res: Response, _next: NextFunction) { ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); } } <file_sep>/src/common/error/EmptyFileError.ts /* eslint-disable camelcase */ export default class EmptyFileError extends Error { public readonly 
file_name: string; constructor(m: string, file_name: string) { super(); this.name = EmptyFileError.name; this.message = m; this.file_name = file_name; Object.setPrototypeOf(this, new.target.prototype); } } <file_sep>/src/route/api/v1/auth/puppy.ts import { Router } from 'express'; import { checkSchema } from 'express-validator'; import Puppy, { PuppyValidationGroup, PuppyGender } from '@app/db/entity/Puppy'; import { PuppyController } from '@app/controller'; import { ValidatorMiddleware, FileMiddleware } from '@app/middleware'; import { IsNumberArray } from '@app/common/validator/chain'; const router = Router(); router.get( '', ValidatorMiddleware.validateChain( checkSchema({ limit: { in: ['query'], isInt: true, toInt: true, optional: true, }, offset: { in: ['query'], isInt: true, toInt: true, optional: true, }, sort: { in: ['query'], isString: true, optional: true, }, sort_order: { in: ['query'], isString: true, isIn: { options: ['ASC, DESC'], }, optional: true, }, id: { in: ['query'], isInt: true, toInt: true, optional: true, }, name: { in: ['query'], isString: true, optional: true, }, gender: { in: ['query'], isIn: { options: [Object.values(PuppyGender)] }, optional: true, }, date_of_birth: { in: ['query'], isISO8601: true, optional: true, }, weight: { in: ['query'], isInt: true, toInt: true, optional: true, }, user: { in: ['query'], isUUID: true, optional: true, }, specie: { in: ['query'], isInt: true, toInt: true, optional: true, }, created_at: { in: ['query'], isISO8601: true, optional: true, }, breeds: { in: ['query'], isString: true, custom: { options: IsNumberArray, }, optional: true, }, personalities: { in: ['query'], isString: true, custom: { options: IsNumberArray, }, optional: true, }, }) ), PuppyController.find ); router.get( '/:id', ValidatorMiddleware.validateChain( checkSchema({ id: { in: ['params'], isInt: true, errorMessage: 'Invalid Puppy id', }, }) ), PuppyController.findById ); router.post( '', ValidatorMiddleware.validateClass(Puppy, 
PuppyValidationGroup.CREATION), PuppyController.create ); router.patch( '/:id', ValidatorMiddleware.validateChain( checkSchema({ id: { in: ['params'], isInt: true, errorMessage: 'Invalid Puppy id', }, }) ), ValidatorMiddleware.validateClass(Puppy, PuppyValidationGroup.UPDATE), PuppyController.update ); router.patch( '/:id/avatar', ValidatorMiddleware.validateChain( checkSchema({ id: { in: ['params'], isInt: true, errorMessage: 'Invalid Puppy id', }, }) ), FileMiddleware.memoryLoader.single('image'), ValidatorMiddleware.validateFileSingle('image'), PuppyController.updateAvatar ); router.delete( '/:id', ValidatorMiddleware.validateChain( checkSchema({ id: { in: ['params'], isInt: true, errorMessage: 'Invalid Puppy id', }, }) ), PuppyController.delete ); export default router; <file_sep>/src/db/repository/UserPasswordResetRepository.ts // eslint-disable-next-line no-unused-vars import { AbstractRepository, EntityManager, EntityRepository, getCustomRepository } from 'typeorm'; import moment from 'moment'; import config from '@app/config'; import UserPasswordReset from '@app/db/entity/UserPasswordReset'; // eslint-disable-next-line no-unused-vars import User from '@app/db/entity/User'; import { OTPUtil } from '@app/util'; import { EmailService } from '@app/service'; import { InvalidTokenException } from '@app/common/error'; import UserRepository from './UserRepository'; @EntityRepository(UserPasswordReset) export default class UserPasswordResetRepository extends AbstractRepository<UserPasswordReset> { public request(user: User, entityManager?: EntityManager): Promise<UserPasswordReset> { const callback = async (em: EntityManager) => { const userPasswordReset: UserPasswordReset = await em.save( UserPasswordReset, em.create(UserPasswordReset, { user, token: await OTPUtil.alphanumerical(config.SECURITY.TOKEN.PASSWORD.LENGTH), used: false, }) ); // TODO Change from await EmailService.send({ from: '"Happy Puppy" <<EMAIL>>', to: user.email, subject: 'Happy Puppy password 
  /**
   * Consume a password-reset token and set the user's new password.
   *
   * Fails when the token is unknown, already used, or older than the
   * configured expiry window (minutes since `updated_at`).
   *
   * @param userPasswordReset - carries the reset token (and the new password).
   * @param entityManager - optional transaction context to join.
   * @returns the updated User.
   * @throws InvalidTokenException when the token is expired or spent.
   */
  public change(
    userPasswordReset: UserPasswordReset,
    entityManager?: EntityManager
  ): Promise<User> {
    const callback = async (em: EntityManager) => {
      // Only unused tokens are eligible; the user relation is joined eagerly.
      const foundUserPasswordReset: UserPasswordReset = await em.findOneOrFail(UserPasswordReset, {
        where: { token: userPasswordReset.token, used: false },
        relations: ['user'],
      });
      // Age check: token lifetime is measured from its last update, in minutes.
      if (
        foundUserPasswordReset.used ||
        moment(new Date()).diff(foundUserPasswordReset.updated_at, 'minutes') >
          config.SECURITY.TOKEN.PASSWORD.EXPIRES_IN
      ) {
        throw new InvalidTokenException('User password reset token is expired');
      }
      // NOTE(review): `<PASSWORD>` below is a redacted/corrupted token in this
      // source dump — presumably `userPasswordReset.user.password`; confirm
      // against version control before building on this line.
      foundUserPasswordReset.user.password = <PASSWORD>;
      foundUserPasswordReset.used = true;
      // Mark the token spent before persisting the new credentials.
      await em.save(UserPasswordReset, foundUserPasswordReset);
      return getCustomRepository(UserRepository).updateOrFail(foundUserPasswordReset.user, em);
    };
    return entityManager === undefined
      ? this.manager.transaction(callback)
      : callback(entityManager);
  }
  /**
   * Apply a friendship state transition for the (user, friend) pair.
   *
   * Loads both directed rows of the relationship via `findFriendMatch`
   * (throws if either side is missing), then applies one of the allowed
   * transitions based on the requested `userFriend.type` and the current
   * state of each row:
   *  - accept:   FRIEND_REQUEST / WAITING_ACCEPTANCE -> FRIEND / FRIEND
   *  - block:    FRIEND -> BLOCKED (requester's row only)
   *  - unblock:  BLOCKED -> FRIEND (requester's row only)
   * Any other combination is a no-op, after which both rows are re-saved
   * through the uniqueness-checked update path.
   *
   * @param userFriend - carries user, friend and the requested target type.
   * @param entityManager - optional transaction context to join.
   * @returns the requester-side (from) UserFriend row after the update.
   */
  public updateOrFail(userFriend: UserFriend, entityManager?: EntityManager): Promise<UserFriend> {
    const callback = async (em: EntityManager) => {
      const friendMatch = await UserFriendRepository.findFriendMatch(userFriend, em);
      // switch(true) dispatches on the first matching state combination.
      switch (true) {
        case userFriend.type === UserFriendType.FRIEND &&
          friendMatch.from.type === UserFriendType.FRIEND_REQUEST &&
          friendMatch.to.type === UserFriendType.WAITING_ACCEPTANCE: {
          // Friend request accepted
          await em.merge(UserFriend, friendMatch.from, { type: UserFriendType.FRIEND });
          await em.merge(UserFriend, friendMatch.to, { type: UserFriendType.FRIEND });
          break;
        }
        case userFriend.type === UserFriendType.BLOCKED &&
          friendMatch.from.type === UserFriendType.FRIEND: {
          // Friend blocked
          await em.merge(UserFriend, friendMatch.from, { type: UserFriendType.BLOCKED });
          break;
        }
        case userFriend.type === UserFriendType.FRIEND &&
          friendMatch.from.type === UserFriendType.BLOCKED: {
          // Friend unblocked
          await em.merge(UserFriend, friendMatch.from, { type: UserFriendType.FRIEND });
          break;
        }
        default: {
          break;
        }
      }
      // Persist both directions; `merge` above only mutated the in-memory rows.
      await UserFriendRepository.updateUnique(friendMatch.to, em);
      return UserFriendRepository.updateUnique(friendMatch.from, em);
    };
    return entityManager === undefined
      ? this.manager.transaction(callback)
      : callback(entityManager);
  }
<file_sep>/src/logger/logger.ts import path from 'path'; import { createLogger, format, transports } from 'winston'; import DailyRotateFile from 'winston-daily-rotate-file'; const logger = createLogger({ level: process.env.NODE_ENV === 'development' ? 'debug' : 'info', exitOnError: false, format: format.combine( format.label({ label: path.basename(process.mainModule !== undefined ? process.mainModule.filename : '?'), }), format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss:SSS' }) ), transports: [ new transports.Console({ format: format.combine( format((info) => { // eslint-disable-next-line no-param-reassign info.level = info.level.toUpperCase(); return info; })(), format.colorize(), format.printf( (info) => `${info.timestamp} ${info.level} [${info.label}]: ${info.message .replace(/\s+/g, ' ') .trim()}` ) ), }), new DailyRotateFile({ dirname: './log', filename: '%DATE%', extension: '.log', datePattern: 'YYYY-MM-DD', zippedArchive: true, maxSize: '1m', maxFiles: '16d', format: format.combine( format.printf( (info) => `${info.timestamp} ${info.level.toUpperCase()} [${info.label}]: ${info.message .replace(/\s+/g, ' ') .trim()}` ) ), }), ], }); logger.info(`Logger initialized at ${logger.level} level`); export default logger; <file_sep>/src/service/EmailService.ts import path from 'path'; import nodemailer from 'nodemailer'; // eslint-disable-next-line no-unused-vars import Mail from 'nodemailer/lib/mailer'; import hbs from 'nodemailer-express-handlebars'; import config from '@app/config'; import logger from '@app/logger'; import { ConfigurationError } from '@app/common/error'; export default class EmailService { private static transport: Mail; private static configured: boolean = false; public static configure(): void { if (this.configured) return; this.transport = nodemailer.createTransport({ host: config.SERVICE.EMAIL.HOST, port: config.SERVICE.EMAIL.PORT, secure: config.SERVICE.EMAIL.SECURE, auth: { user: config.SERVICE.EMAIL.USERNAME, pass: 
config.SERVICE.EMAIL.PASSWORD, }, }); this.transport.use( 'compile', hbs({ viewEngine: { extName: '.hbs', layoutsDir: path.join(__dirname, '../view/email'), partialsDir: path.join(__dirname, '../view/email'), defaultLayout: false, }, viewPath: path.join(__dirname, '../view/email'), extName: '.hbs', }) ); this.configured = true; logger.info('Email service configured'); } public static send(mailOptions: Mail.Options): Promise<any> { if (!this.configured) throw new ConfigurationError('Email Service is not configured'); return new Promise((resolve, reject) => { this.transport.sendMail(mailOptions, (err, info) => { if (err) { logger.error(`Error sending email due to ${err.message}`); reject(err); } else { logger.info(`Sended email ${JSON.stringify(info)}`); resolve(info); } }); }); } } <file_sep>/src/db/entity/AnimalSpecie.ts /* eslint-disable camelcase */ import { Entity, Index, Column, PrimaryColumn, Check, OneToMany, CreateDateColumn, UpdateDateColumn, } from 'typeorm'; import { IsEmpty } from 'class-validator'; import AnimalBreed from './AnimalBreed'; @Entity('animal_specie') @Check(`"id" > 0`) export default class AnimalSpecie { @PrimaryColumn({ name: 'id' }) @Index() id!: number; @Column({ name: 'name', length: 64, unique: true }) name!: string; @OneToMany(() => AnimalBreed, (breed) => breed.specie) breeds!: AnimalBreed[]; @CreateDateColumn({ name: 'created_at', select: false, update: false }) @IsEmpty({ always: true }) created_at!: Date; @UpdateDateColumn({ name: 'updated_at', select: false }) @IsEmpty({ always: true }) updated_at!: Date; } <file_sep>/src/route/api/v1/user_verification.ts import { Router } from 'express'; import { checkSchema } from 'express-validator'; import UserVerification from '@app/db/entity/UserVerification'; import { ValidatorMiddleware } from '@app/middleware'; import { UserVerificationController } from '@app/controller'; const router = Router(); router.post( '', ValidatorMiddleware.validateClass(UserVerification), 
UserVerificationController.verify ); router.post( '/:id/resend', ValidatorMiddleware.validateChain( checkSchema({ id: { in: ['params'], isUUID: true, errorMessage: 'Invalid User id', }, }) ), UserVerificationController.resend ); export default router; <file_sep>/src/controller/AnimalBreedController.ts /* eslint-disable camelcase */ // eslint-disable-next-line no-unused-vars import { Request, Response } from 'express'; import { getManager } from 'typeorm'; import logger from '@app/logger'; import AnimalBreed from '@app/db/entity/AnimalBreed'; import { ResponseHelper, HttpStatusCode } from '@app/helper'; export default class AnimalSpecieController { public static find(req: Request, res: Response): void { const { limit, offset, sort, sort_order, id, name, specie } = req.query; getManager() .find(AnimalBreed, { ...(limit !== undefined && { take: (limit as unknown) as number }), ...(offset !== undefined && { skip: (offset as unknown) as number }), ...(sort !== undefined && sort_order !== undefined && { order: { [sort as keyof AnimalBreed]: sort_order, }, }), loadRelationIds: true, where: { ...(id !== undefined && { id }), ...(name !== undefined && { name }), ...(specie !== undefined && { specie }), }, }) .then((animalBreeds) => { logger.info(`Found ${animalBreeds.length} Animal Breeds`); ResponseHelper.send(res, HttpStatusCode.OK, animalBreeds); }) .catch((ex) => { logger.warn(`Failed to find Animal Breeds due to ${ex.message}`); ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static findById(req: Request, res: Response): void { const { id } = req.params; getManager() .findOneOrFail(AnimalBreed, id, { loadRelationIds: true }) .then((animalBreed) => { logger.info(`Found Animal Breed ${animalBreed.id}`); ResponseHelper.send(res, HttpStatusCode.OK, animalBreed); }) .catch((ex) => { logger.warn(`Failed to find Animal Breed ${id} due to ${ex.message}`); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else 
ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } } <file_sep>/src/middleware/index.ts export { default as NotFoundMiddleware } from './NotFoundMiddleware'; export { default as ErrorMiddleware } from './ErrorMiddleware'; export { default as ValidatorMiddleware } from './ValidatorMiddleware'; export { default as FileMiddleware } from './FileMiddleware'; <file_sep>/src/middleware/FileMiddleware.ts import path from 'path'; // eslint-disable-next-line no-unused-vars import multer from 'multer'; import convert from 'convert-units'; export default class FileMiddleware { public static readonly memoryLoader = multer({ storage: multer.memoryStorage(), limits: { fileSize: convert(2).from('Mb').to('b'), }, }); public static readonly diskLoader = multer({ storage: multer.diskStorage({ destination: (_req, _file, cb) => { cb(null, path.join(__dirname, '../tmp/upload')); }, }), limits: { fileSize: convert(64).from('Mb').to('b'), }, }); } <file_sep>/src/common/validator/IsValidAnimalSpecie.ts /* eslint-disable class-methods-use-this */ import { ValidatorConstraint, // eslint-disable-next-line no-unused-vars ValidatorConstraintInterface, // eslint-disable-next-line no-unused-vars ValidationOptions, registerDecorator, // eslint-disable-next-line no-unused-vars ValidationArguments, } from 'class-validator'; import { getManager } from 'typeorm'; import AnimalSpecie from '@app/db/entity/AnimalSpecie'; @ValidatorConstraint({ async: true }) export class IsValidAnimalSpecieConstraint implements ValidatorConstraintInterface { async validate(id: number) { return (await getManager().findOne(AnimalSpecie, id)) !== undefined; } defaultMessage(args: ValidationArguments) { return `${args.property} must be a valid identifier, ${args.value} is unknown`; } } export default function IsValidAnimalSpecie(validationOptions?: ValidationOptions) { return (object: Object, propertyName: string) => { registerDecorator({ name: 'isValidAnimalSpecie', target: object.constructor, 
propertyName, options: validationOptions, validator: IsValidAnimalSpecieConstraint, }); }; } <file_sep>/src/db/repository/PuppyRepository.ts import { AbstractRepository, EntityRepository, // eslint-disable-next-line no-unused-vars EntityManager, // eslint-disable-next-line no-unused-vars SaveOptions, Not, // eslint-disable-next-line no-unused-vars DeleteResult, } from 'typeorm'; import Puppy from '@app/db/entity/Puppy'; import AnimalSpecie from '@app/db/entity/AnimalSpecie'; import Personality from '@app/db/entity/AnimalPersonality'; import AnimalBreed from '@app/db/entity/AnimalBreed'; // eslint-disable-next-line no-unused-vars import { DuplicateEntityError } from '@app/common/error'; // eslint-disable-next-line no-unused-vars import { Duplicate } from '@app/common/error/DuplicateEntityError'; import { EntityUtil } from '@app/util'; import ImageService, { ImageType } from '@app/service/ImageService'; @EntityRepository(Puppy) export default class PuppyRepository extends AbstractRepository<Puppy> { public saveOrFail(puppy: Puppy, entityManager?: EntityManager): Promise<Puppy> { const callback = async (em: EntityManager) => { // eslint-disable-next-line no-param-reassign puppy.specie = await em.findOneOrFail(AnimalSpecie, { id: (puppy.specie as unknown) as number, }); // eslint-disable-next-line no-param-reassign puppy.breeds = puppy.breeds && puppy.breeds.length > 0 ? [ await em.findOneOrFail( AnimalBreed, { id: (puppy.breeds[0] as unknown) as number }, { select: ['id', 'name'] } ), ...puppy.breeds .slice(1) .map((id) => em.create(AnimalBreed, { id: (id as unknown) as number })), ] : []; // eslint-disable-next-line no-param-reassign puppy.personalities = puppy.personalities ? puppy.personalities.map((id) => em.create(Personality, { id: (id as unknown) as number })) : []; return PuppyRepository.saveUnique(puppy, em); }; return entityManager === undefined ? 
this.manager.transaction(callback) : callback(entityManager); } public updateOrFail(puppy: Puppy, entityManager?: EntityManager): Promise<Puppy> { const callback = async (em: EntityManager) => { if (puppy.personalities) { // eslint-disable-next-line no-param-reassign puppy.personalities = await puppy.personalities.map((id) => em.create(Personality, { id: (id as unknown) as number }) ); } if (puppy.breeds) { // eslint-disable-next-line no-param-reassign puppy.breeds = await puppy.breeds.map((id) => em.create(AnimalBreed, { id: (id as unknown) as number }) ); } const puppyToUpdate: Puppy = await em.findOneOrFail(Puppy, puppy.id, { where: { user: puppy.user }, }); await em.merge(Puppy, puppyToUpdate, puppy); return PuppyRepository.updateUnique(puppyToUpdate, em); }; return entityManager === undefined ? this.manager.transaction(callback) : callback(entityManager); } public updateAvataOrFail( puppy: Puppy, avatar: Express.Multer.File, entityManager?: EntityManager ): Promise<Puppy> { const callback = async (em: EntityManager) => { const avatarResult = await ImageService.upload(avatar, { type: ImageType.AVATAR, folder: 'puppy/avatar', }); // eslint-disable-next-line no-param-reassign puppy.avatar = avatarResult.secure_url; return this.updateOrFail(puppy, em); }; return entityManager === undefined ? this.manager.transaction(callback) : callback(entityManager); } public deleteOrFail(puppy: Puppy, entityManager?: EntityManager): Promise<DeleteResult> { const callback = async (em: EntityManager) => { await em.findOneOrFail(Puppy, puppy.id, { where: { user: puppy.user }, }); return em.delete(Puppy, puppy.id); }; return entityManager === undefined ? 
this.manager.transaction(callback) : callback(entityManager); } private static async saveUnique( puppy: Puppy, entityManager: EntityManager, saveOptions?: SaveOptions, isUpdateOperation?: boolean ): Promise<Puppy> { const duplicateFields = new Set<Duplicate>(); const uniqueColumns = EntityUtil.uniqueColumns(Puppy); const whereConditions = uniqueColumns.map((u) => { return { [u]: puppy[u], ...(isUpdateOperation === true && { id: Not(puppy.id), }), }; }); const duplicateEntities = await entityManager.find(Puppy, { where: whereConditions, select: uniqueColumns, }); duplicateEntities.forEach((_puppy) => { uniqueColumns.forEach((u) => { if (puppy[u] === _puppy[u]) { duplicateFields.add({ property: u.toString(), value: puppy[u] }); } }); }); if ( !isUpdateOperation && (await entityManager.findOne(Puppy, { where: { id: puppy.id, }, })) !== undefined ) { duplicateFields.add({ property: `id`, value: puppy.id, }); } if (duplicateFields.size !== 0) throw new DuplicateEntityError(`Duplicate Puppy entity found`, Array.from(duplicateFields)); return entityManager.save(Puppy, puppy, saveOptions); } private static updateUnique( puppy: Puppy, entityManager: EntityManager, saveOptions?: SaveOptions ): Promise<Puppy> { return this.saveUnique(puppy, entityManager, saveOptions, true); } } <file_sep>/src/route/index.ts // eslint-disable-next-line no-unused-vars import { Router, Request, Response } from 'express'; import api from './api'; import site from './site'; const router = Router(); router.get('/', (_req: Request, res: Response) => { res.redirect('/site'); }); router.use('/api', api); router.use('/site', site); export default router; <file_sep>/src/util/CryptUtil.ts import bcrypt from 'bcryptjs'; import { CryptError } from '@app/common/error'; export default class CryptUtil { private static readonly SALT_ROUNDS: number = 10; public static async hash(s: string): Promise<string> { return bcrypt.hash(s, await bcrypt.genSalt(CryptUtil.SALT_ROUNDS)); } public static async compare(s: 
string, hash: string): Promise<boolean> { return bcrypt.compare(s, hash); } public static async compareOrFail(s: string, hash: string): Promise<boolean> { const equals: boolean = await CryptUtil.compare(s, hash); return equals ? Promise.resolve(true) : Promise.reject(new CryptError('The string is not comparable with the hash provided')); } public static async getRounds(hash: string): Promise<number> { return bcrypt.getRounds(hash); } public static async getSalt(hash: string): Promise<string> { return bcrypt.getSalt(hash); } } <file_sep>/src/controller/AnimalSpecieController.ts /* eslint-disable camelcase */ // eslint-disable-next-line no-unused-vars import { Request, Response } from 'express'; import { getManager } from 'typeorm'; import logger from '@app/logger'; import AnimalSpecie from '@app/db/entity/AnimalSpecie'; import { ResponseHelper, HttpStatusCode } from '@app/helper'; import { StringUtil, ArrayUtil } from '@app/util'; export default class AnimalSpecieController { public static find(req: Request, res: Response): void { const { limit, offset, sort, sort_order, id, name, breeds } = req.query; const breedsArray: number[] = StringUtil.toNumberArray(breeds as string); getManager() .find(AnimalSpecie, { ...(limit !== undefined && { take: (limit as unknown) as number }), ...(offset !== undefined && { skip: (offset as unknown) as number }), ...(sort !== undefined && sort_order !== undefined && { order: { [sort as keyof AnimalSpecie]: sort_order, }, }), loadRelationIds: true, where: { ...(id !== undefined && { id }), ...(name !== undefined && { name }), }, }) .then((animalSpecies) => { // eslint-disable-next-line no-param-reassign animalSpecies = animalSpecies.filter((specie) => ArrayUtil.contains(specie.breeds, breedsArray) ); logger.info(`Found ${animalSpecies.length} Animal Species`); ResponseHelper.send(res, HttpStatusCode.OK, animalSpecies); }) .catch((ex) => { logger.warn(`Failed to find Animal Species due to ${ex.message}`); ResponseHelper.send(res, 
HttpStatusCode.INTERNAL_SERVER_ERROR); }); }

  /**
   * GET handler — look up a single Animal Specie by primary key.
   * Responds 200 with the entity, 404 when no row matches, 500 otherwise.
   */
  public static findById(req: Request, res: Response): void {
    const { id } = req.params;
    getManager()
      .findOneOrFail(AnimalSpecie, id)
      .then((animalSpecie) => {
        logger.info(`Found Animal Specie ${animalSpecie.id}`);
        ResponseHelper.send(res, HttpStatusCode.OK, animalSpecie);
      })
      .catch((ex) => {
        logger.warn(`Failed to find Animal Specie ${id} due to ${ex.message}`);
        // findOneOrFail rejects with an error named 'EntityNotFound' when the row is missing.
        if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND);
        else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR);
      });
  }
}
<file_sep>/src/db/entity/AnimalBreed.ts
/* eslint-disable camelcase */
import {
  Entity,
  Index,
  Column,
  PrimaryColumn,
  Check,
  ManyToOne,
  JoinColumn,
  CreateDateColumn,
  UpdateDateColumn,
} from 'typeorm';
import { IsEmpty } from 'class-validator';
import AnimalSpecie from './AnimalSpecie';

/**
 * Breed of an animal specie (table "animal_breed").
 * Each breed belongs to exactly one AnimalSpecie and is deleted with it.
 */
@Entity('animal_breed')
@Check(`"id" > 0`)
export default class AnimalBreed {
  // Externally assigned positive identifier (no auto-generation).
  @PrimaryColumn({ name: 'id' })
  @Index()
  id!: number;

  // Human-readable breed name, unique across all species.
  @Column({ name: 'name', length: 64, unique: true })
  name!: string;

  // Owning specie; deleting the specie cascades to its breeds.
  @ManyToOne(() => AnimalSpecie, (specie) => specie.breeds, {
    nullable: false,
    onDelete: 'CASCADE',
  })
  @JoinColumn({ name: 'specie_id' })
  specie!: AnimalSpecie;

  // Set by the database on insert; clients must never supply it.
  @CreateDateColumn({ name: 'created_at', select: false, update: false })
  @IsEmpty({ always: true })
  created_at!: Date;

  // Maintained by the database on every update; clients must never supply it.
  @UpdateDateColumn({ name: 'updated_at', select: false })
  @IsEmpty({ always: true })
  updated_at!: Date;
}
<file_sep>/src/controller/AnimalPersonalityController.ts
/* eslint-disable camelcase */
// eslint-disable-next-line no-unused-vars
import { Request, Response } from 'express';
import { getManager } from 'typeorm';
import logger from '@app/logger';
import AnimalPersonality from '@app/db/entity/AnimalPersonality';
import { ResponseHelper, HttpStatusCode } from '@app/helper';

/** REST handlers for the read-only Animal Personality resource. */
export default class AnimalPersonalityController {
  /**
   * GET handler — list personalities with optional paging (limit/offset),
   * sorting (sort + sort_order) and id/name equality filters.
   */
  public static find(req: Request, res: Response): void {
    const { limit, offset, sort, sort_order, id, name } = req.query;
getManager() .find(AnimalPersonality, { ...(limit !== undefined && { take: (limit as unknown) as number }), ...(offset !== undefined && { skip: (offset as unknown) as number }), ...(sort !== undefined && sort_order !== undefined && { order: { [sort as keyof AnimalPersonality]: sort_order, }, }), loadRelationIds: true, where: { ...(id !== undefined && { id }), ...(name !== undefined && { name }), }, }) .then((personalities) => { logger.info(`Found ${personalities.length} Animal Personalities`); ResponseHelper.send(res, HttpStatusCode.OK, personalities); }) .catch((ex) => { logger.warn(`Failed to find Animal Personalities due to ${ex.message}`); ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static findById(req: Request, res: Response): void { const { id } = req.params; getManager() .findOneOrFail(AnimalPersonality, id) .then((personality) => { logger.info(`Found Animal Personality ${personality.id}`); ResponseHelper.send(res, HttpStatusCode.OK, personality); }) .catch((ex) => { logger.warn(`Failed to find Animal Personality ${id} due to ${ex.message}`); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } } <file_sep>/src/util/GisUtil.ts export interface Coordinates { latitude: number; longitude: number; } export interface BoundingBox { pivot: Coordinates; radius: number; min: Coordinates; max: Coordinates; } export default class GisUtil { // Earth's mean radius in metres public static readonly EARTH_RADIUS: number = 6371e3; public static toCoordinates(latitude: number | string, longitude: number | string): Coordinates { // eslint-disable-next-line no-param-reassign if (typeof latitude === 'string') latitude = Number.parseFloat(latitude); // eslint-disable-next-line no-param-reassign if (typeof longitude === 'string') longitude = Number.parseFloat(longitude); return { latitude, longitude }; } public static calculateBoundingBox( 
coordinates: Coordinates, radius: number | string ): BoundingBox { // eslint-disable-next-line no-param-reassign if (typeof radius === 'string') radius = Number.parseInt(radius, 10); return { pivot: coordinates, radius, min: { latitude: coordinates.latitude - ((radius / this.EARTH_RADIUS) * 180) / Math.PI, longitude: coordinates.longitude - ((radius / this.EARTH_RADIUS) * 180) / Math.PI / Math.cos((coordinates.latitude * Math.PI) / 180), }, max: { latitude: coordinates.latitude + ((radius / this.EARTH_RADIUS) * 180) / Math.PI, longitude: coordinates.longitude + ((radius / this.EARTH_RADIUS) * 180) / Math.PI / Math.cos((coordinates.latitude * Math.PI) / 180), }, }; } /** * @see https://www.movable-type.co.uk/scripts/latlong-db.html */ public static distance(start: Coordinates, end: Coordinates): number { return ( Math.acos( Math.sin((end.latitude * Math.PI) / 180) * Math.sin((start.latitude * Math.PI) / 180) + Math.cos((end.latitude * Math.PI) / 180) * Math.cos((start.latitude * Math.PI) / 180) * Math.cos((end.longitude * Math.PI) / 180 - (start.longitude * Math.PI) / 180) ) * this.EARTH_RADIUS ); } } <file_sep>/src/common/validator/IsAnimalBreedArrayBelongToAnimalSpecie.ts import { // eslint-disable-next-line no-unused-vars ValidationOptions, registerDecorator, ValidatorConstraint, // eslint-disable-next-line no-unused-vars ValidatorConstraintInterface, // eslint-disable-next-line no-unused-vars ValidationArguments, } from 'class-validator'; import { getManager } from 'typeorm'; import AnimalBreed from '@app/db/entity/AnimalBreed'; import AnimalSpecie from '@app/db/entity/AnimalSpecie'; // eslint-disable-next-line no-unused-vars import Puppy from '@app/db/entity/Puppy'; @ValidatorConstraint({ async: true }) export class IsAnimalBreedArrayBelongToAnimalSpecieConstraint implements ValidatorConstraintInterface { private invalidIds: number[] = []; private specie: AnimalSpecie | undefined = undefined; async validate(ids: number[], args: ValidationArguments) { if 
(ids.length === 0) return true; this.specie = getManager().create(AnimalSpecie, { id: ((args.object as Puppy).specie as unknown) as number, }); const validIds: number[] = await getManager() .find(AnimalBreed, { where: ids.map((id) => { return { id, specie: this.specie }; }), }) .then((validBreeds) => validBreeds.map((breed) => breed.id)); this.invalidIds = ids.filter((id) => validIds.indexOf(id) === -1); return this.invalidIds.length === 0; } defaultMessage(args: ValidationArguments) { return `${args.property} must contain identifiers for the same specie, ${JSON.stringify( this.invalidIds )} does not belong to specie ${this.specie?.id}`; } } export default function IsAnimalBreedArrayBelongToAnimalSpecie( validationOptions?: ValidationOptions ) { return (object: Object, propertyName: string) => { registerDecorator({ name: 'isAnimalBreedArrayBelongToAnimalSpecie', target: object.constructor, propertyName, options: validationOptions, validator: IsAnimalBreedArrayBelongToAnimalSpecieConstraint, }); }; } <file_sep>/src/db/entity/UserFriend.ts /* eslint-disable camelcase */ import { Entity, ManyToOne, JoinColumn, UpdateDateColumn, CreateDateColumn, Column, Check, } from 'typeorm'; import { IsEmpty, IsUUID, IsEnum, IsOptional } from 'class-validator'; import User from './User'; export enum UserFriendValidationGroup { // eslint-disable-next-line no-unused-vars CREATION = 'creation', // eslint-disable-next-line no-unused-vars UPDATE = 'update', } export enum UserFriendType { // eslint-disable-next-line no-unused-vars FRIEND = 'friend', // eslint-disable-next-line no-unused-vars BLOCKED = 'blocked', // eslint-disable-next-line no-unused-vars FRIEND_REQUEST = 'friend_request', // eslint-disable-next-line no-unused-vars WAITING_ACCEPTANCE = 'waiting_acceptance', } @Entity('user_friend') @Check(`"user_id" <> "friend_id"`) export default class UserFriend { @ManyToOne(() => User, { primary: true, onDelete: 'CASCADE' }) @JoinColumn({ name: 'user_id' }) @IsEmpty({ always: true }) 
user!: User; @ManyToOne(() => User, (user) => user.friends, { primary: true, onDelete: 'CASCADE' }) @JoinColumn({ name: 'friend_id' }) @IsUUID(undefined, { groups: [UserFriendValidationGroup.CREATION], }) @IsEmpty({ groups: [UserFriendValidationGroup.UPDATE] }) friend!: User; @Column({ name: 'type', type: 'enum', enum: UserFriendType, }) @IsEnum(UserFriendType, { groups: [UserFriendValidationGroup.UPDATE] }) @IsEmpty({ groups: [UserFriendValidationGroup.CREATION] }) @IsOptional({ groups: [UserFriendValidationGroup.UPDATE] }) type!: UserFriendType; @CreateDateColumn({ name: 'created_at', update: false }) @IsEmpty({ always: true }) created_at!: Date; @UpdateDateColumn({ name: 'updated_at', select: false }) @IsEmpty({ always: true }) updated_at!: Date; } <file_sep>/src/controller/UserVerificationController.ts // eslint-disable-next-line no-unused-vars import { Request, Response } from 'express'; import { getCustomRepository, getManager } from 'typeorm'; import logger from '@app/logger'; import User from '@app/db/entity/User'; // eslint-disable-next-line no-unused-vars import UserVerification from '@app/db/entity/UserVerification'; import UserVerificationRepository from '@app/db/repository/UserVerificationRepository'; import { UserAlreadyVerifiedError, EntityNotFoundError, DataMismatchError, } from '@app/common/error'; import { ResponseHelper, HttpStatusCode } from '@app/helper'; export default class UserController { public static verify(req: Request, res: Response): void { const userVerification: UserVerification = req.app.locals.UserVerification; userVerification.user = getManager().create(User, { id: (userVerification.user as unknown) as string, }); getCustomRepository(UserVerificationRepository) .verifyOrFail(userVerification) .then((token) => { logger.info(`User Verification succeeded for ${userVerification.user.id}`); ResponseHelper.send(res, HttpStatusCode.OK, token); }) .catch((ex) => { logger.warn(`Failed to Verify User ${userVerification.user.id} due to 
${ex.message}`); if (ex.name === 'EntityNotFound' || ex instanceof EntityNotFoundError) ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else if (ex instanceof UserAlreadyVerifiedError) ResponseHelper.send(res, HttpStatusCode.FORBIDDEN); else if (ex instanceof DataMismatchError) ResponseHelper.send(res, HttpStatusCode.UNAUTHORIZED); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static resend(req: Request, res: Response): void { const { id } = req.params; getManager() .findOneOrFail(User, id) .then((user) => getCustomRepository(UserVerificationRepository).verifyResendOrFail(user)) .then((userVerification) => { logger.info(`Resended User Verification for ${userVerification.user.id}`); ResponseHelper.send(res, HttpStatusCode.OK); }) .catch((ex) => { logger.warn(`Failed to resend User Verification for ${id} due to ${ex.message}`); if (ex.name === 'EntityNotFound' || ex instanceof EntityNotFoundError) ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else if (ex instanceof UserAlreadyVerifiedError) ResponseHelper.send(res, HttpStatusCode.FORBIDDEN); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } } <file_sep>/src/db/entity/AnimalPersonality.ts /* eslint-disable camelcase */ import { Entity, Index, Column, Check, PrimaryColumn, CreateDateColumn, UpdateDateColumn, } from 'typeorm'; import { IsEmpty } from 'class-validator'; @Entity('animal_personality') @Check(`"id" > 0`) export default class AnimalPersonality { @PrimaryColumn({ name: 'id' }) @Index() id!: number; @Column({ name: 'name', length: 64, unique: true }) name!: string; @CreateDateColumn({ name: 'created_at', select: false, update: false }) @IsEmpty({ always: true }) created_at!: Date; @UpdateDateColumn({ name: 'updated_at', select: false }) @IsEmpty({ always: true }) updated_at!: Date; } <file_sep>/src/common/error/index.ts export { default as CryptError } from './CryptError'; export { default as DuplicateEntityError } from 
'./DuplicateEntityError'; export { default as UserNotVerifiedError } from './UserNotVerifiedError'; export { default as UserAlreadyVerifiedError } from './UserAlreadyVerifiedError'; export { default as EntityNotFoundError } from './EntityNotFoundError'; export { default as DataMismatchError } from './DataMismatchError'; export { default as ConfigurationError } from './ConfigurationError'; export { default as EmptyFileError } from './EmptyFileError'; export { default as InvalidTokenException } from './InvalidTokenException'; <file_sep>/src/common/error/InvalidTokenException.ts export default class InvalidTokenException extends Error { constructor(m: string) { super(m); Object.setPrototypeOf(this, new.target.prototype); } } <file_sep>/src/common/validator/chain/IsNumberArray.ts // eslint-disable-next-line no-unused-vars import { CustomValidator } from 'express-validator'; const IsNumberArray: CustomValidator = (input) => { return typeof input === 'string' && /^\d+(,\d+)*$/.test(input); }; export default IsNumberArray; <file_sep>/src/service/index.ts export { default as EmailService } from './EmailService'; export { default as PhoneService } from './PhoneService'; export { default as ImageService } from './ImageService'; <file_sep>/src/helper/index.ts export { default as HttpStatusCode } from './httpStatusCode'; export { default as ResponseHelper } from './responseHelper'; export { default as JWTHelper } from './JWTHelper'; <file_sep>/src/route/api/v1/auth/user.ts import { Router } from 'express'; import { checkSchema } from 'express-validator'; import User, { UserValidationGroup, UserGender, UserRole } from '@app/db/entity/User'; import UserPasswordReset from '@app/db/entity/UserPasswordReset'; import { UserController } from '@app/controller'; import { ValidatorMiddleware, FileMiddleware } from '@app/middleware'; const router = Router(); router.get( '', ValidatorMiddleware.validateChain( checkSchema({ limit: { in: ['query'], isInt: true, toInt: true, optional: true, 
}, offset: { in: ['query'], isInt: true, toInt: true, optional: true, }, sort: { in: ['query'], isString: true, optional: true, }, sort_order: { in: ['query'], isString: true, isIn: { options: ['ASC, DESC'], }, optional: true, }, id: { in: ['query'], isUUID: true, optional: true, }, username: { in: ['query'], isString: true, optional: true, }, role: { in: ['query'], isIn: { options: [Object.values(UserRole)] }, optional: true, }, verified: { in: ['query'], isBoolean: true, toBoolean: true, optional: true, }, name: { in: ['query'], isString: true, optional: true, }, surname: { in: ['query'], isString: true, optional: true, }, gender: { in: ['query'], isIn: { options: [Object.values(UserGender)] }, optional: true, }, date_of_birth: { in: ['query'], isISO8601: true, optional: true, }, created_at: { in: ['query'], isISO8601: true, optional: true, }, }) ), UserController.find ); router.get('/me', UserController.me); router.get( '/:id', ValidatorMiddleware.validateChain( checkSchema({ id: { in: ['params'], isUUID: true, errorMessage: 'Invalid User id', }, }) ), UserController.findById ); router.post( '', ValidatorMiddleware.validateClass(User, UserValidationGroup.CREATION), UserController.create ); router.post( '/sign_in', ValidatorMiddleware.validateClass(User, UserValidationGroup.SIGN_IN), UserController.signIn ); router.post( '/password_reset/:email', ValidatorMiddleware.validateChain( checkSchema({ email: { in: ['params'], isEmail: true, errorMessage: 'Invalid email', }, }) ), UserController.passwordResetRequest ); router.post( '/password_reset', ValidatorMiddleware.validateClass(UserPasswordReset), UserController.passwordReset ); router.patch( '', ValidatorMiddleware.validateClass(User, UserValidationGroup.UPDATE), UserController.update ); router.patch( '/avatar', FileMiddleware.memoryLoader.single('image'), ValidatorMiddleware.validateFileSingle('image'), UserController.updateAvatar ); router.delete('', UserController.delete); export default router; 
<file_sep>/src/controller/PuppyController.ts /* eslint-disable camelcase */ // eslint-disable-next-line no-unused-vars import { Request, Response } from 'express'; import { getRepository, getCustomRepository, getManager, Between, Like } from 'typeorm'; import moment from 'moment'; import logger from '@app/logger'; import Puppy from '@app/db/entity/Puppy'; import PuppyRepository from '@app/db/repository/PuppyRepository'; import { ResponseHelper, HttpStatusCode } from '@app/helper'; import { DuplicateEntityError } from '@app/common/error'; import User from '@app/db/entity/User'; import { StringUtil, ArrayUtil } from '@app/util'; export default class PuppyController { public static find(req: Request, res: Response): void { const { limit, offset, sort, sort_order, id, name, gender, date_of_birth, weight, user, specie, breeds, personalities, created_at, } = req.query; const breedsArray: number[] = StringUtil.toNumberArray(breeds as string); const personalitiesArray: number[] = StringUtil.toNumberArray(personalities as string); getManager() .find(Puppy, { ...(limit !== undefined && { take: (limit as unknown) as number }), ...(offset !== undefined && { skip: (offset as unknown) as number }), ...(sort !== undefined && sort_order !== undefined && { order: { [sort as keyof Puppy]: sort_order, }, }), loadRelationIds: true, where: { ...(id !== undefined && { id }), ...(name !== undefined && { name: Like(`%${name}%`) }), ...(gender !== undefined && { gender }), ...(date_of_birth !== undefined && { date_of_birth }), ...(weight !== undefined && { weight }), ...(user !== undefined && { user }), ...(specie !== undefined && { specie }), ...(created_at !== undefined && { created_at: Between( moment(`${created_at}T00:00:00.000`), moment(`${created_at}T23:59:59.999`) ), }), }, }) .then((puppies) => { // eslint-disable-next-line no-param-reassign puppies = puppies.filter( (puppy) => ArrayUtil.contains(puppy.breeds, breedsArray) && ArrayUtil.contains(puppy.personalities, 
personalitiesArray) ); logger.info(`Found ${puppies.length} Puppies`); ResponseHelper.send(res, HttpStatusCode.OK, puppies); }) .catch((ex) => { logger.warn(`Failed to find Puppies due to ${ex.message}`); ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static findById(req: Request, res: Response): void { const { id } = req.params; getRepository(Puppy) .findOneOrFail(id, { loadRelationIds: true }) .then((puppy) => { logger.info(`Found Puppy ${puppy.id}`); ResponseHelper.send(res, HttpStatusCode.OK, puppy); }) .catch((ex) => { logger.warn(`Failed to find Puppy ${id} due to ${ex.message}`); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static create(req: Request, res: Response): void { const puppy: Puppy = req.app.locals.Puppy; puppy.user = getManager().create(User, { id: req.user?.id ? req.user.id : '' }); getCustomRepository(PuppyRepository) .saveOrFail(puppy) .then((newPuppy) => { logger.info(`Created Puppy ${newPuppy.id}`); ResponseHelper.send(res, HttpStatusCode.CREATED, newPuppy.id); }) .catch((ex) => { logger.warn(`Failed to create Puppy due to ${ex.message}`); if (ex instanceof DuplicateEntityError) ResponseHelper.send(res, HttpStatusCode.CONFLICT, ex.errors); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static update(req: Request, res: Response): void { const puppy: Puppy = req.app.locals.Puppy; puppy.id = Number.parseInt(req.params.id, 10); puppy.user = getManager().create(User, { id: req.user?.id ? 
req.user.id : '' }); getCustomRepository(PuppyRepository) .updateOrFail(puppy) .then((upPuppy) => { logger.info(`Updated Puppy ${upPuppy.id}`); ResponseHelper.send(res, HttpStatusCode.OK); }) .catch((ex) => { logger.warn(`Failed to update Puppy ${puppy.id} due to ${ex.message}`); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else if (ex instanceof DuplicateEntityError) ResponseHelper.send(res, HttpStatusCode.CONFLICT, ex.errors); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static updateAvatar(req: Request, res: Response): void { const puppy: Puppy = getManager().create(Puppy, { id: Number.parseInt(req.params.id, 10), user: getManager().create(User, { id: req.user?.id ? req.user.id : '' }), }); getCustomRepository(PuppyRepository) .updateAvataOrFail(puppy, req.file) .then((upPuppy) => { logger.info(`Changed avatar for Puppy ${upPuppy.id} to ${upPuppy.avatar}`); ResponseHelper.send(res, HttpStatusCode.OK, upPuppy.avatar); }) .catch((ex) => { logger.error(`Failed to change avatar for Puppy ${puppy.id} due to ${ex.message}`); ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } public static delete(req: Request, res: Response): void { const puppy: Puppy = getManager().create(Puppy, { id: Number.parseInt(req.params.id, 10), user: getManager().create(User, { id: req.user?.id ? 
req.user.id : '' }), }); getCustomRepository(PuppyRepository) .deleteOrFail(puppy) .then(() => { logger.info(`Deleted Puppy ${puppy.id}`); ResponseHelper.send(res, HttpStatusCode.OK); }) .catch((ex) => { logger.warn(`Failed to delete Puppy ${puppy.id} due to ${ex.message}`); if (ex.name === 'EntityNotFound') ResponseHelper.send(res, HttpStatusCode.NOT_FOUND); else ResponseHelper.send(res, HttpStatusCode.INTERNAL_SERVER_ERROR); }); } } <file_sep>/src/common/error/DuplicateEntityError.ts export interface Duplicate { property: string; value: any; } export default class DuplicateError extends Error { public readonly errors: Duplicate[] | undefined; constructor(m: string, errors?: Duplicate[]) { super(m); this.errors = errors; Object.setPrototypeOf(this, new.target.prototype); } } <file_sep>/src/helper/JWTHelper.ts import jwt from 'jsonwebtoken'; // eslint-disable-next-line no-unused-vars import config from '@app/config'; // eslint-disable-next-line no-unused-vars import { types } from '@app/common'; export default class JWTHelper { public static sign(payload: types.JWT.Payload): Promise<string> { return Promise.resolve( jwt.sign(payload, config.SECURITY.JWT.SECRET, { expiresIn: config.SECURITY.JWT.EXPIRES_IN, }) ); } public static verify(token: string): Promise<types.JWT.Payload> { return new Promise((resolve, reject) => { jwt.verify(token, config.SECURITY.JWT.SECRET, (err, decoded) => { if (err) { reject(err); } else { resolve(decoded as types.JWT.Payload); } }); }); } } <file_sep>/src/common/validator/chain/index.ts export { default as IsStringArray } from './IsStringArray'; export { default as IsNumberArray } from './IsNumberArray'; <file_sep>/src/util/StringUtil.ts export default class StringUtil { public static readonly STRING_ARRAY_SEPARATOR = ','; public static toNumberArray(str: string | undefined): number[] { if (typeof str !== 'string' || str === undefined) return []; return str .toString() .split(this.STRING_ARRAY_SEPARATOR) .map((n) => Number.parseInt(n, 
10)) .filter((n) => !Number.isNaN(n)); } } <file_sep>/src/db/entity/AnimalPlace.ts /* eslint-disable camelcase */ import { Entity, PrimaryColumn, Index, Column, CreateDateColumn, UpdateDateColumn, Check, } from 'typeorm'; import { IsEmpty } from 'class-validator'; export enum AnimalPlaceType { // eslint-disable-next-line no-unused-vars PARK = 'park', // eslint-disable-next-line no-unused-vars SHOP = 'shop', // eslint-disable-next-line no-unused-vars VETERINARY = 'veterinary', // eslint-disable-next-line no-unused-vars GROOMING = 'grooming', } @Entity('animal_place') @Check(`"id" > 0`) export default class AnimalPlace { @PrimaryColumn({ name: 'id' }) @Index() id!: number; @Column({ name: 'latitude', type: 'float8' }) @Index() latitude!: number; @Column({ name: 'longitude', type: 'float8' }) @Index() longitude!: number; @Column({ name: 'name' }) name!: string; @Column({ name: 'type', type: 'enum', enum: AnimalPlaceType }) @Index() type!: AnimalPlaceType; @CreateDateColumn({ name: 'created_at', select: false, update: false }) @IsEmpty({ always: true }) created_at!: Date; @UpdateDateColumn({ name: 'updated_at', select: false }) @IsEmpty({ always: true }) updated_at!: Date; } <file_sep>/src/helper/httpStatusCode.ts export enum Status { // eslint-disable-next-line no-unused-vars SUCCESS = 'success', // eslint-disable-next-line no-unused-vars FAIL = 'fail', // eslint-disable-next-line no-unused-vars ERROR = 'error', } export default class HttpStatusCode { public readonly code: number; public readonly name: string; public readonly description: string; private constructor(code: number, name: string, description: string) { this.code = code; this.name = name; this.description = description; } public isSuccess(): boolean { return HttpStatusCode.isSuccess(this); } public static isSuccess(httpStatusCode: HttpStatusCode): boolean { return httpStatusCode.code >= 200 && httpStatusCode.code <= 299; } public status(): Status { return HttpStatusCode.status(this); } public static 
status(httpStatusCode: HttpStatusCode): Status { let status = Status.ERROR; if (HttpStatusCode.SUCCESSFUL_RESPONSES.has(httpStatusCode)) { status = Status.SUCCESS; } else if (HttpStatusCode.CLIENT_ERROR_RESPONSES.has(httpStatusCode)) { status = Status.FAIL; } return status; } public toString(): string { return JSON.stringify(this); } // HTTP Status Codes public static readonly CONTINUE = new HttpStatusCode( 100, 'Continue', 'This interim response indicates that everything so far is OK and that the client should continue with the request or ignore it if it is already finished' ); public static readonly SWITCHING_PROTOCOL = new HttpStatusCode( 101, 'Switching Protocol', 'This code is sent in response to an Upgrade request header by the client, and indicates the protocol the server is switching too' ); public static readonly PROCESSING = new HttpStatusCode( 102, 'Processing', 'This code indicates that the server has received and is processing the request, but no response is available yet' ); public static readonly EARLY_HINTS = new HttpStatusCode( 103, 'Early Hints', 'This status code is primarily intended to be used with the Link header, letting the user agent start preloading resources while the server prepares a response' ); public static readonly OK = new HttpStatusCode(200, 'Ok', 'The request has succeeded'); public static readonly CREATED = new HttpStatusCode( 201, 'Created', 'The request has succeeded and a new resource has been created as a result of it' ); public static readonly ACCEPTED = new HttpStatusCode( 202, 'Accepted', 'The request has been received but not yet acted upon' ); public static readonly NON_AUTHORITATIVE_INFORMATION = new HttpStatusCode( 203, 'Non-Authoritative Information', 'This response code means returned meta-information set is not exact set as available from the origin server, but collected from a local or a third party copy' ); public static readonly NO_CONTENT = new HttpStatusCode( 204, 'No Content', 'There is no content to send for 
this request, but the headers may be useful' ); public static readonly RESET_CONTENT = new HttpStatusCode( 205, 'Reset Content', 'This response code is sent after accomplishing request to tell user agent reset document view which sent this request' ); public static readonly PARTIAL_CONTENT = new HttpStatusCode( 206, 'Partial Content', 'This response code is used because of range header sent by the client to separate download into multiple streams' ); public static readonly MULTI_STATUS = new HttpStatusCode( 207, 'Multi-Status', 'Conveys information about multiple resources, for situations where multiple status codes might be appropriate' ); public static readonly ALREADY_REPORTED = new HttpStatusCode( 208, 'Already Reported', 'Used inside a <dav:propstat> response element to avoid repeatedly enumerating the internal members of multiple bindings to the same collection' ); public static readonly IM_USED = new HttpStatusCode( 226, 'IM Used', 'The server has fulfilled a GET request for the resource, and the response is a representation of the result of one or more instance-manipulations applied to the current instance' ); public static readonly MULTIPLE_CHOICE = new HttpStatusCode( 300, 'Multiple Choice', 'The request has more than one possible responses' ); public static readonly MOVED_PERMANENTLY = new HttpStatusCode( 301, 'Moved Permanently', 'This response code means that URI of requested resource has been changed' ); public static readonly FOUND = new HttpStatusCode( 302, 'Found', 'This response code means that URI of requested resource has been changed temporarily' ); public static readonly SEE_OTHER = new HttpStatusCode( 303, 'See Other', 'Server sent this response to directing client to get requested resource to another URI with an GET request' ); public static readonly NOT_MODIFIED = new HttpStatusCode( 304, 'Multiple Choice', 'Used for caching purposes, it is telling to client that response has not been modified' ); public static readonly TEMPORARY_REDIRECT = 
new HttpStatusCode( 307, 'Temporary Redirect', 'Server sent this response to directing client to get requested resource to another URI with same method that used prior request' ); public static readonly PERMANENT_REDIRECT = new HttpStatusCode( 308, 'Permanent Redirect', 'This means that the resource is now permanently located at another URI, specified by the Location: HTTP Response header' ); public static readonly BAD_REQUEST = new HttpStatusCode( 400, 'Bad Request', 'This response means that server could not understand the request due to invalid syntax' ); public static readonly UNAUTHORIZED = new HttpStatusCode( 401, 'Unauthorized', 'Although the HTTP standard specifies "unauthorized", semantically this response means "unauthenticated"' ); public static readonly PAYMENT_REQUIRED = new HttpStatusCode( 402, 'Payment Required', 'This response code is reserved for future use' ); public static readonly FORBIDDEN = new HttpStatusCode( 403, 'Forbidden', 'The client does not have access rights to the content, i.e. 
they are unauthorized, so server is rejecting to give proper response' ); public static readonly NOT_FOUND = new HttpStatusCode( 404, 'Not Found', 'The server can not find requested resource' ); public static readonly METHOD_NOT_ALLOWED = new HttpStatusCode( 405, 'Method Not Allowed', 'The request method is known by the server but has been disabled and cannot be used' ); public static readonly NOT_ACCEPTABLE = new HttpStatusCode( 406, 'Not Acceptable', "This response is sent when the web server, after performing server-driven content negotiation, doesn't find any content following the criteria given by the user agent" ); public static readonly PROXY_AUTHENTICATION_REQUIRED = new HttpStatusCode( 407, 'Proxy Authentication Required', 'This is similar to 401 but authentication is needed to be done by a proxy' ); public static readonly REQUEST_TIMEOUT = new HttpStatusCode( 408, 'Request Timeout', 'This response is sent on an idle connection by some servers, even without any previous request by the client' ); public static readonly CONFLICT = new HttpStatusCode( 409, 'Conflict', 'This response is sent when a request conflicts with the current state of the server' ); public static readonly GONE = new HttpStatusCode( 410, 'Gone', 'This response would be sent when the requested content has been permenantly deleted from server, with no forwarding address' ); public static readonly LENGTH_REQUIRED = new HttpStatusCode( 411, 'Length Required', 'Server rejected the request because the Content-Length header field is not defined and the server requires it' ); public static readonly PRECONDITION_FAILED = new HttpStatusCode( 412, 'Precondition Failed', 'The client has indicated preconditions in its headers which the server does not meet' ); public static readonly PAYLOAD_TOO_LARGE = new HttpStatusCode( 413, 'Payload Too Large', 'Request entity is larger than limits defined by server; the server might close the connection or return an Retry-After header field' ); public static 
readonly URI_TOO_LONG = new HttpStatusCode( 414, 'URI Too Long', 'The URI requested by the client is longer than the server is willing to interpret' ); public static readonly UNSUPPORTED_MEDIA_TYPE = new HttpStatusCode( 415, 'Unsupported Media Type', 'The media format of the requested data is not supported by the server, so the server is rejecting the request' ); public static readonly RANGE_NOT_SATISFIABLE = new HttpStatusCode( 416, 'Range Not Satisfiable', "The range specified by the Range header field in the request can't be fulfilled; it's possible that the range is outside the size of the target URI's data" ); public static readonly EXPECTATION_FAILED = new HttpStatusCode( 417, 'Expectation Failed', "This response code means the expectation indicated by the Expect request header field can't be met by the server" ); public static readonly I_AM_A_TEAPOT = new HttpStatusCode( 418, 'Expectation Failed', 'The server refuses the attempt to brew coffee with a teapot' ); public static readonly MISDIRECTED_REQUEST = new HttpStatusCode( 421, 'Misdirected Request', 'The request was directed at a server that is not able to produce a response' ); public static readonly UNPROCESSABLE_ENTITY = new HttpStatusCode( 422, 'Unprocessable Entity', 'The request was well-formed but was unable to be followed due to semantic errors' ); public static readonly LOCKED = new HttpStatusCode( 423, 'Locked', 'The resource that is being accessed is locked' ); public static readonly FAILED_DEPENDENCY = new HttpStatusCode( 424, 'Failed Dependency', 'The request failed due to failure of a previous request' ); public static readonly TOO_EARLY = new HttpStatusCode( 425, 'Too Early', 'Indicates that the server is unwilling to risk processing a request that might be replayed' ); public static readonly UPGRADE_REQUIRED = new HttpStatusCode( 426, 'Upgrade Required', 'The server refuses to perform the request using the current protocol but might be willing to do so after the client upgrades to a 
different protocol' ); public static readonly PRECONDITION_REQUIRED = new HttpStatusCode( 428, 'Precondition Required', 'The origin server requires the request to be conditional' ); public static readonly TOO_MANY_REQUESTS = new HttpStatusCode( 429, 'Too Many Requests', 'The user has sent too many requests in a given amount of time ("rate limiting")' ); public static readonly REQUEST_HEADER_FIELDS_TOO_LARGE = new HttpStatusCode( 431, 'Request Header Fields Too Large', 'The server is unwilling to process the request because its header fields are too large' ); public static readonly UNAVAILABLE_FOR_LEGAL_REASONS = new HttpStatusCode( 451, 'Unavailable For Legal Reasons', 'The user requests an illegal resource, such as a web page censored by a government' ); public static readonly INTERNAL_SERVER_ERROR = new HttpStatusCode( 500, 'Internal Server Error', "The server has encountered a situation it doesn't know how to handle" ); public static readonly NOT_IMPLEMENTED = new HttpStatusCode( 501, 'Not Implemented', 'The request method is not supported by the server and cannot be handled' ); public static readonly BAD_GATEWAY = new HttpStatusCode( 502, 'Bad Gateway', 'This error response means that the server, while working as a gateway to get a response needed to handle the request, got an invalid response' ); public static readonly SERVICE_UNAVAILABLE = new HttpStatusCode( 503, 'Service Unavailable', 'The server is not ready to handle the request' ); public static readonly GATEWAY_TIMEOUT = new HttpStatusCode( 504, 'Gateway Timeout', 'This error response is given when the server is acting as a gateway and cannot get a response in time' ); public static readonly HTTP_VERSION_NOT_SUPPORTED = new HttpStatusCode( 505, 'HTTP Version Not Supported', 'The HTTP version used in the request is not supported by the server' ); public static readonly VARIANT_ALSO_NEGOTIATES = new HttpStatusCode( 506, 'Variant Also Negotiates', 'The server has an internal configuration error: 
transparent content negotiation for the request results in a circular reference' ); public static readonly INSUFFICIENT_STORAGE = new HttpStatusCode( 507, 'Insufficient Storage', 'The server has an internal configuration error: the chosen variant resource is configured to engage in transparent content negotiation itself, and is therefore not a proper end point in the negotiation process' ); public static readonly LOOP_DETECTED = new HttpStatusCode( 508, 'Loop Detected', 'The server detected an infinite loop while processing the request' ); public static readonly NOT_EXTENDED = new HttpStatusCode( 510, 'Not Extended', 'Further extensions to the request are required for the server to fulfill it' ); public static readonly NETWORK_AUTHENTICATION_REQUIRED = new HttpStatusCode( 511, 'Network Authentication Required', 'The 511 status code indicates that the client needs to authenticate to gain network access' ); // END HTTP Status Codes // Informational Responses public static readonly INFORMATIONAL_RESPONSES: Set<HttpStatusCode> = new Set([ HttpStatusCode.CONTINUE, HttpStatusCode.SWITCHING_PROTOCOL, HttpStatusCode.PROCESSING, HttpStatusCode.EARLY_HINTS, ]); // END Informational Responses // Successful Responses public static readonly SUCCESSFUL_RESPONSES: Set<HttpStatusCode> = new Set([ HttpStatusCode.OK, HttpStatusCode.CREATED, HttpStatusCode.ACCEPTED, HttpStatusCode.NON_AUTHORITATIVE_INFORMATION, HttpStatusCode.NO_CONTENT, HttpStatusCode.RESET_CONTENT, HttpStatusCode.PARTIAL_CONTENT, HttpStatusCode.MULTI_STATUS, HttpStatusCode.ALREADY_REPORTED, HttpStatusCode.IM_USED, ]); // END Successful Responses // Redirection Messages public static readonly REDIRECTION_MESSAGES: Set<HttpStatusCode> = new Set([ HttpStatusCode.MULTIPLE_CHOICE, HttpStatusCode.MOVED_PERMANENTLY, HttpStatusCode.FOUND, HttpStatusCode.SEE_OTHER, HttpStatusCode.NOT_MODIFIED, HttpStatusCode.TEMPORARY_REDIRECT, HttpStatusCode.PERMANENT_REDIRECT, ]); // END Redirection messages // Client Error Responses 
public static readonly CLIENT_ERROR_RESPONSES: Set<HttpStatusCode> = new Set([ HttpStatusCode.BAD_REQUEST, HttpStatusCode.UNAUTHORIZED, HttpStatusCode.PAYMENT_REQUIRED, HttpStatusCode.FORBIDDEN, HttpStatusCode.NOT_FOUND, HttpStatusCode.METHOD_NOT_ALLOWED, HttpStatusCode.NOT_ACCEPTABLE, HttpStatusCode.PROXY_AUTHENTICATION_REQUIRED, HttpStatusCode.REQUEST_TIMEOUT, HttpStatusCode.CONFLICT, HttpStatusCode.GONE, HttpStatusCode.LENGTH_REQUIRED, HttpStatusCode.PRECONDITION_FAILED, HttpStatusCode.PAYLOAD_TOO_LARGE, HttpStatusCode.URI_TOO_LONG, HttpStatusCode.UNSUPPORTED_MEDIA_TYPE, HttpStatusCode.RANGE_NOT_SATISFIABLE, HttpStatusCode.EXPECTATION_FAILED, HttpStatusCode.I_AM_A_TEAPOT, HttpStatusCode.MISDIRECTED_REQUEST, HttpStatusCode.UNPROCESSABLE_ENTITY, HttpStatusCode.LOCKED, HttpStatusCode.FAILED_DEPENDENCY, HttpStatusCode.TOO_EARLY, HttpStatusCode.UPGRADE_REQUIRED, HttpStatusCode.PRECONDITION_REQUIRED, HttpStatusCode.TOO_MANY_REQUESTS, HttpStatusCode.REQUEST_HEADER_FIELDS_TOO_LARGE, HttpStatusCode.UNAVAILABLE_FOR_LEGAL_REASONS, ]); // END Client Error Responses // Server error responses public static readonly SERVER_ERROR_RESPONSES: Set<HttpStatusCode> = new Set([ HttpStatusCode.INTERNAL_SERVER_ERROR, HttpStatusCode.NOT_IMPLEMENTED, HttpStatusCode.BAD_GATEWAY, HttpStatusCode.SERVICE_UNAVAILABLE, HttpStatusCode.GATEWAY_TIMEOUT, HttpStatusCode.HTTP_VERSION_NOT_SUPPORTED, HttpStatusCode.VARIANT_ALSO_NEGOTIATES, HttpStatusCode.INSUFFICIENT_STORAGE, HttpStatusCode.LOOP_DETECTED, HttpStatusCode.NOT_EXTENDED, HttpStatusCode.NETWORK_AUTHENTICATION_REQUIRED, ]); // END Server error responses // HTTP Status Codes list public static readonly HTTP_STATUS_CODES: Set<HttpStatusCode> = new Set([ ...HttpStatusCode.INFORMATIONAL_RESPONSES, ...HttpStatusCode.SUCCESSFUL_RESPONSES, ...HttpStatusCode.REDIRECTION_MESSAGES, ...HttpStatusCode.CLIENT_ERROR_RESPONSES, ...HttpStatusCode.SERVER_ERROR_RESPONSES, ]); // END HTPP Status Codes list } <file_sep>/src/db/entity/UserVerification.ts /* 
eslint-disable camelcase */
// (the "/* eslint-disable camelcase" opener of the comment above sits on the
// previous line of the file, outside this span)
import { Entity, OneToOne, JoinColumn, Column, CreateDateColumn, UpdateDateColumn } from 'typeorm';
import { IsNumberString, Length, IsUUID, IsEmpty } from 'class-validator';
import config from '@app/config';
import User from './User';

// Per-user OTP storage used during account verification.
// The owning user is the primary key (one row per user, cascade-deleted).
@Entity('user_verification')
export default class UserVerification {
  // Owning user; the row is removed when the user is deleted.
  @OneToOne(() => User, { primary: true, onDelete: 'CASCADE' })
  @JoinColumn({ name: 'user_id' })
  @IsUUID()
  user!: User;

  // One-time password sent by e-mail; exact length taken from config.
  @Column({ name: 'otp_email', length: config.SECURITY.OTP.EMAIL.DIGITS })
  @IsNumberString()
  @Length(config.SECURITY.OTP.EMAIL.DIGITS, config.SECURITY.OTP.EMAIL.DIGITS)
  otp_email!: string;

  // One-time password sent by SMS; exact length taken from config.
  @Column({ name: 'otp_phone', length: config.SECURITY.OTP.PHONE.DIGITS })
  @IsNumberString()
  @Length(config.SECURITY.OTP.PHONE.DIGITS, config.SECURITY.OTP.PHONE.DIGITS)
  otp_phone!: string;

  // Audit columns managed by TypeORM; @IsEmpty forbids clients from setting them.
  @CreateDateColumn({ name: 'created_at', select: false, update: false })
  @IsEmpty({ always: true })
  created_at!: Date;

  @UpdateDateColumn({ name: 'updated_at', select: false })
  @IsEmpty({ always: true })
  updated_at!: Date;
}
<file_sep>/src/common/error/UserNotVerifiedError.ts
// Thrown when an operation requires a verified account; optionally carries the
// offending user's id so callers can trigger re-verification.
export default class UserNotVerifiedError extends Error {
  public readonly id: string | undefined;

  constructor(m: string, id?: string) {
    super(m);
    this.id = id;
    // Restore the prototype chain (needed when extending Error on ES5 targets).
    Object.setPrototypeOf(this, new.target.prototype);
  }
}
<file_sep>/src/util/index.ts
// Barrel file re-exporting the utility classes.
export { default as EnvUtil } from './EnvUtil';
export { default as OTPUtil } from './OTPUtil';
export { default as CryptUtil } from './CryptUtil';
export { default as EntityUtil } from './EntityUtil';
export { default as StringUtil } from './StringUtil';
export { default as ArrayUtil } from './ArrayUtil';
export { default as GisUtil } from './GisUtil';
<file_sep>/src/route/api/v1/auth/user_friend.ts
// Authenticated CRUD routes for user friendships.
import { Router } from 'express';
import { checkSchema } from 'express-validator';
import { UserFriendController } from '@app/controller';
import { ValidatorMiddleware } from '@app/middleware';
import UserFriend, { UserFriendValidationGroup, UserFriendType } from '@app/db/entity/UserFriend';

const router = Router();

// GET / — list friendships with pagination, sorting and field filters.
router.get(
  '',
  ValidatorMiddleware.validateChain(
    checkSchema({
      limit: { in: ['query'], isInt: true, toInt: true, optional: true, },
      offset: { in: ['query'], isInt: true, toInt: true, optional: true, },
      sort: { in: ['query'], isString: true, optional: true, },
      sort_order: {
        in: ['query'],
        isString: true,
        // NOTE(review): 'ASC, DESC' is one single string, so no real value can
        // ever match; presumably ['ASC', 'DESC'] was intended — TODO confirm.
        isIn: { options: ['ASC, DESC'], },
        optional: true,
      },
      friend: { in: ['query'], isUUID: true, optional: true, },
      type: { in: ['query'], isIn: { options: [Object.values(UserFriendType)] }, optional: true, },
      created_at: { in: ['query'], isISO8601: true, optional: true, },
    })
  ),
  UserFriendController.find
);

// GET /:id — fetch a single friendship by UUID.
router.get(
  '/:id',
  ValidatorMiddleware.validateChain(
    checkSchema({
      id: {
        in: ['params'],
        isUUID: true,
        errorMessage: 'Invalid User Friend id',
      },
    })
  ),
  UserFriendController.findById
);

// POST / — create a friendship from the validated request body.
router.post(
  '',
  ValidatorMiddleware.validateClass(UserFriend, UserFriendValidationGroup.CREATION),
  UserFriendController.create
);

// PATCH /:id — partial update of an existing friendship.
router.patch(
  '/:id',
  ValidatorMiddleware.validateChain(
    checkSchema({
      id: {
        in: ['params'],
        isUUID: true,
        errorMessage: 'Invalid User Friend id',
      },
    })
  ),
  ValidatorMiddleware.validateClass(UserFriend, UserFriendValidationGroup.UPDATE),
  UserFriendController.update
);

// DELETE /:id — remove a friendship.
router.delete(
  '/:id',
  ValidatorMiddleware.validateChain(
    checkSchema({
      id: {
        in: ['params'],
        isUUID: true,
        errorMessage: 'Invalid User Friend id',
      },
    })
  ),
  UserFriendController.delete
);

export default router;
<file_sep>/src/route/api/v1/auth/animal_specie.ts
// Read-only routes for animal species.
import { Router } from 'express';
import { checkSchema } from 'express-validator';
import { ValidatorMiddleware } from '@app/middleware';
import { AnimalSpecieController } from '@app/controller';
import { IsNumberArray } from '@app/common/validator/chain';

const router = Router();

// GET / — list species with pagination, sorting and field filters.
router.get(
  '',
  ValidatorMiddleware.validateChain(
    checkSchema({
      limit: { in: ['query'], isInt: true, toInt: true, optional: true, },
      offset: { in: ['query'], isInt: true, toInt: true, optional: true, },
      sort: { in: ['query'], isString: true, optional: true, },
      sort_order: {
        in: ['query'],
        isString: true,
        // NOTE(review): same single-string 'ASC, DESC' issue as user_friend.ts;
        // likely meant ['ASC', 'DESC'] — TODO confirm.
        isIn: { options: ['ASC, DESC'], },
        optional: true,
      },
      id: { in: ['query'], isInt: true, toInt: true, optional: true, },
      name: { in: ['query'], isString: true, optional: true, },
      breeds: { in: ['query'], isString: true, custom: { options: IsNumberArray, }, optional: true, },
    })
  ),
  AnimalSpecieController.find
);

// GET /:id — fetch a single species by numeric id.
router.get(
  '/:id',
  ValidatorMiddleware.validateChain(
    checkSchema({
      id: {
        in: ['params'],
        isInt: true,
        errorMessage: 'Invalid Animal Specie id',
      },
    })
  ),
  AnimalSpecieController.findById
);

export default router;
<file_sep>/src/service/PhoneService.ts
// Static singleton wrapper around the Twilio SMS client.
import { Twilio } from 'twilio';
import {
  // eslint-disable-next-line no-unused-vars
  MessageInstance,
  // eslint-disable-next-line no-unused-vars
  MessageListInstanceCreateOptions,
} from 'twilio/lib/rest/api/v2010/account/message';
import config from '@app/config';
import logger from '@app/logger';
import { ConfigurationError } from '@app/common/error';

export default class PhoneService {
  private static transport: Twilio;

  private static configured: boolean = false;

  // Idempotent: instantiate the Twilio client from config once.
  public static configure(): void {
    if (this.configured) return;
    this.transport = new Twilio(config.SERVICE.PHONE.SID, config.SERVICE.PHONE.TOKEN);
    this.configured = true;
    logger.info('Phone service configured');
  }

  // Send one SMS; rejects with the Twilio error, throws if configure() was
  // never called.
  public static send(phoneOptions: MessageListInstanceCreateOptions): Promise<MessageInstance> {
    if (!this.configured) throw new ConfigurationError('Phone Service is not configured');
    return new Promise((resolve, reject) => {
      this.transport.messages
        .create(phoneOptions)
        .then((message) => {
          logger.info(`Sended phone message ${JSON.stringify(message)}`);
          resolve(message);
        })
        .catch((ex) => {
          logger.error(`Error sending phone message due to ${ex.message}`);
          reject(ex);
        });
    });
  }
}
<file_sep>/src/common/validator/IsValidAnimalPersonalityArray.ts
/* eslint-disable class-methods-use-this */
// Async class-validator constraint: every id in the array must exist in the
// animal_personality table.
import {
  ValidatorConstraint,
  // eslint-disable-next-line no-unused-vars
  ValidatorConstraintInterface,
  // eslint-disable-next-line no-unused-vars
  ValidationOptions,
  registerDecorator,
  // eslint-disable-next-line no-unused-vars
  ValidationArguments,
} from 'class-validator';
import { getManager } from 'typeorm';
import Personality from '@app/db/entity/AnimalPersonality';

@ValidatorConstraint({ async: true })
export class IsValidAnimalPersonalityArrayConstraint implements ValidatorConstraintInterface {
  // Ids that failed the last validate() run; reported by defaultMessage().
  private invalidIds: number[] = [];

  async validate(ids: number[]) {
    if (ids.length === 0) return true;
    const validIds: number[] = await getManager()
      .find(Personality, {
        select: ['id'],
        where: ids.map((id) => {
          return { id };
        }),
      })
      .then((validPersonalities) => validPersonalities.map((personality) => personality.id));
    this.invalidIds = ids.filter((id) => validIds.indexOf(id) === -1);
    return this.invalidIds.length === 0;
  }

  defaultMessage(args: ValidationArguments) {
    return `${args.property} must contain valid identifiers, ${JSON.stringify(this.invalidIds)} ${
      this.invalidIds.length === 1 ? 'is' : 'are'
    } unknown`;
  }
}

// Property decorator factory registering the constraint above.
export default function IsValidAnimalPersonalityArray(validationOptions?: ValidationOptions) {
  return (object: Object, propertyName: string) => {
    registerDecorator({
      name: 'isValidAnimalPersonalityArray',
      target: object.constructor,
      propertyName,
      options: validationOptions,
      validator: IsValidAnimalPersonalityArrayConstraint,
    });
  };
}
<file_sep>/src/util/OTPUtil.ts
// One-time-password generators.
// NOTE(review): Math.random() is not cryptographically secure; OTPs should
// presumably come from crypto.randomInt — TODO confirm threat model.
export default class OTPUtil {
  public static readonly DIGITS = '0123456789';

  public static readonly ALPHABET: string =
    'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';

  // Numeric OTP of the given length.
  public static digits(length: number): Promise<string> {
    let otp = '';
    for (let i = 0; i < length; i += 1) {
      otp += this.DIGITS.charAt(Math.floor(Math.random() * this.DIGITS.length));
    }
    return Promise.resolve(otp);
  }

  // Alphanumeric OTP of the given length.
  public static alphanumerical(length: number): Promise<string> {
    let otp = '';
    for (let i = 0; i < length; i += 1) {
      otp += this.ALPHABET.charAt(Math.floor(Math.random() * this.ALPHABET.length));
    }
    return Promise.resolve(otp);
  }
}
<file_sep>/src/util/EnvUtil.ts
// Convenience checks on NODE_ENV.
import config from '@app/config';

export default class EnvUtil {
  public static isProduction(): boolean {
    return config.NODE.ENV === 'production';
  }

  public static isDevelopment(): boolean {
    return config.NODE.ENV === 'development';
  }
}
<file_sep>/src/db/repository/UserRepository.ts
import {
  AbstractRepository,
  EntityRepository,
  // eslint-disable-next-line no-unused-vars
  FindOneOptions,
  // eslint-disable-next-line no-unused-vars
  ObjectID,
  // eslint-disable-next-line no-unused-vars
  FindConditions,
  // eslint-disable-next-line no-unused-vars
  DeleteResult,
  // eslint-disable-next-line no-unused-vars
  EntityManager,
  // eslint-disable-next-line no-unused-vars
  SaveOptions,
  Not,
} from 'typeorm';
// eslint-disable-next-line no-unused-vars
import User from '@app/db/entity/User';
import { EntityUtil, CryptUtil } from '@app/util';
// eslint-disable-next-line no-unused-vars
import { DuplicateEntityError, UserNotVerifiedError } from
'@app/common/error';
// eslint-disable-next-line no-unused-vars
import { Duplicate } from '@app/common/error/DuplicateEntityError';
import { JWTHelper } from '@app/helper';
import ImageService, { ImageType } from '@app/service/ImageService';
import UserVerificationRepository from './UserVerificationRepository';

// Custom repository for all User persistence: sign-in, verified-only lookup,
// unique-aware save/update, avatar upload and delete. Write operations run in
// their own transaction unless the caller supplies an EntityManager.
@EntityRepository(User)
export default class UserRepository extends AbstractRepository<User> {
  // Verify credentials and return a signed JWT; throws UserNotVerifiedError
  // (carrying the id) for unverified accounts.
  public async signInOrFail(user: User): Promise<string> {
    const foundUser: User = await this.manager.findOneOrFail(
      User,
      { username: user.username },
      { select: ['id', 'username', 'password', 'role', 'verified'] }
    );
    await CryptUtil.compareOrFail(user.password, foundUser.password);
    if (!foundUser.verified) throw new UserNotVerifiedError('User not verified', foundUser.id);
    return JWTHelper.sign({
      id: foundUser.id,
      role: foundUser.role,
    });
  }

  // findOneOrFail that additionally requires user.verified; forces the
  // 'verified' column into the SELECT list whatever options were passed.
  public async findOneAndVerifiedOrFail(
    optionsOrConditions?:
      | string
      | number
      | Date
      | ObjectID
      | FindOneOptions<User>
      | FindConditions<User>,
    maybeOptions?: FindOneOptions<User>
  ): Promise<User> {
    const selectableColumns = EntityUtil.selectableColumns(User, ['verified']);
    if (maybeOptions === undefined) {
      // eslint-disable-next-line no-param-reassign
      maybeOptions = {
        select: selectableColumns,
      };
    } else if (maybeOptions.select === undefined) {
      // eslint-disable-next-line no-param-reassign
      maybeOptions = Object.assign(maybeOptions, {
        select: selectableColumns,
      });
    } else if (Array.isArray(maybeOptions.select)) {
      maybeOptions.select.push('verified');
    }
    const user: User = await this.manager.findOneOrFail(
      User,
      optionsOrConditions as any,
      maybeOptions
    );
    if (!user.verified) throw new UserNotVerifiedError('User not verified');
    return Promise.resolve(user);
  }

  // Save a new user plus their verification row, atomically.
  public saveOrFail(user: User, entityManager?: EntityManager): Promise<User> {
    const callback = async (em: EntityManager) => {
      const newUser: User = await UserRepository.saveUnique(user, em);
      await em.getCustomRepository(UserVerificationRepository).saveOrFail(newUser, em);
      return Promise.resolve(newUser);
    };
    return entityManager === undefined
      ? this.manager.transaction(callback)
      : callback(entityManager);
  }

  // Merge the incoming fields into the stored entity, then save with
  // duplicate checking.
  public updateOrFail(user: User, entityManager?: EntityManager): Promise<User> {
    const callback = async (em: EntityManager) => {
      const userToUpdate: User = await em.findOneOrFail(User, user.id);
      await em.merge(User, userToUpdate, user);
      return UserRepository.updateUnique(userToUpdate, em);
    };
    return entityManager === undefined
      ? this.manager.transaction(callback)
      : callback(entityManager);
  }

  // Upload the avatar image, store its secure URL, then update the user.
  // NOTE(review): name has a typo ("Avata"); kept because callers depend on it.
  public updateAvataOrFail(
    user: User,
    avatar: Express.Multer.File,
    entityManager?: EntityManager
  ): Promise<User> {
    const callback = async (em: EntityManager) => {
      const avatarResult = await ImageService.upload(avatar, {
        type: ImageType.AVATAR,
        folder: 'user/avatar',
      });
      // eslint-disable-next-line no-param-reassign
      user.avatar = avatarResult.secure_url;
      return this.updateOrFail(user, em);
    };
    return entityManager === undefined
      ? this.manager.transaction(callback)
      : callback(entityManager);
  }

  // Delete the user, failing first if the id is unknown.
  public deleteOrFail(user: User, entityManager?: EntityManager): Promise<DeleteResult> {
    const callback = async (em: EntityManager) => {
      await em.findOneOrFail(User, user.id);
      return em.delete(User, user.id);
    };
    return entityManager === undefined
      ? this.manager.transaction(callback)
      : callback(entityManager);
  }

  // Save after checking every unique column (and, on insert, the id) for
  // duplicates; throws DuplicateEntityError listing the clashing fields.
  // On update, rows with the same id are excluded from the duplicate scan.
  private static async saveUnique(
    user: User,
    entityManager: EntityManager,
    saveOptions?: SaveOptions,
    isUpdateOperation?: boolean
  ): Promise<User> {
    const duplicateFields = new Set<Duplicate>();
    const uniqueColumns = EntityUtil.uniqueColumns(User);
    const whereConditions = uniqueColumns.map((u) => {
      return {
        [u]: user[u],
        ...(isUpdateOperation === true && {
          id: Not(user.id),
        }),
      };
    });
    const duplicateEntities = await entityManager.find(User, {
      where: whereConditions,
      select: uniqueColumns,
    });
    duplicateEntities.forEach((_user) => {
      uniqueColumns.forEach((u) => {
        if (user[u] === _user[u]) {
          duplicateFields.add({ property: u.toString(), value: user[u] });
        }
      });
    });
    if (
      !isUpdateOperation &&
      (await entityManager.findOne(User, {
        where: {
          id: user.id,
        },
      })) !== undefined
    ) {
      duplicateFields.add({
        property: `id`,
        value: user.id,
      });
    }
    if (duplicateFields.size !== 0)
      throw new DuplicateEntityError(`Duplicate User entity found`, Array.from(duplicateFields));
    return entityManager.save(User, user, saveOptions);
  }

  // saveUnique with isUpdateOperation=true.
  private static updateUnique(
    user: User,
    entityManager: EntityManager,
    saveOptions?: SaveOptions
  ): Promise<User> {
    return this.saveUnique(user, entityManager, saveOptions, true);
  }
}
<file_sep>/src/middleware/ValidatorMiddleware.ts
// Express middleware factories wrapping express-validator chains and
// class-validator based body validation.
// eslint-disable-next-line no-unused-vars
import { Response, Request, NextFunction } from 'express';
// eslint-disable-next-line no-unused-vars
import { ValidationChain, validationResult } from 'express-validator';
// eslint-disable-next-line no-unused-vars
import { transformAndValidate, ClassType } from 'class-transformer-validator';
import logger from '@app/logger';
import { ResponseHelper, HttpStatusCode } from '@app/helper';
import { EmptyFileError } from '@app/common/error';

export default class ValidatorMiddleware {
  // Run an express-validator chain; respond 422 with the error list on failure.
  public static validateChain(validations: ValidationChain[]) {
    return async (req: Request, res: Response, next: NextFunction) => {
      await Promise.all(validations.map((validation) => validation.run(req)));
      const errors = validationResult(req);
      if (errors.isEmpty()) next();
      else {
        logger.warn(`Validation chain failed due to ${JSON.stringify(errors.array())}`);
        ResponseHelper.send(res, HttpStatusCode.UNPROCESSABLE_ENTITY, errors.array());
      }
    };
  }

  // Transform req.body into classType, validate against the given groups, and
  // stash the instance in app.locals (keyed by class name) for handlers.
  public static validateClass<T extends object>(
    classType: ClassType<T>,
    validationGroup: string | string[] = []
  ) {
    return (req: Request, res: Response, next: NextFunction) => {
      transformAndValidate(classType, req.body, {
        validator: {
          groups: Array.isArray(validationGroup) ? validationGroup : [validationGroup],
          forbidUnknownValues: true,
          validationError: {
            target: false,
            value: true,
          },
        },
      })
        .then((_class) => this.addClassToLocals(req, _class))
        .then(() => {
          next();
        })
        .catch((ex) => {
          logger.warn(`Validation failed for class ${classType.name} due to ${JSON.stringify(ex)}`);
          ResponseHelper.send(res, HttpStatusCode.UNPROCESSABLE_ENTITY, ex);
        });
    };
  }

  // Require a multer single-file upload; forwards EmptyFileError otherwise.
  public static validateFileSingle(fieldName: string) {
    return (req: Request, _res: Response, next: NextFunction) => {
      if (req.file) next();
      else next(new EmptyFileError(`No file found in field named ${fieldName}`, fieldName));
    };
  }

  private static addClassToLocals<T extends object>(req: Request, _class: T): Promise<void> {
    req.app.locals[_class.constructor.name] = _class;
    return Promise.resolve();
  }
}
<file_sep>/src/app.ts
// Application entry point: connect to the database, then start the HTTP server.
import 'reflect-metadata';
import path from 'path';
// eslint-disable-next-line no-unused-vars
import { createConnection, ConnectionOptions } from 'typeorm';
import config from '@app/config'; // Always First!
import logger from '@app/logger';
import Server from '@app/server';

createConnection(<ConnectionOptions>{
  type: config.DATABASE.TYPE,
  url: config.DATABASE.URL,
  extra: {
    ssl: config.DATABASE.SSL,
  },
  synchronize: config.DATABASE.SYNCHRONIZE,
  logging: config.DATABASE.LOGGING,
  entities: [path.join(__dirname, config.DATABASE.ENTITIES)],
  migrations: [path.join(__dirname, config.DATABASE.MIGRATIONS)],
  subscribers: [path.join(__dirname, config.DATABASE.SUBSCRIBERS)],
})
  .then(() => {
    logger.info(`Database connected`);
    return Server.getInstance().listen(config.NODE.PORT);
  })
  .then((addressInfo) => {
    logger.info(`Server running at ${addressInfo.address} on port ${addressInfo.port}`);
  })
  .catch((ex) => {
    // Any startup failure is fatal.
    logger.error(ex.message);
    process.exit(1);
  });
<file_sep>/src/config/config.ts
// Environment-driven application configuration, validated with envalid.
import envalid, { str, port, bool, num, url } from 'envalid';
import logger from '@app/logger';

// Typed shape of the configuration object exported below.
export interface Configuration {
  NODE: {
    ENV: string;
    PORT: number;
  };
  DATABASE: {
    TYPE: string;
    URL: string;
    SSL: boolean;
    SYNCHRONIZE: boolean;
    LOGGING: boolean;
    ENTITIES: string;
    MIGRATIONS: string;
    SUBSCRIBERS: string;
  };
  SERVICE: {
    EMAIL: {
      HOST: string;
      PORT: number;
      SECURE: boolean;
      USERNAME: string;
      PASSWORD: string;
    };
    PHONE: {
      SID: string;
      TOKEN: string;
      NUMBER: string;
    };
    IMAGE: {
      CLOUD: string;
      KEY: string;
      SECRET: string;
    };
  };
  SECURITY: {
    OTP: {
      EMAIL: {
        DIGITS: number;
      };
      PHONE: {
        DIGITS: number;
      };
    };
    TOKEN: {
      PASSWORD: {
        LENGTH: number;
        EXPIRES_IN: number;
      };
    };
    BCRYPT: {
      // NOTE(review): "SALT_ROUNS" looks like a typo for SALT_ROUNDS; kept
      // because it is referenced as-is elsewhere in the codebase.
      SALT_ROUNS: number;
    };
    JWT: {
      SECRET: string;
      EXPIRES_IN: string;
    };
  };
}

// Validate and coerce environment variables; strict mode fails fast on
// missing or malformed values.
const cleanConfig = envalid.cleanEnv(
  process.env,
  {
    NODE_ENV: str({ default: 'production', choices: ['production', 'development'] }),
    PORT: port({ devDefault: 8080 }),
    DATABASE_URL: url(),
    DATABASE_SSL: bool({ default: true, devDefault: false }),
    DATABASE_SYNCHRONIZE: bool({ default: false, devDefault: true }),
    DATABASE_LOGGING: bool({ default: false }),
    SERVICE_EMAIL_HOST: str(),
    SERVICE_EMAIL_PORT: port(),
    SERVICE_EMAIL_SECURE: bool({ default:
true, devDefault: false }),
    SERVICE_EMAIL_USERNAME: str(),
    SERVICE_EMAIL_PASSWORD: str(),
    SERVICE_PHONE_SID: str(),
    SERVICE_PHONE_TOKEN: str(),
    SERVICE_PHONE_NUMBER: str(),
    SERVICE_IMAGE_CLOUD: str(),
    SERVICE_IMAGE_KEY: str(),
    SERVICE_IMAGE_SECRET: str(),
    SECURITY_BCRYPT_SALT_ROUNDS: num({
      default: 12,
      choices: [12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32],
    }),
    SECURITY_JWT_SECRET: str(),
    SECURITY_JWT_EXPIRES_IN: str({
      default: '32d',
      choices: ['1d', '2d', '4d', '8d', '16d', '32d'],
    }),
  },
  {
    strict: true,
  }
);

logger.debug('Environment variables loaded');

// Build the structured config object from the cleaned environment.
const config: Configuration = {
  NODE: {
    ENV: cleanConfig.NODE_ENV,
    PORT: cleanConfig.PORT,
  },
  DATABASE: {
    TYPE: 'postgres',
    URL: cleanConfig.DATABASE_URL,
    SSL: cleanConfig.DATABASE_SSL,
    SYNCHRONIZE: cleanConfig.DATABASE_SYNCHRONIZE,
    LOGGING: cleanConfig.DATABASE_LOGGING,
    // Globs are resolved relative to the compiled/src dir via path.join in app.ts;
    // .js in production builds, .ts under ts-node in development.
    ENTITIES: `./db/entity/**/*.${cleanConfig.NODE_ENV === 'production' ? 'js' : 'ts'}`,
    MIGRATIONS: `./db/migration/**/*.${cleanConfig.NODE_ENV === 'production' ? 'js' : 'ts'}`,
    // NOTE(review): leading "/." differs from ENTITIES/MIGRATIONS ("./");
    // looks like a typo — confirm this glob still resolves under path.join.
    SUBSCRIBERS: `/./db/subscriber/**/*.${cleanConfig.NODE_ENV === 'production' ? 'js' : 'ts'}`,
  },
  SERVICE: {
    EMAIL: {
      HOST: cleanConfig.SERVICE_EMAIL_HOST,
      PORT: cleanConfig.SERVICE_EMAIL_PORT,
      SECURE: cleanConfig.SERVICE_EMAIL_SECURE,
      USERNAME: cleanConfig.SERVICE_EMAIL_USERNAME,
      PASSWORD: cleanConfig.SERVICE_EMAIL_PASSWORD,
    },
    PHONE: {
      SID: cleanConfig.SERVICE_PHONE_SID,
      TOKEN: cleanConfig.SERVICE_PHONE_TOKEN,
      NUMBER: cleanConfig.SERVICE_PHONE_NUMBER,
    },
    IMAGE: {
      CLOUD: cleanConfig.SERVICE_IMAGE_CLOUD,
      KEY: cleanConfig.SERVICE_IMAGE_KEY,
      SECRET: cleanConfig.SERVICE_IMAGE_SECRET,
    },
  },
  SECURITY: {
    OTP: {
      EMAIL: { DIGITS: 5 },
      PHONE: { DIGITS: 5 },
    },
    TOKEN: {
      // Reset-token length in characters; expiry in minutes (see site routes).
      PASSWORD: { LENGTH: 32, EXPIRES_IN: 30 },
    },
    BCRYPT: {
      SALT_ROUNS: cleanConfig.SECURITY_BCRYPT_SALT_ROUNDS,
    },
    JWT: {
      SECRET: cleanConfig.SECURITY_JWT_SECRET,
      EXPIRES_IN: cleanConfig.SECURITY_JWT_EXPIRES_IN,
    },
  },
};

logger.info('Configuration object constructed');

export default config;
<file_sep>/src/common/validator/HasNoWhitespace.ts
/* eslint-disable class-methods-use-this */
// class-validator constraint: value must be a string without any whitespace.
import {
  registerDecorator,
  // eslint-disable-next-line no-unused-vars
  ValidationOptions,
  // eslint-disable-next-line no-unused-vars
  ValidatorConstraintInterface,
  ValidatorConstraint,
  // eslint-disable-next-line no-unused-vars
  ValidationArguments,
} from 'class-validator';

@ValidatorConstraint()
export class HasNoWhitespaceConstraint implements ValidatorConstraintInterface {
  validate(value: string) {
    return typeof value === 'string' && !/\s/.test(value);
  }

  defaultMessage(args: ValidationArguments) {
    return `${args.property} must not contains any whitespace`;
  }
}

// Property decorator factory registering the constraint above.
export default function HasNoWhitespace(validationOptions?: ValidationOptions) {
  return (object: Object, propertyName: string) => {
    registerDecorator({
      name: 'hasNoWhitespace',
      target: object.constructor,
      propertyName,
      options: validationOptions,
      validator: HasNoWhitespaceConstraint,
    });
  };
}
<file_sep>/script/build.ts
// Post-compile build step: copy static folders/files listed in build.json
// into the build target directory.
// eslint-disable-next-line import/no-extraneous-dependencies
import shell from 'shelljs';
import * as build from '../build.json';

// Copy Folders
build.copy.folders.forEach((folder) => {
  shell.cp('-R', folder, build.target);
});
// Copy Files
build.copy.files.forEach((file) => {
  shell.cp(file, build.target);
});
<file_sep>/src/server/Server.ts
// Singleton wrapper around the Express application.
import path from 'path';
// eslint-disable-next-line no-unused-vars
import { AddressInfo } from 'net';
import express from 'express';
import compression from 'compression';
import helmet from 'helmet';
import favicon from 'serve-favicon';
import bodyParser from 'body-parser';
import cors from 'cors';
import exphbs from 'express-handlebars';
import hbshelpers from 'handlebars-helpers';
import logger from '@app/logger';
import routes from '@app/route';
import { NotFoundMiddleware, ErrorMiddleware } from '@app/middleware';
import { EmailService, PhoneService, ImageService } from '@app/service';

export default class Server {
  // Port 0 lets the OS pick a free port.
  public static readonly DEFAULT_PORT = 0;

  private static instance: Server;

  private readonly server: express.Application;

  // Populated once listen() succeeds.
  private addressInfo!: AddressInfo;

  private constructor() {
    this.server = express();
    logger.debug('Server initialized');
    this.configure();
    logger.info('Server configured');
  }

  // Wire middleware, the Handlebars view engine, static assets, routes and
  // the 404/error handlers; then configure the external services.
  private configure(): void {
    this.server
      .options('*', cors())
      .use(cors())
      .enable('trust proxy')
      .engine(
        '.hbs',
        exphbs.create({
          extname: '.hbs',
          layoutsDir: path.join(__dirname, '../view/site/layout'),
          partialsDir: path.join(__dirname, '../view/site/partial'),
          helpers: hbshelpers(),
        }).engine
      )
      .set('view engine', '.hbs')
      .set('views', path.join(__dirname, '../view/site'))
      .use(compression())
      .use(helmet())
      .use(bodyParser.json())
      .use(bodyParser.urlencoded({ extended: true }))
      .use(favicon(path.join(__dirname, '../public', 'favicon.ico')))
      .use('/', express.static(path.join(__dirname, '../public')))
      .use('/', routes)
      .use(NotFoundMiddleware.handle)
      .use(ErrorMiddleware.handle);
    EmailService.configure();
    PhoneService.configure();
    ImageService.configure();
  }

  public static getInstance(): Server {
    if (!Server.instance) {
      Server.instance = new Server();
      logger.debug('Server instantiated');
    }
    return Server.instance;
  }

  // Start listening; resolves with the bound address once the socket is open.
  public listen(port: number = Server.DEFAULT_PORT): Promise<AddressInfo> {
    return new Promise((resolve, reject) => {
      const serverListener = this.server
        .listen(port, () => {
          this.addressInfo = serverListener.address() as AddressInfo;
          resolve(this.addressInfo);
        })
        .on('error', (ex) => {
          reject(ex);
        });
    });
  }

  public getAddressInfo(): AddressInfo {
    return this.addressInfo;
  }
}
<file_sep>/src/route/site/legal/index.ts
// Static legal pages rendered with Handlebars.
// eslint-disable-next-line no-unused-vars
import { Router, Request, Response } from 'express';

const router = Router();

router.get('/privacy', (_req: Request, res: Response) => {
  res.render('legal/privacy', {
    title: 'Privacy Policy',
  });
});

router.get('/terms', (_req: Request, res: Response) => {
  res.render('legal/terms', {
    title: 'Terms of Service',
  });
});

router.get('/eula', (_req: Request, res: Response) => {
  res.render('legal/eula', {
    title: 'EULA',
  });
});

export default router;
<file_sep>/src/controller/index.ts
// Barrel file re-exporting the controllers.
export { default as UserController } from './UserController';
export { default as UserVerificationController } from './UserVerificationController';
export { default as UserFriendController } from './UserFriendController';
export { default as PuppyController } from './PuppyController';
export { default as AnimalPersonalityController } from './AnimalPersonalityController';
export { default as AnimalSpecieController } from './AnimalSpecieController';
export { default as AnimalBreedController } from './AnimalBreedController';
export { default as AnimalPlaceController } from './AnimalPlaceController';
<file_sep>/src/common/validator/IsValidAnimalBreedArray.ts
// Async class-validator constraint: every id in the array must exist in the
// animal_breed table (mirrors IsValidAnimalPersonalityArray).
import {
  // eslint-disable-next-line no-unused-vars
  ValidationOptions,
  registerDecorator,
  ValidatorConstraint,
  // eslint-disable-next-line no-unused-vars
  ValidatorConstraintInterface,
  // eslint-disable-next-line no-unused-vars
  ValidationArguments,
} from 'class-validator';
import { getManager } from 'typeorm';
import AnimalBreed from '@app/db/entity/AnimalBreed';

@ValidatorConstraint({ async: true })
export class IsValidAnimalBreedArrayConstraint implements ValidatorConstraintInterface {
  // Ids that failed the last validate() run; reported by defaultMessage().
  private invalidIds: number[] = [];

  async validate(ids: number[]) {
    if (ids.length === 0) return true;
    const validIds: number[] = await getManager()
      .find(AnimalBreed, {
        select: ['id'],
        where: ids.map((id) => {
          return { id };
        }),
      })
      .then((validBreeds) => validBreeds.map((breed) => breed.id));
    this.invalidIds = ids.filter((id) => validIds.indexOf(id) === -1);
    return this.invalidIds.length === 0;
  }

  defaultMessage(args: ValidationArguments) {
    return `${args.property} must contain valid identifiers, ${JSON.stringify(this.invalidIds)} ${
      this.invalidIds.length === 1 ? 'is' : 'are'
    } unknown`;
  }
}

// Property decorator factory registering the constraint above.
export default function IsValidAnimalBreedArray(validationOptions?: ValidationOptions) {
  return (object: Object, propertyName: string) => {
    registerDecorator({
      name: 'isValidAnimalBreedArray',
      target: object.constructor,
      propertyName,
      options: validationOptions,
      validator: IsValidAnimalBreedArrayConstraint,
    });
  };
}
<file_sep>/src/route/site/index.ts
// Public site routes: landing page, legal pages, password-reset landing.
// eslint-disable-next-line no-unused-vars
import { Router, Request, Response, NextFunction } from 'express';
import { getManager } from 'typeorm';
import moment from 'moment';
import config from '@app/config';
import UserPasswordReset from '@app/db/entity/UserPasswordReset';
// eslint-disable-next-line no-unused-vars
import User from '@app/db/entity/User';
import legal from './legal';

const router = Router();

// Emoji pool for the landing page; one is picked at random per request.
// NOTE(review): '🐂' appears twice, slightly biasing the random pick.
const animals = [
  '🙈', '🙉', '🙊', '🐵', '🐒', '🦍', '🦧', '🐶', '🐕', '🦮', '🐕‍🦺', '🐩',
  '🐺', '🦊', '🦝', '🐱', '🐈', '🦁', '🐯', '🐅', '🐆', '🐴', '🐎', '🦄',
  '🦓', '🦌', '🐮', '🐂', '🐂', '🐄', '🐷', '🐖', '🐗', '🐏', '🐑', '🐐',
  '🐪', '🐫', '🦙', '🦒', '🐘', '🦏', '🦛', '🐭', '🐁', '🐀', '🐹', '🐰',
  '🐇', '🐿', '🦔', '🦇', '🐻', '🐨', '🐼', '🦥', '🦦', '🦨', '🦘', '🦡',
  '🐾', '🦃', '🐔', '🐓', '🐣', '🐤', '🐥', '🐦', '🐧', '🕊', '🦅', '🦆',
  '🦢', '🦉', '🦩', '🦚', '🦜', '🐸', '🐊', '🐢', '🦎', '🐍', '🐉', '🦕',
  '🦖', '🐳', '🐋', '🐬', '🐟', '🐠', '🐡', '🦈', '🐙', '🐚', '🐌', '🦋',
  '🐛', '🐜', '🐝', '🐞', '🦗', '🕷', '🦂', '🦟', '🦠', '🦀', '🦞', '🦐',
  '🦑',
];

router.use('/legal', legal);

router.get('', (_req: Request, res: Response) => {
  res.render('index', {
    title: 'Home',
    animal: animals[Math.floor(Math.random() * animals.length)],
  });
});

// Password-reset landing page: look the token up and render one of three
// states — valid (token forwarded to the form), already used, or
// expired/unknown token (empty options).
router.get('/password_reset/:token', async (req: Request, res: Response, next: NextFunction) => {
  const { token } = req.params;
  let options: { user?: User; token?: string; used?: boolean } = {
    user: undefined,
    token: undefined,
    used: undefined,
  };
  try {
    const userPasswordReset = await getManager().findOne(UserPasswordReset, {
      where: { token },
      relations: ['user'],
    });
    if (
      userPasswordReset &&
      !userPasswordReset.used &&
      // Token is valid only within EXPIRES_IN minutes of its last update.
      moment(new Date()).diff(userPasswordReset.updated_at, 'minutes') <=
        config.SECURITY.TOKEN.PASSWORD.EXPIRES_IN
    ) {
      options = {
        user: userPasswordReset.user,
        token: userPasswordReset.token,
        used: false,
      };
    } else if (userPasswordReset && userPasswordReset.used) {
      options = {
        user: userPasswordReset.user,
        used: true,
      };
    } else if (userPasswordReset) {
      options = {
        user: userPasswordReset.user,
      };
    }
    res.render('password_reset', { title: 'Password reset', ...options });
  } catch (ex) {
    next(ex);
  }
});

export default router;
f3662e66211c3ca5dced8cd452b7a9949fe822dc
[ "Markdown", "TypeScript", "Shell" ]
80
TypeScript
carlocorradini/happypuppy-server
a1d6313cd8e409b89971a7bc94fef34e696f0901
4e1ae59ba9f8cb6b1e7a79c9830250b7ab225420
refs/heads/master
<file_sep>package com.awse.domain; import java.util.List; import lombok.Data; @Data public class CompanyVO { private int id; private String name; private String address; private List<EmployeeVO> employeeList; } <file_sep>package com.awse.domain; import lombok.Data; @Data public class EmployeeVO { private int id; private int companyId; private String name; private String address; } <file_sep>package com.awse.mapper; import java.util.List; import com.awse.domain.EmployeeVO; public interface EmployeeMapper { public int insert(EmployeeVO employee); public List<EmployeeVO> getList(); public EmployeeVO getById(int id); public List<EmployeeVO> getByCompanyId(int companyId); } <file_sep>package com.awse.controller; import java.util.List; import javax.servlet.http.HttpServletRequest; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; import com.awse.domain.CompanyVO; import com.awse.mapper.CompanyMapper; import com.awse.service.CompanyService; import lombok.extern.log4j.Log4j; @RestController @RequestMapping("/company") @Log4j public class CompanyController { @Autowired private CompanyMapper companyMapper; @Autowired private CompanyService service; @PostMapping("") public CompanyVO register(@RequestBody CompanyVO company) { log.info(company); companyMapper.insert(company); log.info(company); return company; } @GetMapping("") public List<CompanyVO> getList() { // service.getList().forEach(list -> log.info(list)); companyMapper.getList().forEach(list -> log.info(list)); return companyMapper.getList(); } @GetMapping("/{id}") public CompanyVO get(@PathVariable("id") int id) { log.info("Company Id : " + id); 
return companyMapper.getById(id); } } <file_sep># Spring-MyBatis-XML ---------- ## boot-myBatis Repository 의 XML 버전 > Boot와 다르게 Spring Legacy 에서는 XML방식의 사용이 가능하므로 > 연습했던 것과 다르게 XML방식으로 같은 과정을 구현하였음. ------------------- + DB 테이블 칼럼명과 Java Property가 일치하지 않을경우 매핑시켜주는 코드 **Boot** ``` @Results(id="CompanyMap", value={ @Result(property = "name", column = "company_name"), @Result(property = "address", column = "company_address"), @Result(property = "employeeList", column = "id", many = @Many(select = "com.example.demo.mapper.EmployeeMapper.getByCompanyId")) }) ``` **XML** ``` <resultMap type="com.awse.domain.CompanyVO" id="companyMap"> <id property="id" column="id" /> <result property="id" column="id" /> <result property="name" column="company_name" /> <result property="address" column="company_address" /> <collection property="employeeList" column="id" select="com.awse.mapper.EmployeeMapper.getByCompanyId" /> </resultMap> <select ~~~ resultMap="companyMap"> 생략 </select> ``` ----------------------- + Insert시 AutoIncreament로 생성되는 PK값을 즉시 Insert한 데이터에 pk값을 넣어주는 코드 **Boot** ``` @Insert("insert into company ( company_name, company_address) values ( #{company.name}, #{company.address} ) ") @Options(useGeneratedKeys = true, keyProperty = "id") int insert(@Param("company") Company company); ``` **XML** ``` <insert id="insert" parameterType="com.awse.domain.CompanyVO" useGeneratedKeys="true" keyProperty="id"> insert into company ( company_name, company_address ) values ( #{name} , #{address} ) </insert> ```
e7a214139cf2671a2a9363915499c93716ac8d63
[ "Markdown", "Java" ]
5
Java
keede7/Spring-MyBatis-XML
646381159b2df29220d790f7ac8ebac00fb281b6
01fcde396e901678c7f840b99c3c20b72bf66316
refs/heads/master
<repo_name>Dallas62/SchemaOrgRepository<file_sep>/GenerationSamples/LymphaticVesselBundle/Model/AbstractLymphaticVessel.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\LymphaticVesselBundle\Model; use SchemaRepository\Bundle\LymphaticVesselBundle\Model\LymphaticVesselInterface; use SchemaRepository\Bundle\VesselBundle\Model\AbstractVessel; /** * Model of Lymphatic Vessel * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\LymphaticVesselBundle\Model */ abstract class AbstractLymphaticVessel extends AbstractVessel implements LymphaticVesselInterface { /** * The vasculature the lymphatic structure originates, or afferents, * from. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $originatesFrom; /** * The anatomical or organ system drained by this vessel; generally * refers to a specific part of an organ. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $regionDrained; /** * The vasculature the lymphatic structure runs, or efferents, to. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $runsTo; /** * Getter of Originates From * * The vasculature the lymphatic structure originates, or afferents, * from. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOriginatesFrom() { return $this->originatesFrom; } /** * Setter of Originates From * * The vasculature the lymphatic structure originates, or afferents, * from. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of originatesFrom */ public function setOriginatesFrom($value) { $this->originatesFrom = $value; } /** * Getter of Region Drained * * The anatomical or organ system drained by this vessel; generally * refers to a specific part of an organ. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRegionDrained() { return $this->regionDrained; } /** * Setter of Region Drained * * The anatomical or organ system drained by this vessel; generally * refers to a specific part of an organ. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of regionDrained */ public function setRegionDrained($value) { $this->regionDrained = $value; } /** * Getter of Runs to * * The vasculature the lymphatic structure runs, or efferents, to. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRunsTo() { return $this->runsTo; } /** * Setter of Runs to * * The vasculature the lymphatic structure runs, or efferents, to. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of runsTo */ public function setRunsTo($value) { $this->runsTo = $value; } } <file_sep>/GenerationSamples/TVEpisodeBundle/Model/TVEpisodeInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\TVEpisodeBundle\Model; use SchemaRepository\Bundle\CreativeWorkBundle\Model\CreativeWorkInterface; /** * Interface of TV Episode Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\TVEpisodeBundle\Model */ interface TVEpisodeInterface extends CreativeWorkInterface { /** * Getter of Actor * * A cast member of the movie, TV series, season, or episode, or video. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getActor(); /** * Getter of Actors * * A cast member of the movie, TV series, season, or episode, or video. 
* (legacy spelling; see singular form, actor) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getActors(); /** * Getter of Director * * The director of the movie, TV episode, or series. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDirector(); /** * Getter of Episode Number * * The episode number. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getEpisodeNumber(); /** * Getter of Music by * * The composer of the movie or TV soundtrack. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMusicBy(); /** * Getter of Part of Season * * The season to which this episode belongs. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPartOfSeason(); /** * Getter of Part of TV Series * * The TV series to which this episode or season belongs. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPartOfTVSeries(); /** * Getter of Producer * * The producer of the movie, TV series, season, or episode, or video. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getProducer(); /** * Getter of Production Company * * The production company or studio that made the movie, TV series, * season, or episode, or video. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getProductionCompany(); /** * Getter of Trailer * * The trailer of the movie or TV series, season, or episode. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTrailer(); } <file_sep>/GenerationSamples/CivicStructureBundle/Model/CivicStructureInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\CivicStructureBundle\Model; use SchemaRepository\Bundle\PlaceBundle\Model\PlaceInterface; /** * Interface of Civic Structure Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\CivicStructureBundle\Model */ interface CivicStructureInterface extends PlaceInterface { /** * Getter of Opening Hours * * The opening hours for a business. Opening hours can be specified as a * weekly time range, starting with days, then times per day. Multiple * days can be listed with commas ',' separating each day. 
Day or time * ranges are specified using a hyphen '-'.- Days are specified using the * following two-letter combinations: Mo, Tu, We, Th, Fr, Sa, Su.- Times * are specified using 24:00 time. For example, 3pm is specified as * 15:00. - Here is an example: <time itemprop="openingHours" * datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time>. - If * a business is open 7 days a week, then it can be specified as <time * itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all * day</time>. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOpeningHours(); } <file_sep>/GenerationSamples/MedicalConditionBundle/Model/MedicalConditionInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\MedicalConditionBundle\Model; use SchemaRepository\Bundle\MedicalEntityBundle\Model\MedicalEntityInterface; /** * Interface of Medical Condition Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MedicalConditionBundle\Model */ interface MedicalConditionInterface extends MedicalEntityInterface { /** * Getter of Associated Anatomy * * The anatomy of the underlying organ system or structures associated * with this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAssociatedAnatomy(); /** * Getter of Cause * * An underlying cause. More specifically, one of the causative agent(s) * that are most directly responsible for the pathophysiologic process * that eventually results in the occurrence. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCause(); /** * Getter of Differential Diagnosis * * One of a set of differential diagnoses for the condition. * Specifically, a closely-related or competing diagnosis typically * considered later in the cognitive process whereby this medical * condition is distinguished from others most likely responsible for a * similar collection of signs and symptoms to reach the most * parsimonious diagnosis or diagnoses in a patient. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDifferentialDiagnosis(); /** * Getter of Epidemiology * * The characteristics of associated patients, such as age, gender, race * etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEpidemiology(); /** * Getter of Expected Prognosis * * The likely outcome in either the short term or long term of the * medical condition. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getExpectedPrognosis(); /** * Getter of Natural Progression * * The expected progression of the condition if it is not treated and * allowed to progress naturally. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getNaturalProgression(); /** * Getter of Pathophysiology * * Changes in the normal mechanical, physical, and biochemical functions * that are associated with this activity or condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPathophysiology(); /** * Getter of Possible Complication * * A possible unexpected and unfavorable evolution of a medical * condition. Complications may include worsening of the signs or * symptoms of the disease, extension of the condition to other organ * systems, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPossibleComplication(); /** * Getter of Possible Treatment * * A possible treatment to address this condition, sign or symptom. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPossibleTreatment(); /** * Getter of Primary Prevention * * A preventative therapy used to prevent an initial occurrence of the * medical condition, such as vaccination. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPrimaryPrevention(); /** * Getter of Risk Factor * * A modifiable or non-modifiable factor that increases the risk of a * patient contracting this condition, e.g. age, coexisting condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRiskFactor(); /** * Getter of Secondary Prevention * * A preventative therapy used to prevent reoccurrence of the medical * condition after an initial episode of the condition. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSecondaryPrevention(); /** * Getter of Sign or Symptom * * A sign or symptom of this condition. Signs are objective or physically * observable manifestations of the medical condition while symptoms are * the subjective experienceof the medical condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSignOrSymptom(); /** * Getter of Stage * * The stage of the condition, if applicable. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStage(); /** * Getter of Subtype * * A more specific type of the condition, where applicable, for example * 'Type 1 Diabetes', 'Type 2 Diabetes', or 'Gestational Diabetes' for * Diabetes. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSubtype(); /** * Getter of Typical Test * * A medical test typically performed given this condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTypicalTest(); } <file_sep>/GenerationSamples/CreativeWorkBundle/Model/AbstractCreativeWork.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\CreativeWorkBundle\Model; use SchemaRepository\Bundle\CreativeWorkBundle\Model\CreativeWorkInterface; use SchemaRepository\Bundle\ThingBundle\Model\AbstractThing; /** * Model of Creative Work * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\CreativeWorkBundle\Model */ abstract class AbstractCreativeWork extends AbstractThing implements CreativeWorkInterface { /** * The subject matter of the content. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $about; /** * Specifies the Person that is legally accountable for the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $accountablePerson; /** * The overall rating, based on a collection of reviews or ratings, of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $aggregateRating; /** * A secondary title of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $alternativeHeadline; /** * The media objects that encode this creative work. This property is a * synonym for encodings. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $associatedMedia; /** * The intended audience of the item, i.e. the group for whom the item * was created. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $audience; /** * An embedded audio object. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $audio; /** * The author of this content. Please note that author is special in that * HTML 5 provides a special mechanism for indicating authorship via the * rel tag. That is equivalent to this and may be used interchangeably. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $author; /** * An award won by this person or for this creative work. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $award; /** * Awards won by this person or for this creative work. (legacy spelling; * see singular form, award) * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $awards; /** * A citation or reference to another creative work, such as another * publication, web page, scholarly article, etc. NOTE: Candidate for * promotion to ScholarlyArticle. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $citation; /** * Comments, typically from users, on this CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $comment; /** * The location of the content. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $contentLocation; /** * Official rating of a piece of content—for example,'MPAA PG-13'. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $contentRating; /** * A secondary contributor to the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $contributor; /** * The party holding the legal copyright to the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $copyrightHolder; /** * The year during which the claimed copyright for the CreativeWork was * first asserted. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var integer|long|float|decimal */ protected $copyrightYear; /** * The creator/author of this CreativeWork or UserComments. This is the * same as the Author property for CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $creator; /** * The date on which the CreativeWork was created. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $dateCreated; /** * The date on which the CreativeWork was most recently modified. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $dateModified; /** * Date of first broadcast/publication. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $datePublished; /** * A link to the page containing the comments of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $discussionUrl; /** * Specifies the Person who edited the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $editor; /** * An alignment to an established educational framework. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $educationalAlignment; /** * The purpose of a work in the context of education; for example, * 'assignment', 'group work'. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $educationalUse; /** * A media object that encode this CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $encoding; /** * The media objects that encode this creative work (legacy spelling; see * singular form, encoding). 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $encodings; /** * Genre of the creative work * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $genre; /** * Headline of the article * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $headline; /** * The language of the content. please use one of the language codes from * the IETF BCP 47 standard. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $inLanguage; /** * A count of a specific user interactions with this item—for example, * 20 UserLikes, 5 UserComments, or 300 UserDownloads. The user * interaction type should be one of the sub types of UserInteraction. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $interactionCount; /** * The predominant mode of learning supported by the learning resource. * Acceptable values are 'active', 'expositive', or 'mixed'. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $interactivityType; /** * A resource that was used in the creation of this resource. This term * can be repeated for multiple sources. For example, * http://example.com/great-multiplication-intro.html * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $isBasedOnUrl; /** * Indicates whether this content is family friendly. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var bool */ protected $isFamilyFriendly; /** * The keywords/tags used to describe this content. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $keywords; /** * The predominant type or kind characterizing the learning resource. For * example, 'presentation', 'handout'. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $learningResourceType; /** * Indicates that the CreativeWork contains a reference to, but is not * necessarily about a concept. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $mentions; /** * An offer to sell this item—for example, an offer to sell a product, * the DVD of a movie, or tickets to an event. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $offers; /** * Specifies the Person or Organization that distributed the * CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $provider; /** * The publisher of the creative work. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $publisher; /** * Link to page describing the editorial principles of the organization * primarily responsible for the creation of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $publishingPrinciples; /** * A review of the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $review; /** * Review of the item (legacy spelling; see singular form, review). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $reviews; /** * The Organization on whose behalf the creator was working. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $sourceOrganization; /** * The textual content of this CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $text; /** * A thumbnail image relevant to the Thing. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $thumbnailUrl; /** * Approximate or typical time it takes to work with or through this * learning resource for the typical intended target audience, e.g. 
* 'P30M', 'P1H25M'. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $timeRequired; /** * The typical range of ages the content's intendedEndUser, for example * '7-9', '11-'. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $typicalAgeRange; /** * The version of the CreativeWork embodied by a specified resource. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var integer|long|float|decimal */ protected $version; /** * An embedded video object. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $video; /** * Getter of About * * The subject matter of the content. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAbout() { return $this->about; } /** * Setter of About * * The subject matter of the content. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of about */ public function setAbout($value) { $this->about = $value; } /** * Getter of Accountable Person * * Specifies the Person that is legally accountable for the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAccountablePerson() { return $this->accountablePerson; } /** * Setter of Accountable Person * * Specifies the Person that is legally accountable for the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of accountablePerson */ public function setAccountablePerson($value) { $this->accountablePerson = $value; } /** * Getter of Aggregate Rating * * The overall rating, based on a collection of reviews or ratings, of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAggregateRating() { return $this->aggregateRating; } /** * Setter of Aggregate Rating * * The overall rating, based on a collection of reviews or ratings, of * the item. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of aggregateRating */ public function setAggregateRating($value) { $this->aggregateRating = $value; } /** * Getter of Alternative Headline * * A secondary title of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAlternativeHeadline() { return $this->alternativeHeadline; } /** * Setter of Alternative Headline * * A secondary title of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of alternativeHeadline */ public function setAlternativeHeadline($value) { $this->alternativeHeadline = $value; } /** * Getter of Associated Media * * The media objects that encode this creative work. This property is a * synonym for encodings. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAssociatedMedia() { return $this->associatedMedia; } /** * Setter of Associated Media * * The media objects that encode this creative work. This property is a * synonym for encodings. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of associatedMedia */ public function setAssociatedMedia($value) { $this->associatedMedia = $value; } /** * Getter of Audience * * The intended audience of the item, i.e. the group for whom the item * was created. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAudience() { return $this->audience; } /** * Setter of Audience * * The intended audience of the item, i.e. the group for whom the item * was created. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of audience */ public function setAudience($value) { $this->audience = $value; } /** * Getter of Audio * * An embedded audio object. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAudio() { return $this->audio; } /** * Setter of Audio * * An embedded audio object. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of audio */ public function setAudio($value) { $this->audio = $value; } /** * Getter of Author * * The author of this content. Please note that author is special in that * HTML 5 provides a special mechanism for indicating authorship via the * rel tag. That is equivalent to this and may be used interchangeably. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAuthor() { return $this->author; } /** * Setter of Author * * The author of this content. Please note that author is special in that * HTML 5 provides a special mechanism for indicating authorship via the * rel tag. That is equivalent to this and may be used interchangeably. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of author */ public function setAuthor($value) { $this->author = $value; } /** * Getter of Award * * An award won by this person or for this creative work. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAward() { return $this->award; } /** * Setter of Award * * An award won by this person or for this creative work. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of award */ public function setAward($value) { $this->award = $value; } /** * Getter of Awards * * Awards won by this person or for this creative work. (legacy spelling; * see singular form, award) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAwards() { return $this->awards; } /** * Setter of Awards * * Awards won by this person or for this creative work. 
(legacy spelling; * see singular form, award) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of awards */ public function setAwards($value) { $this->awards = $value; } /** * Getter of Citation * * A citation or reference to another creative work, such as another * publication, web page, scholarly article, etc. NOTE: Candidate for * promotion to ScholarlyArticle. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCitation() { return $this->citation; } /** * Setter of Citation * * A citation or reference to another creative work, such as another * publication, web page, scholarly article, etc. NOTE: Candidate for * promotion to ScholarlyArticle. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of citation */ public function setCitation($value) { $this->citation = $value; } /** * Getter of Comment * * Comments, typically from users, on this CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getComment() { return $this->comment; } /** * Setter of Comment * * Comments, typically from users, on this CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of comment */ public function setComment($value) { $this->comment = $value; } /** * Getter of Content Location * * The location of the content. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContentLocation() { return $this->contentLocation; } /** * Setter of Content Location * * The location of the content. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of contentLocation */ public function setContentLocation($value) { $this->contentLocation = $value; } /** * Getter of Content Rating * * Official rating of a piece of content—for example,'MPAA PG-13'. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getContentRating() { return $this->contentRating; } /** * Setter of Content Rating * * Official rating of a piece of content—for example,'MPAA PG-13'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of contentRating */ public function setContentRating($value) { $this->contentRating = $value; } /** * Getter of Contributor * * A secondary contributor to the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContributor() { return $this->contributor; } /** * Setter of Contributor * * A secondary contributor to the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of contributor */ public function setContributor($value) { $this->contributor = $value; } /** * Getter of Copyright Holder * * The party holding the legal copyright to the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCopyrightHolder() { return $this->copyrightHolder; } /** * Setter of Copyright Holder * * The party holding the legal copyright to the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of copyrightHolder */ public function setCopyrightHolder($value) { $this->copyrightHolder = $value; } /** * Getter of Copyright Year * * The year during which the claimed copyright for the CreativeWork was * first asserted. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getCopyrightYear() { return $this->copyrightYear; } /** * Setter of Copyright Year * * The year during which the claimed copyright for the CreativeWork was * first asserted. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param integer|long|float|decimal $value Value of copyrightYear */ public function setCopyrightYear($value) { $this->copyrightYear = $value; } /** * Getter of Creator * * The creator/author of this CreativeWork or UserComments. This is the * same as the Author property for CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCreator() { return $this->creator; } /** * Setter of Creator * * The creator/author of this CreativeWork or UserComments. This is the * same as the Author property for CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of creator */ public function setCreator($value) { $this->creator = $value; } /** * Getter of Date Created * * The date on which the CreativeWork was created. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDateCreated() { return $this->dateCreated; } /** * Setter of Date Created * * The date on which the CreativeWork was created. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of dateCreated */ public function setDateCreated($value) { $this->dateCreated = $value; } /** * Getter of Date Modified * * The date on which the CreativeWork was most recently modified. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDateModified() { return $this->dateModified; } /** * Setter of Date Modified * * The date on which the CreativeWork was most recently modified. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of dateModified */ public function setDateModified($value) { $this->dateModified = $value; } /** * Getter of Date Published * * Date of first broadcast/publication. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDatePublished() { return $this->datePublished; } /** * Setter of Date Published * * Date of first broadcast/publication. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of datePublished */ public function setDatePublished($value) { $this->datePublished = $value; } /** * Getter of Discussion Url * * A link to the page containing the comments of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDiscussionUrl() { return $this->discussionUrl; } /** * Setter of Discussion Url * * A link to the page containing the comments of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of discussionUrl */ public function setDiscussionUrl($value) { $this->discussionUrl = $value; } /** * Getter of Editor * * Specifies the Person who edited the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEditor() { return $this->editor; } /** * Setter of Editor * * Specifies the Person who edited the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of editor */ public function setEditor($value) { $this->editor = $value; } /** * Getter of Educational Alignment * * An alignment to an established educational framework. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEducationalAlignment() { return $this->educationalAlignment; } /** * Setter of Educational Alignment * * An alignment to an established educational framework. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of educationalAlignment */ public function setEducationalAlignment($value) { $this->educationalAlignment = $value; } /** * Getter of Educational Use * * The purpose of a work in the context of education; for example, * 'assignment', 'group work'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEducationalUse() { return $this->educationalUse; } /** * Setter of Educational Use * * The purpose of a work in the context of education; for example, * 'assignment', 'group work'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of educationalUse */ public function setEducationalUse($value) { $this->educationalUse = $value; } /** * Getter of Encoding * * A media object that encode this CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEncoding() { return $this->encoding; } /** * Setter of Encoding * * A media object that encode this CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of encoding */ public function setEncoding($value) { $this->encoding = $value; } /** * Getter of Encodings * * The media objects that encode this creative work (legacy spelling; see * singular form, encoding). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getEncodings() { return $this->encodings; } /** * Setter of Encodings * * The media objects that encode this creative work (legacy spelling; see * singular form, encoding). 
*
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @param array $value Value of encodings
 */
public function setEncodings($value)
{
    $this->encodings = $value;
}

/**
 * Add encoding to Array
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @param mixed $value Value of encoding
 */
public function addEncoding($value)
{
    // PHP auto-creates the array on first append, so no guard is needed.
    $this->encodings[] = $value;
}

/**
 * Remove encoding to Array
 *
 * No-op when the value is not present or when the collection has not been
 * initialized yet ($encodings has no default value, so it is null until
 * setEncodings()/addEncoding() is called; array_search() on null raises a
 * warning on PHP 7 and a TypeError on PHP 8).
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @param mixed $value Value of encoding
 */
public function removeEncoding($value)
{
    if (!is_array($this->encodings)) {
        return;
    }

    $key = array_search($value, $this->encodings);

    if ($key !== false) {
        unset($this->encodings[$key]);
    }
}

/**
 * Getter of Genre
 *
 * Genre of the creative work
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @return string
 */
public function getGenre()
{
    return $this->genre;
}

/**
 * Setter of Genre
 *
 * Genre of the creative work
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @param string $value Value of genre
 */
public function setGenre($value)
{
    $this->genre = $value;
}

/**
 * Getter of Headline
 *
 * Headline of the article
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @return string
 */
public function getHeadline()
{
    return $this->headline;
}

/**
 * Setter of Headline
 *
 * Headline of the article
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @param string $value Value of headline
 */
public function setHeadline($value)
{
    $this->headline = $value;
}

/**
 * Getter of In Language
 *
 * The language of the content. please use one of the language codes from
 * the IETF BCP 47 standard.
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @return string
 */
public function getInLanguage()
{
    return $this->inLanguage;
}

/**
 * Setter of In Language
 *
 * The language of the content. please use one of the language codes from
 * the IETF BCP 47 standard.
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of inLanguage */ public function setInLanguage($value) { $this->inLanguage = $value; } /** * Getter of Interaction Count * * A count of a specific user interactions with this item—for example, * 20 UserLikes, 5 UserComments, or 300 UserDownloads. The user * interaction type should be one of the sub types of UserInteraction. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getInteractionCount() { return $this->interactionCount; } /** * Setter of Interaction Count * * A count of a specific user interactions with this item—for example, * 20 UserLikes, 5 UserComments, or 300 UserDownloads. The user * interaction type should be one of the sub types of UserInteraction. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of interactionCount */ public function setInteractionCount($value) { $this->interactionCount = $value; } /** * Getter of Interactivity Type * * The predominant mode of learning supported by the learning resource. * Acceptable values are 'active', 'expositive', or 'mixed'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getInteractivityType() { return $this->interactivityType; } /** * Setter of Interactivity Type * * The predominant mode of learning supported by the learning resource. * Acceptable values are 'active', 'expositive', or 'mixed'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of interactivityType */ public function setInteractivityType($value) { $this->interactivityType = $value; } /** * Getter of Is Based On Url * * A resource that was used in the creation of this resource. This term * can be repeated for multiple sources. 
For example, * http://example.com/great-multiplication-intro.html * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getIsBasedOnUrl() { return $this->isBasedOnUrl; } /** * Setter of Is Based On Url * * A resource that was used in the creation of this resource. This term * can be repeated for multiple sources. For example, * http://example.com/great-multiplication-intro.html * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of isBasedOnUrl */ public function setIsBasedOnUrl($value) { $this->isBasedOnUrl = $value; } /** * Getter of Is Family Friendly * * Indicates whether this content is family friendly. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return bool */ public function getIsFamilyFriendly() { return $this->isFamilyFriendly; } /** * Setter of Is Family Friendly * * Indicates whether this content is family friendly. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param bool $value Value of isFamilyFriendly */ public function setIsFamilyFriendly($value) { $this->isFamilyFriendly = $value; } /** * Getter of Keywords * * The keywords/tags used to describe this content. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getKeywords() { return $this->keywords; } /** * Setter of Keywords * * The keywords/tags used to describe this content. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of keywords */ public function setKeywords($value) { $this->keywords = $value; } /** * Getter of Learning Resource Type * * The predominant type or kind characterizing the learning resource. For * example, 'presentation', 'handout'. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getLearningResourceType() { return $this->learningResourceType; } /** * Setter of Learning Resource Type * * The predominant type or kind characterizing the learning resource. For * example, 'presentation', 'handout'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of learningResourceType */ public function setLearningResourceType($value) { $this->learningResourceType = $value; } /** * Getter of Mentions * * Indicates that the CreativeWork contains a reference to, but is not * necessarily about a concept. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMentions() { return $this->mentions; } /** * Setter of Mentions * * Indicates that the CreativeWork contains a reference to, but is not * necessarily about a concept. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of mentions */ public function setMentions($value) { $this->mentions = $value; } /** * Getter of Offers * * An offer to sell this item—for example, an offer to sell a product, * the DVD of a movie, or tickets to an event. * * @author SchemaGenerator <<EMAIL>.fr> * * @access public * * @return mixed */ public function getOffers() { return $this->offers; } /** * Setter of Offers * * An offer to sell this item—for example, an offer to sell a product, * the DVD of a movie, or tickets to an event. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of offers */ public function setOffers($value) { $this->offers = $value; } /** * Getter of Provider * * Specifies the Person or Organization that distributed the * CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getProvider() { return $this->provider; } /** * Setter of Provider * * Specifies the Person or Organization that distributed the * CreativeWork. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of provider */ public function setProvider($value) { $this->provider = $value; } /** * Getter of Publisher * * The publisher of the creative work. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPublisher() { return $this->publisher; } /** * Setter of Publisher * * The publisher of the creative work. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of publisher */ public function setPublisher($value) { $this->publisher = $value; } /** * Getter of Publishing Principles * * Link to page describing the editorial principles of the organization * primarily responsible for the creation of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPublishingPrinciples() { return $this->publishingPrinciples; } /** * Setter of Publishing Principles * * Link to page describing the editorial principles of the organization * primarily responsible for the creation of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of publishingPrinciples */ public function setPublishingPrinciples($value) { $this->publishingPrinciples = $value; } /** * Getter of Review * * A review of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getReview() { return $this->review; } /** * Setter of Review * * A review of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of review */ public function setReview($value) { $this->review = $value; } /** * Getter of Reviews * * Review of the item (legacy spelling; see singular form, review). 
*
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @return array
 */
public function getReviews()
{
    return $this->reviews;
}

/**
 * Setter of Reviews
 *
 * Review of the item (legacy spelling; see singular form, review).
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @param array $value Value of reviews
 */
public function setReviews($value)
{
    $this->reviews = $value;
}

/**
 * Add review to Array
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @param mixed $value Value of review
 */
public function addReview($value)
{
    // PHP auto-creates the array on first append, so no guard is needed.
    $this->reviews[] = $value;
}

/**
 * Remove review to Array
 *
 * No-op when the value is not present or when the collection has not been
 * initialized yet ($reviews has no default value, so it is null until
 * setReviews()/addReview() is called; array_search() on null raises a
 * warning on PHP 7 and a TypeError on PHP 8).
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @param mixed $value Value of review
 */
public function removeReview($value)
{
    if (!is_array($this->reviews)) {
        return;
    }

    $key = array_search($value, $this->reviews);

    if ($key !== false) {
        unset($this->reviews[$key]);
    }
}

/**
 * Getter of Source Organization
 *
 * The Organization on whose behalf the creator was working.
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @return mixed
 */
public function getSourceOrganization()
{
    return $this->sourceOrganization;
}

/**
 * Setter of Source Organization
 *
 * The Organization on whose behalf the creator was working.
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @param mixed $value Value of sourceOrganization
 */
public function setSourceOrganization($value)
{
    $this->sourceOrganization = $value;
}

/**
 * Getter of Text
 *
 * The textual content of this CreativeWork.
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @return string
 */
public function getText()
{
    return $this->text;
}

/**
 * Setter of Text
 *
 * The textual content of this CreativeWork.
 *
 * @author SchemaGenerator <<EMAIL>>
 *
 * @access public
 *
 * @param string $value Value of text
 */
public function setText($value)
{
    $this->text = $value;
}

/**
 * Getter of Thumbnail Url
 *
 * A thumbnail image relevant to the Thing.
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getThumbnailUrl() { return $this->thumbnailUrl; } /** * Setter of Thumbnail Url * * A thumbnail image relevant to the Thing. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of thumbnailUrl */ public function setThumbnailUrl($value) { $this->thumbnailUrl = $value; } /** * Getter of Time Required * * Approximate or typical time it takes to work with or through this * learning resource for the typical intended target audience, e.g. * 'P30M', 'P1H25M'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTimeRequired() { return $this->timeRequired; } /** * Setter of Time Required * * Approximate or typical time it takes to work with or through this * learning resource for the typical intended target audience, e.g. * 'P30M', 'P1H25M'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of timeRequired */ public function setTimeRequired($value) { $this->timeRequired = $value; } /** * Getter of Typical Age Range * * The typical range of ages the content's intendedEndUser, for example * '7-9', '11-'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getTypicalAgeRange() { return $this->typicalAgeRange; } /** * Setter of Typical Age Range * * The typical range of ages the content's intendedEndUser, for example * '7-9', '11-'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of typicalAgeRange */ public function setTypicalAgeRange($value) { $this->typicalAgeRange = $value; } /** * Getter of Version * * The version of the CreativeWork embodied by a specified resource. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getVersion() { return $this->version; } /** * Setter of Version * * The version of the CreativeWork embodied by a specified resource. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param integer|long|float|decimal $value Value of version */ public function setVersion($value) { $this->version = $value; } /** * Getter of Video * * An embedded video object. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getVideo() { return $this->video; } /** * Setter of Video * * An embedded video object. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of video */ public function setVideo($value) { $this->video = $value; } } <file_sep>/GenerationSamples/AnatomicalStructureBundle/Model/AbstractAnatomicalStructure.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\AnatomicalStructureBundle\Model; use SchemaRepository\Bundle\AnatomicalStructureBundle\Model\AnatomicalStructureInterface; use SchemaRepository\Bundle\MedicalEntityBundle\Model\AbstractMedicalEntity; /** * Model of Anatomical Structure * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\AnatomicalStructureBundle\Model */ abstract class AbstractAnatomicalStructure extends AbstractMedicalEntity implements AnatomicalStructureInterface { /** * If applicable, a description of the pathophysiology associated with * the anatomical system, including potential abnormal changes in the * mechanical, physical, and biochemical functions of the system. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $associatedPathophysiology; /** * Location in the body of the anatomical structure. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $bodyLocation; /** * Other anatomical structures to which this structure is connected. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $connectedTo; /** * An image containing a diagram that illustrates the structure and/or * its component substructures and/or connections with other structures. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $diagram; /** * Function of the anatomical structure. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $function; /** * The anatomical or organ system that this structure is part of. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $partOfSystem; /** * A medical condition associated with this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $relatedCondition; /** * A medical therapy related to this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $relatedTherapy; /** * Component (sub-)structure(s) that comprise this anatomical structure. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $subStructure; /** * Getter of Associated Pathophysiology * * If applicable, a description of the pathophysiology associated with * the anatomical system, including potential abnormal changes in the * mechanical, physical, and biochemical functions of the system. * * @author SchemaGenerator <<EMAIL>@free.fr> * * @access public * * @return string */ public function getAssociatedPathophysiology() { return $this->associatedPathophysiology; } /** * Setter of Associated Pathophysiology * * If applicable, a description of the pathophysiology associated with * the anatomical system, including potential abnormal changes in the * mechanical, physical, and biochemical functions of the system. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of associatedPathophysiology */ public function setAssociatedPathophysiology($value) { $this->associatedPathophysiology = $value; } /** * Getter of Body Location * * Location in the body of the anatomical structure. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getBodyLocation() { return $this->bodyLocation; } /** * Setter of Body Location * * Location in the body of the anatomical structure. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of bodyLocation */ public function setBodyLocation($value) { $this->bodyLocation = $value; } /** * Getter of Connected to * * Other anatomical structures to which this structure is connected. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getConnectedTo() { return $this->connectedTo; } /** * Setter of Connected to * * Other anatomical structures to which this structure is connected. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of connectedTo */ public function setConnectedTo($value) { $this->connectedTo = $value; } /** * Getter of Diagram * * An image containing a diagram that illustrates the structure and/or * its component substructures and/or connections with other structures. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDiagram() { return $this->diagram; } /** * Setter of Diagram * * An image containing a diagram that illustrates the structure and/or * its component substructures and/or connections with other structures. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of diagram */ public function setDiagram($value) { $this->diagram = $value; } /** * Getter of Function * * Function of the anatomical structure. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getFunction() { return $this->function; } /** * Setter of Function * * Function of the anatomical structure. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of function */ public function setFunction($value) { $this->function = $value; } /** * Getter of Part of System * * The anatomical or organ system that this structure is part of. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPartOfSystem() { return $this->partOfSystem; } /** * Setter of Part of System * * The anatomical or organ system that this structure is part of. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of partOfSystem */ public function setPartOfSystem($value) { $this->partOfSystem = $value; } /** * Getter of Related Condition * * A medical condition associated with this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelatedCondition() { return $this->relatedCondition; } /** * Setter of Related Condition * * A medical condition associated with this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of relatedCondition */ public function setRelatedCondition($value) { $this->relatedCondition = $value; } /** * Getter of Related Therapy * * A medical therapy related to this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelatedTherapy() { return $this->relatedTherapy; } /** * Setter of Related Therapy * * A medical therapy related to this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of relatedTherapy */ public function setRelatedTherapy($value) { $this->relatedTherapy = $value; } /** * Getter of Sub Structure * * Component (sub-)structure(s) that comprise this anatomical structure. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSubStructure() { return $this->subStructure; } /** * Setter of Sub Structure * * Component (sub-)structure(s) that comprise this anatomical structure. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of subStructure */ public function setSubStructure($value) { $this->subStructure = $value; } } <file_sep>/GenerationSamples/MuscleBundle/Model/AbstractMuscle.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\MuscleBundle\Model; use SchemaRepository\Bundle\MuscleBundle\Model\MuscleInterface; use SchemaRepository\Bundle\AnatomicalStructureBundle\Model\AbstractAnatomicalStructure; /** * Model of Muscle * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MuscleBundle\Model */ abstract class AbstractMuscle extends AbstractAnatomicalStructure implements MuscleInterface { /** * The movement the muscle generates. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $action; /** * The muscle whose action counteracts the specified muscle. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $antagonist; /** * The blood vessel that carries blood from the heart to the muscle. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $bloodSupply; /** * The place of attachment of a muscle, or what the muscle moves. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $insertion; /** * The underlying innervation associated with the muscle. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $nerve; /** * The place or point where a muscle arises. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $origin; /** * Getter of Action * * The movement the muscle generates. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAction() { return $this->action; } /** * Setter of Action * * The movement the muscle generates. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of action */ public function setAction($value) { $this->action = $value; } /** * Getter of Antagonist * * The muscle whose action counteracts the specified muscle. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAntagonist() { return $this->antagonist; } /** * Setter of Antagonist * * The muscle whose action counteracts the specified muscle. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of antagonist */ public function setAntagonist($value) { $this->antagonist = $value; } /** * Getter of Blood Supply * * The blood vessel that carries blood from the heart to the muscle. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBloodSupply() { return $this->bloodSupply; } /** * Setter of Blood Supply * * The blood vessel that carries blood from the heart to the muscle. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of bloodSupply */ public function setBloodSupply($value) { $this->bloodSupply = $value; } /** * Getter of Insertion * * The place of attachment of a muscle, or what the muscle moves. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getInsertion() { return $this->insertion; } /** * Setter of Insertion * * The place of attachment of a muscle, or what the muscle moves. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of insertion */ public function setInsertion($value) { $this->insertion = $value; } /** * Getter of Nerve * * The underlying innervation associated with the muscle. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getNerve() { return $this->nerve; } /** * Setter of Nerve * * The underlying innervation associated with the muscle. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of nerve */ public function setNerve($value) { $this->nerve = $value; } /** * Getter of Origin * * The place or point where a muscle arises. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOrigin() { return $this->origin; } /** * Setter of Origin * * The place or point where a muscle arises. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of origin */ public function setOrigin($value) { $this->origin = $value; } } <file_sep>/GenerationSamples/CreativeWorkBundle/Model/CreativeWorkInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\CreativeWorkBundle\Model; use SchemaRepository\Bundle\ThingBundle\Model\ThingInterface; /** * Interface of Creative Work Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\CreativeWorkBundle\Model */ interface CreativeWorkInterface extends ThingInterface { /** * Getter of About * * The subject matter of the content. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAbout(); /** * Getter of Accountable Person * * Specifies the Person that is legally accountable for the CreativeWork. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAccountablePerson(); /** * Getter of Aggregate Rating * * The overall rating, based on a collection of reviews or ratings, of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAggregateRating(); /** * Getter of Alternative Headline * * A secondary title of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAlternativeHeadline(); /** * Getter of Associated Media * * The media objects that encode this creative work. This property is a * synonym for encodings. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAssociatedMedia(); /** * Getter of Audience * * The intended audience of the item, i.e. the group for whom the item * was created. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAudience(); /** * Getter of Audio * * An embedded audio object. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAudio(); /** * Getter of Author * * The author of this content. Please note that author is special in that * HTML 5 provides a special mechanism for indicating authorship via the * rel tag. That is equivalent to this and may be used interchangeably. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAuthor(); /** * Getter of Award * * An award won by this person or for this creative work. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAward(); /** * Getter of Awards * * Awards won by this person or for this creative work. 
(legacy spelling; * see singular form, award) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAwards(); /** * Getter of Citation * * A citation or reference to another creative work, such as another * publication, web page, scholarly article, etc. NOTE: Candidate for * promotion to ScholarlyArticle. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCitation(); /** * Getter of Comment * * Comments, typically from users, on this CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getComment(); /** * Getter of Content Location * * The location of the content. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContentLocation(); /** * Getter of Content Rating * * Official rating of a piece of content—for example,'MPAA PG-13'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getContentRating(); /** * Getter of Contributor * * A secondary contributor to the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContributor(); /** * Getter of Copyright Holder * * The party holding the legal copyright to the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCopyrightHolder(); /** * Getter of Copyright Year * * The year during which the claimed copyright for the CreativeWork was * first asserted. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getCopyrightYear(); /** * Getter of Creator * * The creator/author of this CreativeWork or UserComments. This is the * same as the Author property for CreativeWork. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCreator(); /** * Getter of Date Created * * The date on which the CreativeWork was created. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDateCreated(); /** * Getter of Date Modified * * The date on which the CreativeWork was most recently modified. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDateModified(); /** * Getter of Date Published * * Date of first broadcast/publication. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDatePublished(); /** * Getter of Discussion Url * * A link to the page containing the comments of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDiscussionUrl(); /** * Getter of Editor * * Specifies the Person who edited the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEditor(); /** * Getter of Educational Alignment * * An alignment to an established educational framework. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEducationalAlignment(); /** * Getter of Educational Use * * The purpose of a work in the context of education; for example, * 'assignment', 'group work'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEducationalUse(); /** * Getter of Encoding * * A media object that encode this CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEncoding(); /** * Getter of Encodings * * The media objects that encode this creative work (legacy spelling; see * singular form, encoding). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getEncodings(); /** * Getter of Genre * * Genre of the creative work * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGenre(); /** * Getter of Headline * * Headline of the article * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getHeadline(); /** * Getter of In Language * * The language of the content. please use one of the language codes from * the IETF BCP 47 standard. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getInLanguage(); /** * Getter of Interaction Count * * A count of a specific user interactions with this item—for example, * 20 UserLikes, 5 UserComments, or 300 UserDownloads. The user * interaction type should be one of the sub types of UserInteraction. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getInteractionCount(); /** * Getter of Interactivity Type * * The predominant mode of learning supported by the learning resource. * Acceptable values are 'active', 'expositive', or 'mixed'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getInteractivityType(); /** * Getter of Is Based On Url * * A resource that was used in the creation of this resource. This term * can be repeated for multiple sources. For example, * http://example.com/great-multiplication-intro.html * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getIsBasedOnUrl(); /** * Getter of Is Family Friendly * * Indicates whether this content is family friendly. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return bool */ public function getIsFamilyFriendly(); /** * Getter of Keywords * * The keywords/tags used to describe this content. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getKeywords(); /** * Getter of Learning Resource Type * * The predominant type or kind characterizing the learning resource. For * example, 'presentation', 'handout'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getLearningResourceType(); /** * Getter of Mentions * * Indicates that the CreativeWork contains a reference to, but is not * necessarily about a concept. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMentions(); /** * Getter of Offers * * An offer to sell this item—for example, an offer to sell a product, * the DVD of a movie, or tickets to an event. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOffers(); /** * Getter of Provider * * Specifies the Person or Organization that distributed the * CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getProvider(); /** * Getter of Publisher * * The publisher of the creative work. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPublisher(); /** * Getter of Publishing Principles * * Link to page describing the editorial principles of the organization * primarily responsible for the creation of the CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPublishingPrinciples(); /** * Getter of Review * * A review of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getReview(); /** * Getter of Reviews * * Review of the item (legacy spelling; see singular form, review). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getReviews(); /** * Getter of Source Organization * * The Organization on whose behalf the creator was working. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSourceOrganization(); /** * Getter of Text * * The textual content of this CreativeWork. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getText(); /** * Getter of Thumbnail Url * * A thumbnail image relevant to the Thing. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getThumbnailUrl(); /** * Getter of Time Required * * Approximate or typical time it takes to work with or through this * learning resource for the typical intended target audience, e.g. * 'P30M', 'P1H25M'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTimeRequired(); /** * Getter of Typical Age Range * * The typical range of ages the content's intendedEndUser, for example * '7-9', '11-'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getTypicalAgeRange(); /** * Getter of Version * * The version of the CreativeWork embodied by a specified resource. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getVersion(); /** * Getter of Video * * An embedded video object. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getVideo(); } <file_sep>/GenerationSamples/DrugBundle/Document/AbstractDrug.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/

namespace SchemaRepository\Bundle\DrugBundle\Document;

// BUGFIX: the model base class must be imported under an alias. Importing it
// as plain "AbstractDrug" collides with the "AbstractDrug" class declared in
// this namespace below (PHP fatal error: "Cannot declare class ... because the
// name is already in use"), and made the extends clause refer to this class
// itself.
use SchemaRepository\Bundle\DrugBundle\Model\AbstractDrug as AbstractDrugModel;
use Doctrine\ODM\MongoDB\Mapping\Annotations as ODM;
use Symfony\Component\Validator\Constraints as Assert;

/**
 * Drug Document
 *
 * Doctrine MongoDB ODM mapped superclass for the Drug model: it declares the
 * ODM field mappings (and string-type validation constraints) for the
 * properties inherited from the model layer. Concrete Drug documents are
 * expected to extend this class.
 *
 * @ODM\MappedSuperclass()
 */
abstract class AbstractDrug extends AbstractDrugModel
{
    /**
     * {@inheritdoc}
     *
     * @ODM\Id
     */
    protected $id;

    /**
     * {@inheritdoc}
     *
     * @ODM\String
     * @Assert\Type(type="string")
     */
    protected $additionalType;

    /**
     * {@inheritdoc}
     *
     * @ODM\String
     * @Assert\Type(type="string")
     */
    protected $description;

    /**
     * {@inheritdoc}
     *
     * @ODM\String
     * @Assert\Type(type="string")
     */
    protected $image;

    /**
     * {@inheritdoc}
     *
     * @ODM\String
     * @Assert\Type(type="string")
     */
    protected $name;

    /**
     * {@inheritdoc}
     *
     * @ODM\String
     * @Assert\Type(type="string")
     */
    protected $sameAs;

    /**
     * {@inheritdoc}
     *
     * @ODM\String
     * @Assert\Type(type="string")
     */
    protected $url;

    /**
     * {@inheritdoc}
     *
     * @ODM\String
     * @Assert\Type(type="string")
     */
    protected $alternateName;

    /**
     * {@inheritdoc}
     *
     * @ODM\Field
     */
    protected $code;

    /**
     * {@inheritdoc}
     *
     * @ODM\Field
     */
    protected $guideline;

    /**
     * {@inheritdoc}
     *
     * @ODM\Field
     */
    protected $medicineSystem;

    /**
     * {@inheritdoc}
     *
     * @ODM\Field
     */
    protected $recognizingAuthority;

    /**
     * {@inheritdoc}
     *
     * @ODM\Field
     */
    protected $relevantSpecialty;

    /**
     * {@inheritdoc}
     *
     * @ODM\Field
     */
    protected $study;

    /**
     * {@inheritdoc}
     *
     * @ODM\Field
     */
    protected $adverseOutcome;

    /**
     * {@inheritdoc}
     *
     * @ODM\Field
     */
    protected $contraindication;

    /**
     * {@inheritdoc}
     *
     * @ODM\Field
     */
    protected $duplicateTherapy;

    /**
     * {@inheritdoc}
     *
     * @ODM\Field
     */
    protected $indication;

    /**
     * {@inheritdoc}
     *
     * @ODM\Field
     */
    protected $seriousAdverseOutcome;

    /**
     * {@inheritdoc}
     *
     * @ODM\String
     * @Assert\Type(type="string")
     */
    protected $activeIngredient;

    /**
     * {@inheritdoc}
     *
     * @ODM\String
     * @Assert\Type(type="string")
     */
    protected $administrationRoute;

    /**
     * {@inheritdoc}
     *
     * @ODM\String
     * @Assert\Type(type="string")
     */
    protected
$alcoholWarning; /** * {@inheritdoc} * * @ODM\Field */ protected $availableStrength; /** * {@inheritdoc} * * @ODM\String * @Assert\Type(type="string") */ protected $breastfeedingWarning; /** * {@inheritdoc} * * @ODM\String * @Assert\Type(type="string") */ protected $clincalPharmacology; /** * {@inheritdoc} * * @ODM\Field */ protected $cost; /** * {@inheritdoc} * * @ODM\String * @Assert\Type(type="string") */ protected $dosageForm; /** * {@inheritdoc} * * @ODM\Field */ protected $doseSchedule; /** * {@inheritdoc} * * @ODM\Field */ protected $drugClass; /** * {@inheritdoc} * * @ODM\String * @Assert\Type(type="string") */ protected $foodWarning; /** * {@inheritdoc} * * @ODM\Field */ protected $interactingDrug; /** * {@inheritdoc} * * @ODM\Field */ protected $isAvailableGenerically; /** * {@inheritdoc} * * @ODM\Field */ protected $isProprietary; /** * {@inheritdoc} * * @ODM\String * @Assert\Type(type="string") */ protected $labelDetails; /** * {@inheritdoc} * * @ODM\Field */ protected $legalStatus; /** * {@inheritdoc} * * @ODM\Field */ protected $manufacturer; /** * {@inheritdoc} * * @ODM\String * @Assert\Type(type="string") */ protected $mechanismOfAction; /** * {@inheritdoc} * * @ODM\String * @Assert\Type(type="string") */ protected $nonProprietaryName; /** * {@inheritdoc} * * @ODM\String * @Assert\Type(type="string") */ protected $overdosage; /** * {@inheritdoc} * * @ODM\Field */ protected $pregnancyCategory; /** * {@inheritdoc} * * @ODM\String * @Assert\Type(type="string") */ protected $pregnancyWarning; /** * {@inheritdoc} * * @ODM\String * @Assert\Type(type="string") */ protected $prescribingInfo; /** * {@inheritdoc} * * @ODM\Field */ protected $prescriptionStatus; /** * {@inheritdoc} * * @ODM\Field */ protected $relatedDrug; /** * {@inheritdoc} * * @ODM\Field */ protected $warning; } <file_sep>/GenerationSamples/RecipeBundle/Model/RecipeInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free 
of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\RecipeBundle\Model; use SchemaRepository\Bundle\CreativeWorkBundle\Model\CreativeWorkInterface; /** * Interface of Recipe Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\RecipeBundle\Model */ interface RecipeInterface extends CreativeWorkInterface { /** * Getter of Cook Time * * The time it takes to actually cook the dish, in ISO 8601 duration * format. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCookTime(); /** * Getter of Cooking Method * * The method of cooking, such as Frying, Steaming, ... * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getCookingMethod(); /** * Getter of Ingredients * * An ingredient used in the recipe. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getIngredients(); /** * Getter of Nutrition * * Nutrition information about the recipe. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getNutrition(); /** * Getter of Prep Time * * The length of time it takes to prepare the recipe, in ISO 8601 * duration format. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPrepTime(); /** * Getter of Recipe Category * * The category of the recipe—for example, appetizer, entree, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getRecipeCategory(); /** * Getter of Recipe Cuisine * * The cuisine of the recipe (for example, French or Ethopian). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getRecipeCuisine(); /** * Getter of Recipe Instructions * * The steps to make the dish. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getRecipeInstructions(); /** * Getter of Recipe Yield * * The quantity produced by the recipe (for example, number of people * served, number of servings, etc). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getRecipeYield(); /** * Getter of Total Time * * The total time it takes to prepare and cook the recipe, in ISO 8601 * duration format. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTotalTime(); } <file_sep>/GenerationSamples/ProductBundle/Model/ProductInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\ProductBundle\Model; use SchemaRepository\Bundle\ThingBundle\Model\ThingInterface; /** * Interface of Product Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\ProductBundle\Model */ interface ProductInterface extends ThingInterface { /** * Getter of Aggregate Rating * * The overall rating, based on a collection of reviews or ratings, of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAggregateRating(); /** * Getter of Audience * * The intended audience of the item, i.e. the group for whom the item * was created. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAudience(); /** * Getter of Brand * * The brand(s) associated with a product or service, or the brand(s) * maintained by an organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBrand(); /** * Getter of Color * * The color of the product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getColor(); /** * Getter of Depth * * The depth of the product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDepth(); /** * Getter of Gtin13 * * The GTIN-13 code of the product, or the product to which the offer * refers. This is equivalent to 13-digit ISBN codes and EAN UCC-13. * Former 12-digit UPC codes can be converted into a GTIN-13 code by * simply adding a preceeding zero. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGtin13(); /** * Getter of Gtin14 * * The GTIN-14 code of the product, or the product to which the offer * refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGtin14(); /** * Getter of Gtin8 * * The GTIN-8 code of the product, or the product to which the offer * refers. This code is also known as EAN/UCC-8 or 8-digit EAN. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGtin8(); /** * Getter of Height * * The height of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getHeight(); /** * Getter of Is Accessory or Spare Part for * * A pointer to another product (or multiple products) for which this * product is an accessory or spare part. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIsAccessoryOrSparePartFor(); /** * Getter of Is Consumable for * * A pointer to another product (or multiple products) for which this * product is a consumable. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIsConsumableFor(); /** * Getter of Is Related to * * A pointer to another, somehow related product (or multiple products). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIsRelatedTo(); /** * Getter of Is Similar to * * A pointer to another, functionally similar product (or multiple * products). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIsSimilarTo(); /** * Getter of Item Condition * * A predefined value from OfferItemCondition or a textual description of * the condition of the product or service, or the products or services * included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getItemCondition(); /** * Getter of Logo * * URL of an image for the logo of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getLogo(); /** * Getter of Manufacturer * * The manufacturer of the product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getManufacturer(); /** * Getter of Model * * The model of the product. Use with the URL of a ProductModel or a * textual representation of the model identifier. The URL of the * ProductModel can be from an external source. It is recommended to * additionally provide strong product identifiers via the * gtin8/gtin13/gtin14 and mpn properties. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getModel(); /** * Getter of Mpn * * The Manufacturer Part Number (MPN) of the product, or the product to * which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getMpn(); /** * Getter of Offers * * An offer to sell this item—for example, an offer to sell a product, * the DVD of a movie, or tickets to an event. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOffers(); /** * Getter of Product ID * * The product identifier, such as ISBN. For example: <meta * itemprop='productID' content='isbn:123-456-789'/>. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getProductID(); /** * Getter of Release Date * * The release date of a product or product model. This can be used to * distinguish the exact variant of a product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getReleaseDate(); /** * Getter of Review * * A review of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getReview(); /** * Getter of Reviews * * Review of the item (legacy spelling; see singular form, review). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getReviews(); /** * Getter of Sku * * The Stock Keeping Unit (SKU), i.e. a merchant-specific identifier for * a product or service, or the product to which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSku(); /** * Getter of Weight * * The weight of the product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getWeight(); /** * Getter of Width * * The width of the item. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getWidth(); } <file_sep>/GenerationSamples/MedicalGuidelineBundle/Model/AbstractMedicalGuideline.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\MedicalGuidelineBundle\Model; use SchemaRepository\Bundle\MedicalGuidelineBundle\Model\MedicalGuidelineInterface; use SchemaRepository\Bundle\MedicalEntityBundle\Model\AbstractMedicalEntity; /** * Model of Medical Guideline * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MedicalGuidelineBundle\Model */ abstract class AbstractMedicalGuideline extends AbstractMedicalEntity implements MedicalGuidelineInterface { /** * Strength of evidence of the data used to formulate the guideline * (enumerated). 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $evidenceLevel; /** * Source of the data used to formulate the guidance, e.g. RCT, consensus * opinion, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $evidenceOrigin; /** * Date on which this guideline's recommendation was made. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $guidelineDate; /** * The medical conditions, treatments, etc. that are the subject of the * guideline. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $guidelineSubject; /** * Getter of Evidence Level * * Strength of evidence of the data used to formulate the guideline * (enumerated). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEvidenceLevel() { return $this->evidenceLevel; } /** * Setter of Evidence Level * * Strength of evidence of the data used to formulate the guideline * (enumerated). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of evidenceLevel */ public function setEvidenceLevel($value) { $this->evidenceLevel = $value; } /** * Getter of Evidence Origin * * Source of the data used to formulate the guidance, e.g. RCT, consensus * opinion, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEvidenceOrigin() { return $this->evidenceOrigin; } /** * Setter of Evidence Origin * * Source of the data used to formulate the guidance, e.g. RCT, consensus * opinion, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of evidenceOrigin */ public function setEvidenceOrigin($value) { $this->evidenceOrigin = $value; } /** * Getter of Guideline Date * * Date on which this guideline's recommendation was made. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getGuidelineDate() { return $this->guidelineDate; } /** * Setter of Guideline Date * * Date on which this guideline's recommendation was made. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of guidelineDate */ public function setGuidelineDate($value) { $this->guidelineDate = $value; } /** * Getter of Guideline Subject * * The medical conditions, treatments, etc. that are the subject of the * guideline. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getGuidelineSubject() { return $this->guidelineSubject; } /** * Setter of Guideline Subject * * The medical conditions, treatments, etc. that are the subject of the * guideline. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of guidelineSubject */ public function setGuidelineSubject($value) { $this->guidelineSubject = $value; } } <file_sep>/GenerationSamples/EventBundle/Model/AbstractEvent.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

namespace SchemaRepository\Bundle\EventBundle\Model;

use SchemaRepository\Bundle\EventBundle\Model\EventInterface;
use SchemaRepository\Bundle\ThingBundle\Model\AbstractThing;

/**
 * Base model for the schema.org Event type.
 *
 * Generated accessors for every Event property; the plural properties
 * (attendees, performers, subEvents) are legacy schema.org spellings and
 * come with add/remove helpers.
 *
 * @package SchemaRepository\Bundle\EventBundle\Model
 */
abstract class AbstractEvent extends AbstractThing implements EventInterface
{
    /**
     * A person or organization attending the event.
     *
     * @var mixed
     */
    protected $attendee;

    /**
     * People attending the event (legacy plural spelling; see singular
     * form, attendee).
     *
     * @var array
     */
    protected $attendees;

    /**
     * The duration of the item (movie, audio recording, event, etc.) in
     * ISO 8601 date format.
     *
     * @var mixed
     */
    protected $duration;

    /**
     * The end date and time of the event (ISO 8601 date format).
     *
     * @var mixed
     */
    protected $endDate;

    /**
     * The location of the event, organization or action.
     *
     * @var mixed
     */
    protected $location;

    /**
     * An offer to sell this item, e.g. tickets to the event.
     *
     * @var mixed
     */
    protected $offers;

    /**
     * A performer at the event, e.g. a presenter, musician, musical group
     * or actor.
     *
     * @var mixed
     */
    protected $performer;

    /**
     * The main performer or performers of the event (legacy plural
     * spelling; see singular form, performer).
     *
     * @var array
     */
    protected $performers;

    /**
     * The start date and time of the event (ISO 8601 date format).
     *
     * @var mixed
     */
    protected $startDate;

    /**
     * An Event that is part of this event, e.g. one presentation within a
     * conference.
     *
     * @var mixed
     */
    protected $subEvent;

    /**
     * Events that are a part of this event (legacy plural spelling; see
     * singular form, subEvent).
     *
     * @var array
     */
    protected $subEvents;

    /**
     * An event that this event is a part of, e.g. the festival containing
     * an individual performance.
     *
     * @var mixed
     */
    protected $superEvent;

    /**
     * Returns the person or organization attending the event.
     *
     * @return mixed
     */
    public function getAttendee()
    {
        return $this->attendee;
    }

    /**
     * Sets the person or organization attending the event.
     *
     * @param mixed $value New attendee
     */
    public function setAttendee($value)
    {
        $this->attendee = $value;
    }

    /**
     * Returns the people attending the event (legacy plural form).
     *
     * @return array
     */
    public function getAttendees()
    {
        return $this->attendees;
    }

    /**
     * Replaces the whole attendees collection (legacy plural form).
     *
     * @param array $value New attendees
     */
    public function setAttendees($value)
    {
        $this->attendees = $value;
    }

    /**
     * Appends one attendee to the collection.
     *
     * @param mixed $value Attendee to add
     */
    public function addAttendee($value)
    {
        $this->attendees[] = $value;
    }

    /**
     * Removes the first matching attendee from the collection, if present.
     *
     * @param mixed $value Attendee to remove
     */
    public function removeAttendee($value)
    {
        $index = array_search($value, $this->attendees);
        if (false !== $index) {
            unset($this->attendees[$index]);
        }
    }

    /**
     * Returns the duration of the item in ISO 8601 date format.
     *
     * @return mixed
     */
    public function getDuration()
    {
        return $this->duration;
    }

    /**
     * Sets the duration of the item in ISO 8601 date format.
     *
     * @param mixed $value New duration
     */
    public function setDuration($value)
    {
        $this->duration = $value;
    }

    /**
     * Returns the end date and time of the event (ISO 8601).
     *
     * @return mixed
     */
    public function getEndDate()
    {
        return $this->endDate;
    }

    /**
     * Sets the end date and time of the event (ISO 8601).
     *
     * @param mixed $value New end date
     */
    public function setEndDate($value)
    {
        $this->endDate = $value;
    }

    /**
     * Returns the location of the event, organization or action.
     *
     * @return mixed
     */
    public function getLocation()
    {
        return $this->location;
    }

    /**
     * Sets the location of the event, organization or action.
     *
     * @param mixed $value New location
     */
    public function setLocation($value)
    {
        $this->location = $value;
    }

    /**
     * Returns the offer to sell this item.
     *
     * @return mixed
     */
    public function getOffers()
    {
        return $this->offers;
    }

    /**
     * Sets the offer to sell this item.
     *
     * @param mixed $value New offers
     */
    public function setOffers($value)
    {
        $this->offers = $value;
    }

    /**
     * Returns the performer at the event.
     *
     * @return mixed
     */
    public function getPerformer()
    {
        return $this->performer;
    }

    /**
     * Sets the performer at the event.
     *
     * @param mixed $value New performer
     */
    public function setPerformer($value)
    {
        $this->performer = $value;
    }

    /**
     * Returns the performers of the event (legacy plural form).
     *
     * @return array
     */
    public function getPerformers()
    {
        return $this->performers;
    }

    /**
     * Replaces the whole performers collection (legacy plural form).
     *
     * @param array $value New performers
     */
    public function setPerformers($value)
    {
        $this->performers = $value;
    }

    /**
     * Appends one performer to the collection.
     *
     * @param mixed $value Performer to add
     */
    public function addPerformer($value)
    {
        $this->performers[] = $value;
    }

    /**
     * Removes the first matching performer from the collection, if present.
     *
     * @param mixed $value Performer to remove
     */
    public function removePerformer($value)
    {
        $index = array_search($value, $this->performers);
        if (false !== $index) {
            unset($this->performers[$index]);
        }
    }

    /**
     * Returns the start date and time of the event (ISO 8601).
     *
     * @return mixed
     */
    public function getStartDate()
    {
        return $this->startDate;
    }

    /**
     * Sets the start date and time of the event (ISO 8601).
     *
     * @param mixed $value New start date
     */
    public function setStartDate($value)
    {
        $this->startDate = $value;
    }

    /**
     * Returns the Event that is part of this event.
     *
     * @return mixed
     */
    public function getSubEvent()
    {
        return $this->subEvent;
    }

    /**
     * Sets the Event that is part of this event.
     *
     * @param mixed $value New sub event
     */
    public function setSubEvent($value)
    {
        $this->subEvent = $value;
    }

    /**
     * Returns the events that are a part of this event (legacy plural form).
     *
     * @return array
     */
    public function getSubEvents()
    {
        return $this->subEvents;
    }

    /**
     * Replaces the whole sub-events collection (legacy plural form).
     *
     * @param array $value New sub events
     */
    public function setSubEvents($value)
    {
        $this->subEvents = $value;
    }

    /**
     * Appends one sub-event to the collection.
     *
     * @param mixed $value Sub-event to add
     */
    public function addSubEvent($value)
    {
        $this->subEvents[] = $value;
    }

    /**
     * Removes the first matching sub-event from the collection, if present.
     *
     * @param mixed $value Sub-event to remove
     */
    public function removeSubEvent($value)
    {
        $index = array_search($value, $this->subEvents);
        if (false !== $index) {
            unset($this->subEvents[$index]);
        }
    }

    /**
     * Returns the event that this event is a part of.
     *
     * @return mixed
     */
    public function getSuperEvent()
    {
        return $this->superEvent;
    }

    /**
     * Sets the event that this event is a part of.
     *
     * @param mixed $value New super event
     */
    public function setSuperEvent($value)
    {
        $this->superEvent = $value;
    }
}
<file_sep>/GenerationSamples/ThingBundle/Model/AbstractThing.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

namespace SchemaRepository\Bundle\ThingBundle\Model;

use SchemaRepository\Bundle\ThingBundle\Model\ThingInterface;

/**
 * Model of Thing
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\ThingBundle\Model
 */
abstract class AbstractThing implements ThingInterface
{
    /**
     * Id of the model
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var integer|long
     */
    protected $id;

    /**
     * An additional type for the item, typically used for adding more
     * specific types from external vocabularies in microdata syntax. This is
     * a relationship between something and a class that the thing is in.
In
     * RDFa syntax, it is better to use the native RDFa syntax - the 'typeof'
     * attribute - for multiple types. Schema.org tools may have only weaker
     * understanding of extra types, in particular those defined externally.
     *
     * @var string
     */
    protected $additionalType;

    /**
     * A short description of the item.
     *
     * @var string
     */
    protected $description;

    /**
     * URL of an image of the item.
     *
     * @var string
     */
    protected $image;

    /**
     * The name of the item.
     *
     * @var string
     */
    protected $name;

    /**
     * URL of a reference Web page that unambiguously indicates the item's
     * identity, e.g. the item's Wikipedia page, Freebase page, or official
     * website.
     *
     * @var string
     */
    protected $sameAs;

    /**
     * URL of the item.
     *
     * @var string
     */
    protected $url;

    /**
     * Returns the id of the model.
     *
     * @return integer|long
     */
    public function getId()
    {
        return $this->id;
    }

    /**
     * Sets the id of the model.
     *
     * @param integer|long $value New id
     */
    public function setId($value)
    {
        $this->id = $value;
    }

    /**
     * Returns the additional type for the item (see the property docblock
     * for the microdata/RDFa caveats).
     *
     * @return string
     */
    public function getAdditionalType()
    {
        return $this->additionalType;
    }

    /**
     * Sets the additional type for the item, typically used for adding more
     * specific types from external vocabularies in microdata syntax.
     *
     * @param string $value New additional type
     */
    public function setAdditionalType($value)
    {
        $this->additionalType = $value;
    }

    /**
     * Returns the short description of the item.
     *
     * @return string
     */
    public function getDescription()
    {
        return $this->description;
    }

    /**
     * Sets the short description of the item.
     *
     * @param string $value New description
     */
    public function setDescription($value)
    {
        $this->description = $value;
    }

    /**
     * Returns the URL of an image of the item.
     *
     * @return string
     */
    public function getImage()
    {
        return $this->image;
    }

    /**
     * Sets the URL of an image of the item.
     *
     * @param string $value New image URL
     */
    public function setImage($value)
    {
        $this->image = $value;
    }

    /**
     * Returns the name of the item.
     *
     * @return string
     */
    public function getName()
    {
        return $this->name;
    }

    /**
     * Sets the name of the item.
     *
     * @param string $value New name
     */
    public function setName($value)
    {
        $this->name = $value;
    }

    /**
     * Returns the URL of a reference Web page that unambiguously indicates
     * the item's identity (Wikipedia page, Freebase page, official website).
     *
     * @return string
     */
    public function getSameAs()
    {
        return $this->sameAs;
    }

    /**
     * Sets the URL of a reference Web page that unambiguously indicates the
     * item's identity.
     *
     * @param string $value New sameAs URL
     */
    public function setSameAs($value)
    {
        $this->sameAs = $value;
    }

    /**
     * Returns the URL of the item.
     *
     * @return string
     */
    public function getUrl()
    {
        return $this->url;
    }

    /**
     * Sets the URL of the item.
     *
     * @param string $value New URL
     */
    public function setUrl($value)
    {
        $this->url = $value;
    }
}
<file_sep>/GenerationSamples/InfectiousDiseaseBundle/Model/InfectiousDiseaseInterface.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\InfectiousDiseaseBundle\Model; use SchemaRepository\Bundle\MedicalConditionBundle\Model\MedicalConditionInterface; /** * Interface of Infectious Disease Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\InfectiousDiseaseBundle\Model */ interface InfectiousDiseaseInterface extends MedicalConditionInterface { /** * Getter of Infectious Agent * * The actual infectious agent, such as a specific bacterium. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getInfectiousAgent(); /** * Getter of Infectious Agent Class * * The class of infectious agent (bacteria, prion, etc.) that causes the * disease. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getInfectiousAgentClass(); /** * Getter of Transmission Method * * How the disease spreads, either as a route or vector, for example * 'direct contact', 'Aedes aegypti', etc. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getTransmissionMethod(); } <file_sep>/GenerationSamples/NewsArticleBundle/Model/AbstractNewsArticle.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\NewsArticleBundle\Model; use SchemaRepository\Bundle\NewsArticleBundle\Model\NewsArticleInterface; use SchemaRepository\Bundle\ArticleBundle\Model\AbstractArticle; /** * Model of News Article * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\NewsArticleBundle\Model */ abstract class AbstractNewsArticle extends AbstractArticle implements NewsArticleInterface { /** * The location where the NewsArticle was produced. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $dateline; /** * The number of the column in which the NewsArticle appears in the print * edition. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $printColumn; /** * The edition of the print product in which the NewsArticle appears. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $printEdition; /** * If this NewsArticle appears in print, this field indicates the name of * the page on which the article is found. Please note that this field is * intended for the exact page name (e.g. A5, B18). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $printPage; /** * If this NewsArticle appears in print, this field indicates the print * section in which the article appeared. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $printSection; /** * Getter of Dateline * * The location where the NewsArticle was produced. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDateline() { return $this->dateline; } /** * Setter of Dateline * * The location where the NewsArticle was produced. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of dateline */ public function setDateline($value) { $this->dateline = $value; } /** * Getter of Print Column * * The number of the column in which the NewsArticle appears in the print * edition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPrintColumn() { return $this->printColumn; } /** * Setter of Print Column * * The number of the column in which the NewsArticle appears in the print * edition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of printColumn */ public function setPrintColumn($value) { $this->printColumn = $value; } /** * Getter of Print Edition * * The edition of the print product in which the NewsArticle appears. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPrintEdition() { return $this->printEdition; } /** * Setter of Print Edition * * The edition of the print product in which the NewsArticle appears. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of printEdition */ public function setPrintEdition($value) { $this->printEdition = $value; } /** * Getter of Print Page * * If this NewsArticle appears in print, this field indicates the name of * the page on which the article is found. Please note that this field is * intended for the exact page name (e.g. A5, B18). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPrintPage() { return $this->printPage; } /** * Setter of Print Page * * If this NewsArticle appears in print, this field indicates the name of * the page on which the article is found. Please note that this field is * intended for the exact page name (e.g. A5, B18). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of printPage */ public function setPrintPage($value) { $this->printPage = $value; } /** * Getter of Print Section * * If this NewsArticle appears in print, this field indicates the print * section in which the article appeared. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPrintSection() { return $this->printSection; } /** * Setter of Print Section * * If this NewsArticle appears in print, this field indicates the print * section in which the article appeared. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of printSection */ public function setPrintSection($value) { $this->printSection = $value; } } <file_sep>/GenerationSamples/TVSeasonBundle/Model/TVSeasonInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\TVSeasonBundle\Model; use SchemaRepository\Bundle\CreativeWorkBundle\Model\CreativeWorkInterface; /** * Interface of TV Season Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\TVSeasonBundle\Model */ interface TVSeasonInterface extends CreativeWorkInterface { /** * Getter of End Date * * The end date and time of the event (in ISO 8601 date format). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEndDate(); /** * Getter of Episode * * An episode of a TV series or season. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEpisode(); /** * Getter of Episodes * * The episode of a TV series or season (legacy spelling; see singular * form, episode). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getEpisodes(); /** * Getter of Number of Episodes * * The number of episodes in this season or series. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getNumberOfEpisodes(); /** * Getter of Part of TV Series * * The TV series to which this episode or season belongs. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPartOfTVSeries(); /** * Getter of Season Number * * The season number. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long */ public function getSeasonNumber(); /** * Getter of Start Date * * The start date and time of the event (in ISO 8601 date format). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStartDate(); /** * Getter of Trailer * * The trailer of the movie or TV series, season, or episode. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTrailer(); } <file_sep>/generator.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ include 'functions.php'; $schema = loadSchema(); $properties = array(); $classtypes = array('' => null); $properties['id'] = new Property('id', 'Id', array('Integer'), 'Id of the model'); foreach ($schema as $key => $value) { switch ($key) { case "properties": foreach($value as $name => $element) { $properties[$name] = new Property($name, $element->label, $element->ranges, $element->comment_plain); } break; case "types": foreach($value as $name => $element) { $parentName = (count($element->ancestors))?$element->ancestors[count($element->ancestors) - 1]:''; $classtypes[$name] = new ClassType($name, $element->label, $element->comment_plain, $parentName, array_merge(array('id'), $element->properties)); } break; } } foreach($classtypes as $classtype) { if($classtype !== null) { $classtype->setParent($classtypes[$classtype->getParentName()]); } } foreach($properties as $property) { foreach($classtypes as $classtype) { if($classtype !== null) { $classtype->addProperty($property); } } } deleteDir(__DIR__ . '/Bundle'); foreach($classtypes as $classtype) { if($classtype !== null) { $mode = 0775; $path = __DIR__ . '/Bundle/' . $classtype->getName() . 'Bundle/'; mkdir($path.'Controler', $mode, true); mkdir($path.'DependencyInjection', $mode, true); mkdir($path.'Doctrine', $mode, true); mkdir($path.'Document', $mode, true); mkdir($path.'Entity', $mode, true); mkdir($path.'Form/Type', $mode, true); mkdir($path.'Model', $mode, true); mkdir($path.'Ressources/config', $mode, true); mkdir($path.'Ressources/translations', $mode, true); mkdir($path.'Tests', $mode, true); file_put_contents($path . '/Model/' . $classtype->getName() . 'Interface.php', $classtype->getInterface()); file_put_contents($path . '/Model/Abstract' . $classtype->getName() . '.php', $classtype->getModel()); file_put_contents($path . '/Document/Abstract' . $classtype->getName() . '.php', $classtype->getDocument()); file_put_contents($path . '/Entity/Abstract' . $classtype->getName() . 
'.php', $classtype->getEntity()); file_put_contents($path . '/Ressources/translations/form.en.yml', $classtype->getFormLanguage()); } } echo "\n\nBundles Generated! look into " . __DIR__."/Bundle/ \n\n";<file_sep>/GenerationSamples/DrugCostBundle/Model/DrugCostInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\DrugCostBundle\Model; use SchemaRepository\Bundle\MedicalIntangibleBundle\Model\MedicalIntangibleInterface; /** * Interface of Drug Cost Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\DrugCostBundle\Model */ interface DrugCostInterface extends MedicalIntangibleInterface { /** * Getter of Applicable Location * * The location in which the status applies. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getApplicableLocation(); /** * Getter of Cost Category * * The category of cost, such as wholesale, retail, reimbursement cap, * etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCostCategory(); /** * Getter of Cost Currency * * The currency (in 3-letter ISO 4217 format) of the drug cost. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getCostCurrency(); /** * Getter of Cost Origin * * Additional details to capture the origin of the cost data. For * example, 'Medicare Part B'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getCostOrigin(); /** * Getter of Cost Per Unit * * The cost per unit of the drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal|string */ public function getCostPerUnit(); /** * Getter of Drug Unit * * The unit in which the drug is measured, e.g. '5 mg tablet'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDrugUnit(); } <file_sep>/GenerationSamples/DietBundle/Model/AbstractDiet.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\DietBundle\Model; use SchemaRepository\Bundle\DietBundle\Model\DietInterface; use SchemaRepository\Bundle\CreativeWorkBundle\Model\AbstractCreativeWork; /** * Model of Diet * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\DietBundle\Model */ abstract class AbstractDiet extends AbstractCreativeWork implements DietInterface { /** * A possible complication and/or side effect of this therapy. If it is * known that an adverse outcome is serious (resulting in death, * disability, or permanent damage; requiring hospitalization; or is * otherwise life-threatening or requires immediate medical attention), * tag it as a seriouseAdverseOutcome instead. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $adverseOutcome; /** * Any alternate name for this medical entity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $alternateName; /** * A medical code for the entity, taken from a controlled vocabulary or * ontology such as ICD-9, DiseasesDB, MeSH, SNOMED-CT, RxNorm, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $code; /** * A contraindication for this therapy. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $contraindication; /** * Nutritional information specific to the dietary plan. 
May include * dietary recommendations on what foods to avoid, what foods to consume, * and specific alterations/deviations from the USDA or other regulatory * body's approved dietary guidelines. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $dietFeatures; /** * A therapy that duplicates or overlaps this one. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $duplicateTherapy; /** * People or organizations that endorse the plan. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $endorsers; /** * Medical expert advice related to the plan. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $expertConsiderations; /** * A medical guideline related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $guideline; /** * A factor that indicates use of this therapy for treatment and/or * prevention of a condition, symptom, etc. For therapies such as drugs, * indications can include both officially-approved indications as well * as off-label uses. These can be distinguished by using the * ApprovedIndication subtype of MedicalIndication. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $indication; /** * The system of medicine that includes this MedicalEntity, for example * 'evidence-based', 'homeopathic', 'chiropractic', etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $medicineSystem; /** * Descriptive information establishing the overarching theory/philosophy * of the plan. May include the rationale for the name, the population * where the plan first came to prominence, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $overview; /** * Specific physiologic benefits associated to the plan. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $physiologicalBenefits; /** * Proprietary name given to the diet plan, typically by its originator * or creator. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $proprietaryName; /** * If applicable, the organization that officially recognizes this entity * as part of its endorsed system of medicine. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $recognizingAuthority; /** * If applicable, a medical specialty in which this entity is relevant. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $relevantSpecialty; /** * Specific physiologic risks associated to the plan. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $risks; /** * A possible serious complication and/or serious side effect of this * therapy. Serious adverse outcomes include those that are * life-threatening; result in death, disability, or permanent damage; * require hospitalization or prolong existing hospitalization; cause * congenital anomalies or birth defects; or jeopardize the patient and * may require medical or surgical intervention to prevent one of the * outcomes in this definition. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $seriousAdverseOutcome; /** * A medical study or trial related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $study; /** * Getter of Adverse Outcome * * A possible complication and/or side effect of this therapy. If it is * known that an adverse outcome is serious (resulting in death, * disability, or permanent damage; requiring hospitalization; or is * otherwise life-threatening or requires immediate medical attention), * tag it as a seriouseAdverseOutcome instead. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAdverseOutcome() { return $this->adverseOutcome; } /** * Setter of Adverse Outcome * * A possible complication and/or side effect of this therapy. If it is * known that an adverse outcome is serious (resulting in death, * disability, or permanent damage; requiring hospitalization; or is * otherwise life-threatening or requires immediate medical attention), * tag it as a seriouseAdverseOutcome instead. * * @author SchemaGenerator <<EMAIL>.fr> * * @access public * * @param mixed $value Value of adverseOutcome */ public function setAdverseOutcome($value) { $this->adverseOutcome = $value; } /** * Getter of Alternate Name * * Any alternate name for this medical entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAlternateName() { return $this->alternateName; } /** * Setter of Alternate Name * * Any alternate name for this medical entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of alternateName */ public function setAlternateName($value) { $this->alternateName = $value; } /** * Getter of Code * * A medical code for the entity, taken from a controlled vocabulary or * ontology such as ICD-9, DiseasesDB, MeSH, SNOMED-CT, RxNorm, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCode() { return $this->code; } /** * Setter of Code * * A medical code for the entity, taken from a controlled vocabulary or * ontology such as ICD-9, DiseasesDB, MeSH, SNOMED-CT, RxNorm, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of code */ public function setCode($value) { $this->code = $value; } /** * Getter of Contraindication * * A contraindication for this therapy. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContraindication() { return $this->contraindication; } /** * Setter of Contraindication * * A contraindication for this therapy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of contraindication */ public function setContraindication($value) { $this->contraindication = $value; } /** * Getter of Diet Features * * Nutritional information specific to the dietary plan. May include * dietary recommendations on what foods to avoid, what foods to consume, * and specific alterations/deviations from the USDA or other regulatory * body's approved dietary guidelines. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDietFeatures() { return $this->dietFeatures; } /** * Setter of Diet Features * * Nutritional information specific to the dietary plan. May include * dietary recommendations on what foods to avoid, what foods to consume, * and specific alterations/deviations from the USDA or other regulatory * body's approved dietary guidelines. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of dietFeatures */ public function setDietFeatures($value) { $this->dietFeatures = $value; } /** * Getter of Duplicate Therapy * * A therapy that duplicates or overlaps this one. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDuplicateTherapy() { return $this->duplicateTherapy; } /** * Setter of Duplicate Therapy * * A therapy that duplicates or overlaps this one. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of duplicateTherapy */ public function setDuplicateTherapy($value) { $this->duplicateTherapy = $value; } /** * Getter of Endorsers * * People or organizations that endorse the plan. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEndorsers() { return $this->endorsers; } /** * Setter of Endorsers * * People or organizations that endorse the plan. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of endorsers */ public function setEndorsers($value) { $this->endorsers = $value; } /** * Getter of Expert Considerations * * Medical expert advice related to the plan. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getExpertConsiderations() { return $this->expertConsiderations; } /** * Setter of Expert Considerations * * Medical expert advice related to the plan. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of expertConsiderations */ public function setExpertConsiderations($value) { $this->expertConsiderations = $value; } /** * Getter of Guideline * * A medical guideline related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getGuideline() { return $this->guideline; } /** * Setter of Guideline * * A medical guideline related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of guideline */ public function setGuideline($value) { $this->guideline = $value; } /** * Getter of Indication * * A factor that indicates use of this therapy for treatment and/or * prevention of a condition, symptom, etc. For therapies such as drugs, * indications can include both officially-approved indications as well * as off-label uses. These can be distinguished by using the * ApprovedIndication subtype of MedicalIndication. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIndication() { return $this->indication; } /** * Setter of Indication * * A factor that indicates use of this therapy for treatment and/or * prevention of a condition, symptom, etc. For therapies such as drugs, * indications can include both officially-approved indications as well * as off-label uses. These can be distinguished by using the * ApprovedIndication subtype of MedicalIndication. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of indication */ public function setIndication($value) { $this->indication = $value; } /** * Getter of Medicine System * * The system of medicine that includes this MedicalEntity, for example * 'evidence-based', 'homeopathic', 'chiropractic', etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMedicineSystem() { return $this->medicineSystem; } /** * Setter of Medicine System * * The system of medicine that includes this MedicalEntity, for example * 'evidence-based', 'homeopathic', 'chiropractic', etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of medicineSystem */ public function setMedicineSystem($value) { $this->medicineSystem = $value; } /** * Getter of Overview * * Descriptive information establishing the overarching theory/philosophy * of the plan. May include the rationale for the name, the population * where the plan first came to prominence, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getOverview() { return $this->overview; } /** * Setter of Overview * * Descriptive information establishing the overarching theory/philosophy * of the plan. May include the rationale for the name, the population * where the plan first came to prominence, etc. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of overview */ public function setOverview($value) { $this->overview = $value; } /** * Getter of Physiological Benefits * * Specific physiologic benefits associated to the plan. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPhysiologicalBenefits() { return $this->physiologicalBenefits; } /** * Setter of Physiological Benefits * * Specific physiologic benefits associated to the plan. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of physiologicalBenefits */ public function setPhysiologicalBenefits($value) { $this->physiologicalBenefits = $value; } /** * Getter of Proprietary Name * * Proprietary name given to the diet plan, typically by its originator * or creator. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getProprietaryName() { return $this->proprietaryName; } /** * Setter of Proprietary Name * * Proprietary name given to the diet plan, typically by its originator * or creator. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of proprietaryName */ public function setProprietaryName($value) { $this->proprietaryName = $value; } /** * Getter of Recognizing Authority * * If applicable, the organization that officially recognizes this entity * as part of its endorsed system of medicine. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRecognizingAuthority() { return $this->recognizingAuthority; } /** * Setter of Recognizing Authority * * If applicable, the organization that officially recognizes this entity * as part of its endorsed system of medicine. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of recognizingAuthority */ public function setRecognizingAuthority($value) { $this->recognizingAuthority = $value; } /** * Getter of Relevant Specialty * * If applicable, a medical specialty in which this entity is relevant. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelevantSpecialty() { return $this->relevantSpecialty; } /** * Setter of Relevant Specialty * * If applicable, a medical specialty in which this entity is relevant. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of relevantSpecialty */ public function setRelevantSpecialty($value) { $this->relevantSpecialty = $value; } /** * Getter of Risks * * Specific physiologic risks associated to the plan. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getRisks() { return $this->risks; } /** * Setter of Risks * * Specific physiologic risks associated to the plan. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of risks */ public function setRisks($value) { $this->risks = $value; } /** * Getter of Serious Adverse Outcome * * A possible serious complication and/or serious side effect of this * therapy. Serious adverse outcomes include those that are * life-threatening; result in death, disability, or permanent damage; * require hospitalization or prolong existing hospitalization; cause * congenital anomalies or birth defects; or jeopardize the patient and * may require medical or surgical intervention to prevent one of the * outcomes in this definition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSeriousAdverseOutcome() { return $this->seriousAdverseOutcome; } /** * Setter of Serious Adverse Outcome * * A possible serious complication and/or serious side effect of this * therapy. 
Serious adverse outcomes include those that are * life-threatening; result in death, disability, or permanent damage; * require hospitalization or prolong existing hospitalization; cause * congenital anomalies or birth defects; or jeopardize the patient and * may require medical or surgical intervention to prevent one of the * outcomes in this definition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of seriousAdverseOutcome */ public function setSeriousAdverseOutcome($value) { $this->seriousAdverseOutcome = $value; } /** * Getter of Study * * A medical study or trial related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStudy() { return $this->study; } /** * Setter of Study * * A medical study or trial related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of study */ public function setStudy($value) { $this->study = $value; } } <file_sep>/GenerationSamples/DrugBundle/Model/AbstractDrug.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\DrugBundle\Model; use SchemaRepository\Bundle\DrugBundle\Model\DrugInterface; use SchemaRepository\Bundle\MedicalTherapyBundle\Model\AbstractMedicalTherapy; /** * Model of Drug * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\DrugBundle\Model */ abstract class AbstractDrug extends AbstractMedicalTherapy implements DrugInterface { /** * An active ingredient, typically chemical compounds and/or biologic * substances. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $activeIngredient; /** * A route by which this drug may be administered, e.g. 'oral'. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $administrationRoute; /** * Any precaution, guidance, contraindication, etc. related to * consumption of alcohol while taking this drug. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $alcoholWarning; /** * An available dosage strength for the drug. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $availableStrength; /** * Any precaution, guidance, contraindication, etc. related to this * drug's use by breastfeeding mothers. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $breastfeedingWarning; /** * Description of the absorption and elimination of drugs, including * their concentration (pharmacokinetics, pK) and biological effects * (pharmacodynamics, pD). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $clincalPharmacology; /** * Cost per unit of the drug, as reported by the source being tagged. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $cost; /** * A dosage form in which this drug/supplement is available, e.g. * 'tablet', 'suspension', 'injection'. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $dosageForm; /** * A dosing schedule for the drug for a given population, either * observed, recommended, or maximum dose based on the type used. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $doseSchedule; /** * The class of drug this belongs to (e.g., statins). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $drugClass; /** * Any precaution, guidance, contraindication, etc. related to * consumption of specific foods while taking this drug. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $foodWarning; /** * Another drug that is known to interact with this drug in a way that * impacts the effect of this drug or causes a risk to the patient. Note: * disease interactions are typically captured as contraindications. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $interactingDrug; /** * True if the drug is available in a generic form (regardless of name). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var bool */ protected $isAvailableGenerically; /** * True if this item's name is a proprietary/brand name (vs. generic * name). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var bool */ protected $isProprietary; /** * Link to the drug's label details. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $labelDetails; /** * The drug or supplement's legal status, including any controlled * substance schedules that apply. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $legalStatus; /** * The manufacturer of the product. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $manufacturer; /** * The specific biochemical interaction through which this drug or * supplement produces its pharmacological effect. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $mechanismOfAction; /** * The generic name of this drug or supplement. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $nonProprietaryName; /** * Any information related to overdose on a drug, including signs or * symptoms, treatments, contact information for emergency response. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $overdosage; /** * Pregnancy category of this drug. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $pregnancyCategory; /** * Any precaution, guidance, contraindication, etc. related to this * drug's use during pregnancy. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $pregnancyWarning; /** * Link to prescribing information for the drug. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $prescribingInfo; /** * Indicates whether this drug is available by prescription or * over-the-counter. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $prescriptionStatus; /** * Any other drug related to this one, for example commonly-prescribed * alternatives. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $relatedDrug; /** * Any FDA or other warnings about the drug (text or URL). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string|string */ protected $warning; /** * Getter of Active Ingredient * * An active ingredient, typically chemical compounds and/or biologic * substances. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getActiveIngredient() { return $this->activeIngredient; } /** * Setter of Active Ingredient * * An active ingredient, typically chemical compounds and/or biologic * substances. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of activeIngredient */ public function setActiveIngredient($value) { $this->activeIngredient = $value; } /** * Getter of Administration Route * * A route by which this drug may be administered, e.g. 'oral'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAdministrationRoute() { return $this->administrationRoute; } /** * Setter of Administration Route * * A route by which this drug may be administered, e.g. 'oral'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of administrationRoute */ public function setAdministrationRoute($value) { $this->administrationRoute = $value; } /** * Getter of Alcohol Warning * * Any precaution, guidance, contraindication, etc. related to * consumption of alcohol while taking this drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAlcoholWarning() { return $this->alcoholWarning; } /** * Setter of Alcohol Warning * * Any precaution, guidance, contraindication, etc. related to * consumption of alcohol while taking this drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of alcoholWarning */ public function setAlcoholWarning($value) { $this->alcoholWarning = $value; } /** * Getter of Available Strength * * An available dosage strength for the drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAvailableStrength() { return $this->availableStrength; } /** * Setter of Available Strength * * An available dosage strength for the drug. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of availableStrength */ public function setAvailableStrength($value) { $this->availableStrength = $value; } /** * Getter of Breastfeeding Warning * * Any precaution, guidance, contraindication, etc. related to this * drug's use by breastfeeding mothers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getBreastfeedingWarning() { return $this->breastfeedingWarning; } /** * Setter of Breastfeeding Warning * * Any precaution, guidance, contraindication, etc. related to this * drug's use by breastfeeding mothers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of breastfeedingWarning */ public function setBreastfeedingWarning($value) { $this->breastfeedingWarning = $value; } /** * Getter of Clincal Pharmacology * * Description of the absorption and elimination of drugs, including * their concentration (pharmacokinetics, pK) and biological effects * (pharmacodynamics, pD). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getClincalPharmacology() { return $this->clincalPharmacology; } /** * Setter of Clincal Pharmacology * * Description of the absorption and elimination of drugs, including * their concentration (pharmacokinetics, pK) and biological effects * (pharmacodynamics, pD). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of clincalPharmacology */ public function setClincalPharmacology($value) { $this->clincalPharmacology = $value; } /** * Getter of Cost * * Cost per unit of the drug, as reported by the source being tagged. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCost() { return $this->cost; } /** * Setter of Cost * * Cost per unit of the drug, as reported by the source being tagged. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of cost */ public function setCost($value) { $this->cost = $value; } /** * Getter of Dosage Form * * A dosage form in which this drug/supplement is available, e.g. * 'tablet', 'suspension', 'injection'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDosageForm() { return $this->dosageForm; } /** * Setter of Dosage Form * * A dosage form in which this drug/supplement is available, e.g. * 'tablet', 'suspension', 'injection'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of dosageForm */ public function setDosageForm($value) { $this->dosageForm = $value; } /** * Getter of Dose Schedule * * A dosing schedule for the drug for a given population, either * observed, recommended, or maximum dose based on the type used. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDoseSchedule() { return $this->doseSchedule; } /** * Setter of Dose Schedule * * A dosing schedule for the drug for a given population, either * observed, recommended, or maximum dose based on the type used. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of doseSchedule */ public function setDoseSchedule($value) { $this->doseSchedule = $value; } /** * Getter of Drug Class * * The class of drug this belongs to (e.g., statins). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDrugClass() { return $this->drugClass; } /** * Setter of Drug Class * * The class of drug this belongs to (e.g., statins). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of drugClass */ public function setDrugClass($value) { $this->drugClass = $value; } /** * Getter of Food Warning * * Any precaution, guidance, contraindication, etc. 
related to * consumption of specific foods while taking this drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getFoodWarning() { return $this->foodWarning; } /** * Setter of Food Warning * * Any precaution, guidance, contraindication, etc. related to * consumption of specific foods while taking this drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of foodWarning */ public function setFoodWarning($value) { $this->foodWarning = $value; } /** * Getter of Interacting Drug * * Another drug that is known to interact with this drug in a way that * impacts the effect of this drug or causes a risk to the patient. Note: * disease interactions are typically captured as contraindications. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getInteractingDrug() { return $this->interactingDrug; } /** * Setter of Interacting Drug * * Another drug that is known to interact with this drug in a way that * impacts the effect of this drug or causes a risk to the patient. Note: * disease interactions are typically captured as contraindications. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of interactingDrug */ public function setInteractingDrug($value) { $this->interactingDrug = $value; } /** * Getter of Is Available Generically * * True if the drug is available in a generic form (regardless of name). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return bool */ public function getIsAvailableGenerically() { return $this->isAvailableGenerically; } /** * Setter of Is Available Generically * * True if the drug is available in a generic form (regardless of name). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param bool $value Value of isAvailableGenerically */ public function setIsAvailableGenerically($value) { $this->isAvailableGenerically = $value; } /** * Getter of Is Proprietary * * True if this item's name is a proprietary/brand name (vs. generic * name). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return bool */ public function getIsProprietary() { return $this->isProprietary; } /** * Setter of Is Proprietary * * True if this item's name is a proprietary/brand name (vs. generic * name). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param bool $value Value of isProprietary */ public function setIsProprietary($value) { $this->isProprietary = $value; } /** * Getter of Label Details * * Link to the drug's label details. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getLabelDetails() { return $this->labelDetails; } /** * Setter of Label Details * * Link to the drug's label details. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of labelDetails */ public function setLabelDetails($value) { $this->labelDetails = $value; } /** * Getter of Legal Status * * The drug or supplement's legal status, including any controlled * substance schedules that apply. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getLegalStatus() { return $this->legalStatus; } /** * Setter of Legal Status * * The drug or supplement's legal status, including any controlled * substance schedules that apply. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of legalStatus */ public function setLegalStatus($value) { $this->legalStatus = $value; } /** * Getter of Manufacturer * * The manufacturer of the product. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getManufacturer() { return $this->manufacturer; } /** * Setter of Manufacturer * * The manufacturer of the product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of manufacturer */ public function setManufacturer($value) { $this->manufacturer = $value; } /** * Getter of Mechanism of Action * * The specific biochemical interaction through which this drug or * supplement produces its pharmacological effect. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getMechanismOfAction() { return $this->mechanismOfAction; } /** * Setter of Mechanism of Action * * The specific biochemical interaction through which this drug or * supplement produces its pharmacological effect. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of mechanismOfAction */ public function setMechanismOfAction($value) { $this->mechanismOfAction = $value; } /** * Getter of Non Proprietary Name * * The generic name of this drug or supplement. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getNonProprietaryName() { return $this->nonProprietaryName; } /** * Setter of Non Proprietary Name * * The generic name of this drug or supplement. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of nonProprietaryName */ public function setNonProprietaryName($value) { $this->nonProprietaryName = $value; } /** * Getter of Overdosage * * Any information related to overdose on a drug, including signs or * symptoms, treatments, contact information for emergency response. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getOverdosage() { return $this->overdosage; } /** * Setter of Overdosage * * Any information related to overdose on a drug, including signs or * symptoms, treatments, contact information for emergency response. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of overdosage */ public function setOverdosage($value) { $this->overdosage = $value; } /** * Getter of Pregnancy Category * * Pregnancy category of this drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPregnancyCategory() { return $this->pregnancyCategory; } /** * Setter of Pregnancy Category * * Pregnancy category of this drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of pregnancyCategory */ public function setPregnancyCategory($value) { $this->pregnancyCategory = $value; } /** * Getter of Pregnancy Warning * * Any precaution, guidance, contraindication, etc. related to this * drug's use during pregnancy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPregnancyWarning() { return $this->pregnancyWarning; } /** * Setter of Pregnancy Warning * * Any precaution, guidance, contraindication, etc. related to this * drug's use during pregnancy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of pregnancyWarning */ public function setPregnancyWarning($value) { $this->pregnancyWarning = $value; } /** * Getter of Prescribing Info * * Link to prescribing information for the drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPrescribingInfo() { return $this->prescribingInfo; } /** * Setter of Prescribing Info * * Link to prescribing information for the drug. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of prescribingInfo */ public function setPrescribingInfo($value) { $this->prescribingInfo = $value; } /** * Getter of Prescription Status * * Indicates whether this drug is available by prescription or * over-the-counter. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPrescriptionStatus() { return $this->prescriptionStatus; } /** * Setter of Prescription Status * * Indicates whether this drug is available by prescription or * over-the-counter. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of prescriptionStatus */ public function setPrescriptionStatus($value) { $this->prescriptionStatus = $value; } /** * Getter of Related Drug * * Any other drug related to this one, for example commonly-prescribed * alternatives. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelatedDrug() { return $this->relatedDrug; } /** * Setter of Related Drug * * Any other drug related to this one, for example commonly-prescribed * alternatives. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of relatedDrug */ public function setRelatedDrug($value) { $this->relatedDrug = $value; } /** * Getter of Warning * * Any FDA or other warnings about the drug (text or URL). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string|string */ public function getWarning() { return $this->warning; } /** * Setter of Warning * * Any FDA or other warnings about the drug (text or URL). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string|string $value Value of warning */ public function setWarning($value) { $this->warning = $value; } } <file_sep>/GenerationSamples/MedicalAudienceBundle/Model/MedicalAudienceInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\MedicalAudienceBundle\Model; use SchemaRepository\Bundle\AudienceBundle\Model\AudienceInterface; /** * Interface of Medical Audience Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MedicalAudienceBundle\Model */ interface MedicalAudienceInterface extends AudienceInterface { /** * Getter of Alternate Name * * Any alternate name for this medical entity. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAlternateName(); /** * Getter of Code * * A medical code for the entity, taken from a controlled vocabulary or * ontology such as ICD-9, DiseasesDB, MeSH, SNOMED-CT, RxNorm, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCode(); /** * Getter of Guideline * * A medical guideline related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getGuideline(); /** * Getter of Health Condition * * Expectations for health conditions of target audience * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getHealthCondition(); /** * Getter of Medicine System * * The system of medicine that includes this MedicalEntity, for example * 'evidence-based', 'homeopathic', 'chiropractic', etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMedicineSystem(); /** * Getter of Recognizing Authority * * If applicable, the organization that officially recognizes this entity * as part of its endorsed system of medicine. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRecognizingAuthority(); /** * Getter of Relevant Specialty * * If applicable, a medical specialty in which this entity is relevant. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelevantSpecialty(); /** * Getter of Study * * A medical study or trial related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStudy(); /** * Getter of Suggested Gender * * The gender of the person or audience. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSuggestedGender(); /** * Getter of Suggested Max Age * * Maximal age recommended for viewing content * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getSuggestedMaxAge(); /** * Getter of Suggested Min Age * * Minimal age recommended for viewing content * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getSuggestedMinAge(); } <file_sep>/GenerationSamples/WebPageBundle/Model/AbstractWebPage.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\WebPageBundle\Model; use SchemaRepository\Bundle\WebPageBundle\Model\WebPageInterface; use SchemaRepository\Bundle\CreativeWorkBundle\Model\AbstractCreativeWork; /** * Model of Web Page * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\WebPageBundle\Model */ abstract class AbstractWebPage extends AbstractCreativeWork implements WebPageInterface { /** * A set of links that can help a user understand and navigate a website * hierarchy. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $breadcrumb; /** * Indicates the collection or gallery to which the item belongs. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $isPartOf; /** * Date on which the content on this web page was last reviewed for * accuracy and/or completeness. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $lastReviewed; /** * Indicates if this web page element is the main subject of the page. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $mainContentOfPage; /** * Indicates the main image on the page * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $primaryImageOfPage; /** * A link related to this web page, for example to other related web * pages. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $relatedLink; /** * People or organizations that have reviewed the content on this web * page for accuracy and/or completeness. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $reviewedBy; /** * One of the more significant URLs on the page. Typically, these are the * non-navigation links that are clicked on the most. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $significantLink; /** * The most significant URLs on the page. 
Typically, these are the * non-navigation links that are clicked on the most (legacy spelling; * see singular form, significantLink). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $significantLinks; /** * One of the domain specialities to which this web page's content * applies. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $specialty; /** * Getter of Breadcrumb * * A set of links that can help a user understand and navigate a website * hierarchy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getBreadcrumb() { return $this->breadcrumb; } /** * Setter of Breadcrumb * * A set of links that can help a user understand and navigate a website * hierarchy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of breadcrumb */ public function setBreadcrumb($value) { $this->breadcrumb = $value; } /** * Getter of Is Part of * * Indicates the collection or gallery to which the item belongs. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIsPartOf() { return $this->isPartOf; } /** * Setter of Is Part of * * Indicates the collection or gallery to which the item belongs. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of isPartOf */ public function setIsPartOf($value) { $this->isPartOf = $value; } /** * Getter of Last Reviewed * * Date on which the content on this web page was last reviewed for * accuracy and/or completeness. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getLastReviewed() { return $this->lastReviewed; } /** * Setter of Last Reviewed * * Date on which the content on this web page was last reviewed for * accuracy and/or completeness. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of lastReviewed */ public function setLastReviewed($value) { $this->lastReviewed = $value; } /** * Getter of Main Content of Page * * Indicates if this web page element is the main subject of the page. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMainContentOfPage() { return $this->mainContentOfPage; } /** * Setter of Main Content of Page * * Indicates if this web page element is the main subject of the page. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of mainContentOfPage */ public function setMainContentOfPage($value) { $this->mainContentOfPage = $value; } /** * Getter of Primary Image of Page * * Indicates the main image on the page * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPrimaryImageOfPage() { return $this->primaryImageOfPage; } /** * Setter of Primary Image of Page * * Indicates the main image on the page * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of primaryImageOfPage */ public function setPrimaryImageOfPage($value) { $this->primaryImageOfPage = $value; } /** * Getter of Related Link * * A link related to this web page, for example to other related web * pages. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getRelatedLink() { return $this->relatedLink; } /** * Setter of Related Link * * A link related to this web page, for example to other related web * pages. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of relatedLink */ public function setRelatedLink($value) { $this->relatedLink = $value; } /** * Getter of Reviewed by * * People or organizations that have reviewed the content on this web * page for accuracy and/or completeness. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getReviewedBy() { return $this->reviewedBy; } /** * Setter of Reviewed by * * People or organizations that have reviewed the content on this web * page for accuracy and/or completeness. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of reviewedBy */ public function setReviewedBy($value) { $this->reviewedBy = $value; } /** * Getter of Significant Link * * One of the more significant URLs on the page. Typically, these are the * non-navigation links that are clicked on the most. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSignificantLink() { return $this->significantLink; } /** * Setter of Significant Link * * One of the more significant URLs on the page. Typically, these are the * non-navigation links that are clicked on the most. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of significantLink */ public function setSignificantLink($value) { $this->significantLink = $value; } /** * Getter of Significant Links * * The most significant URLs on the page. Typically, these are the * non-navigation links that are clicked on the most (legacy spelling; * see singular form, significantLink). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSignificantLinks() { return $this->significantLinks; } /** * Setter of Significant Links * * The most significant URLs on the page. Typically, these are the * non-navigation links that are clicked on the most (legacy spelling; * see singular form, significantLink). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of significantLinks */ public function setSignificantLinks($value) { $this->significantLinks = $value; } /** * Getter of Specialty * * One of the domain specialities to which this web page's content * applies. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSpecialty() { return $this->specialty; } /** * Setter of Specialty * * One of the domain specialities to which this web page's content * applies. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of specialty */ public function setSpecialty($value) { $this->specialty = $value; } } <file_sep>/GenerationSamples/VeinBundle/Model/AbstractVein.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\VeinBundle\Model; use SchemaRepository\Bundle\VeinBundle\Model\VeinInterface; use SchemaRepository\Bundle\VesselBundle\Model\AbstractVessel; /** * Model of Vein * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\VeinBundle\Model */ abstract class AbstractVein extends AbstractVessel implements VeinInterface { /** * The vasculature that the vein drains into. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $drainsTo; /** * The anatomical or organ system drained by this vessel; generally * refers to a specific part of an organ. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $regionDrained; /** * The anatomical or organ system that the vein flows into; a larger * structure that the vein connects to. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $tributary; /** * Getter of Drains to * * The vasculature that the vein drains into. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDrainsTo() { return $this->drainsTo; } /** * Setter of Drains to * * The vasculature that the vein drains into. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of drainsTo */ public function setDrainsTo($value) { $this->drainsTo = $value; } /** * Getter of Region Drained * * The anatomical or organ system drained by this vessel; generally * refers to a specific part of an organ. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRegionDrained() { return $this->regionDrained; } /** * Setter of Region Drained * * The anatomical or organ system drained by this vessel; generally * refers to a specific part of an organ. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of regionDrained */ public function setRegionDrained($value) { $this->regionDrained = $value; } /** * Getter of Tributary * * The anatomical or organ system that the vein flows into; a larger * structure that the vein connects to. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTributary() { return $this->tributary; } /** * Setter of Tributary * * The anatomical or organ system that the vein flows into; a larger * structure that the vein connects to. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of tributary */ public function setTributary($value) { $this->tributary = $value; } } <file_sep>/README.md SchemaOrgRepository =================== Schema.org structures to models A little script to generate Models/Documents/Entities From Schema.Org (RelationShipLess), for Symfony2.3/DoctrineORM/DoctrineMongo With Interface, Inheritance, Comments, ... <file_sep>/GenerationSamples/NutritionInformationBundle/Model/NutritionInformationInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\NutritionInformationBundle\Model; use SchemaRepository\Bundle\StructuredValueBundle\Model\StructuredValueInterface; /** * Interface of Nutrition Information Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\NutritionInformationBundle\Model */ interface NutritionInformationInterface extends StructuredValueInterface { /** * Getter of Calories * * The number of calories * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCalories(); /** * Getter of Carbohydrate Content * * The number of grams of carbohydrates. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCarbohydrateContent(); /** * Getter of Cholesterol Content * * The number of milligrams of cholesterol. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCholesterolContent(); /** * Getter of Fat Content * * The number of grams of fat. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getFatContent(); /** * Getter of Fiber Content * * The number of grams of fiber. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getFiberContent(); /** * Getter of Protein Content * * The number of grams of protein. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getProteinContent(); /** * Getter of Saturated Fat Content * * The number of grams of saturated fat. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSaturatedFatContent(); /** * Getter of Serving Size * * The serving size, in terms of the number of volume or mass * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getServingSize(); /** * Getter of Sodium Content * * The number of milligrams of sodium. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSodiumContent(); /** * Getter of Sugar Content * * The number of grams of sugar. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSugarContent(); /** * Getter of Trans Fat Content * * The number of grams of trans fat. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTransFatContent(); /** * Getter of Unsaturated Fat Content * * The number of grams of unsaturated fat. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getUnsaturatedFatContent(); } <file_sep>/GenerationSamples/MovieTheaterBundle/Model/AbstractMovieTheater.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\MovieTheaterBundle\Model; use SchemaRepository\Bundle\MovieTheaterBundle\Model\MovieTheaterInterface; use SchemaRepository\Bundle\CivicStructureBundle\Model\AbstractCivicStructure; /** * Model of Movie Theater * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MovieTheaterBundle\Model */ abstract class AbstractMovieTheater extends AbstractCivicStructure implements MovieTheaterInterface { /** * The larger organization that this local business is a branch of, if * any. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $branchOf; /** * The brand(s) associated with a product or service, or the brand(s) * maintained by an organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $brand; /** * A contact point for a person or organization. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $contactPoint; /** * A contact point for a person or organization (legacy spelling; see * singular form, contactPoint). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $contactPoints; /** * The currency accepted (in ISO 4217 currency format). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $currenciesAccepted; /** * The Dun & Bradstreet DUNS number for identifying an organization or * business person. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $duns; /** * Email address. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $email; /** * Someone working for this organization. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $employee; /** * People working for this organization. (legacy spelling; see singular * form, employee) * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $employees; /** * A person who founded this organization. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $founder; /** * A person who founded this organization (legacy spelling; see singular * form, founder). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $founders; /** * The date that this organization was founded. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $foundingDate; /** * Points-of-Sales operated by the organization or person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $hasPOS; /** * The official name of the organization, e.g. the registered company * name. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $legalName; /** * The location of the event, organization or action. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $location; /** * A pointer to products or services offered by the organization or * person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $makesOffer; /** * A member of this organization. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $member; /** * A member of this organization (legacy spelling; see singular form, * member). 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $members; /** * The North American Industry Classification System (NAICS) code for a * particular organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $naics; /** * Products owned by the organization or person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $owns; /** * Cash, credit card, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $paymentAccepted; /** * The price range of the business, for example $$$. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $priceRange; /** * A pointer to products or services sought by the organization or person * (demand). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $seeks; /** * The Tax / Fiscal ID of the organization or person, e.g. the TIN in the * US or the CIF/NIF in Spain. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $taxID; /** * The Value-added Tax ID of the organisation or person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $vatID; /** * Getter of Branch of * * The larger organization that this local business is a branch of, if * any. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBranchOf() { return $this->branchOf; } /** * Setter of Branch of * * The larger organization that this local business is a branch of, if * any. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of branchOf */ public function setBranchOf($value) { $this->branchOf = $value; } /** * Getter of Brand * * The brand(s) associated with a product or service, or the brand(s) * maintained by an organization or business person. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBrand() { return $this->brand; } /** * Setter of Brand * * The brand(s) associated with a product or service, or the brand(s) * maintained by an organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of brand */ public function setBrand($value) { $this->brand = $value; } /** * Getter of Contact Point * * A contact point for a person or organization. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContactPoint() { return $this->contactPoint; } /** * Setter of Contact Point * * A contact point for a person or organization. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of contactPoint */ public function setContactPoint($value) { $this->contactPoint = $value; } /** * Getter of Contact Points * * A contact point for a person or organization (legacy spelling; see * singular form, contactPoint). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getContactPoints() { return $this->contactPoints; } /** * Setter of Contact Points * * A contact point for a person or organization (legacy spelling; see * singular form, contactPoint). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of contactPoints */ public function setContactPoints($value) { $this->contactPoints = $value; } /** * Add contactPoint to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of contactPoint */ public function addContactPoint($value) { $this->contactPoints[] = $value; } /** * Remove contactPoint to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of contactPoint */ public function removeContactPoint($value) { $key = array_search($value, $this->contactPoints); if($key !== false) { unset($this->contactPoints[$key]); } } /** * Getter of Currencies Accepted * * The currency accepted (in ISO 4217 currency format). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getCurrenciesAccepted() { return $this->currenciesAccepted; } /** * Setter of Currencies Accepted * * The currency accepted (in ISO 4217 currency format). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of currenciesAccepted */ public function setCurrenciesAccepted($value) { $this->currenciesAccepted = $value; } /** * Getter of Duns * * The Dun & Bradstreet DUNS number for identifying an organization or * business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDuns() { return $this->duns; } /** * Setter of Duns * * The Dun & Bradstreet DUNS number for identifying an organization or * business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of duns */ public function setDuns($value) { $this->duns = $value; } /** * Getter of Email * * Email address. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEmail() { return $this->email; } /** * Setter of Email * * Email address. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of email */ public function setEmail($value) { $this->email = $value; } /** * Getter of Employee * * Someone working for this organization. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEmployee() { return $this->employee; } /** * Setter of Employee * * Someone working for this organization. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of employee */ public function setEmployee($value) { $this->employee = $value; } /** * Getter of Employees * * People working for this organization. (legacy spelling; see singular * form, employee) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getEmployees() { return $this->employees; } /** * Setter of Employees * * People working for this organization. (legacy spelling; see singular * form, employee) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of employees */ public function setEmployees($value) { $this->employees = $value; } /** * Add employee to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of employee */ public function addEmployee($value) { $this->employees[] = $value; } /** * Remove employee to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of employee */ public function removeEmployee($value) { $key = array_search($value, $this->employees); if($key !== false) { unset($this->employees[$key]); } } /** * Getter of Founder * * A person who founded this organization. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getFounder() { return $this->founder; } /** * Setter of Founder * * A person who founded this organization. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of founder */ public function setFounder($value) { $this->founder = $value; } /** * Getter of Founders * * A person who founded this organization (legacy spelling; see singular * form, founder). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getFounders() { return $this->founders; } /** * Setter of Founders * * A person who founded this organization (legacy spelling; see singular * form, founder). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of founders */ public function setFounders($value) { $this->founders = $value; } /** * Add founder to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of founder */ public function addFounder($value) { $this->founders[] = $value; } /** * Remove founder to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of founder */ public function removeFounder($value) { $key = array_search($value, $this->founders); if($key !== false) { unset($this->founders[$key]); } } /** * Getter of Founding Date * * The date that this organization was founded. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getFoundingDate() { return $this->foundingDate; } /** * Setter of Founding Date * * The date that this organization was founded. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of foundingDate */ public function setFoundingDate($value) { $this->foundingDate = $value; } /** * Getter of Has POS * * Points-of-Sales operated by the organization or person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getHasPOS() { return $this->hasPOS; } /** * Setter of Has POS * * Points-of-Sales operated by the organization or person. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of hasPOS */ public function setHasPOS($value) { $this->hasPOS = $value; } /** * Getter of Legal Name * * The official name of the organization, e.g. the registered company * name. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getLegalName() { return $this->legalName; } /** * Setter of Legal Name * * The official name of the organization, e.g. the registered company * name. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of legalName */ public function setLegalName($value) { $this->legalName = $value; } /** * Getter of Location * * The location of the event, organization or action. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getLocation() { return $this->location; } /** * Setter of Location * * The location of the event, organization or action. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of location */ public function setLocation($value) { $this->location = $value; } /** * Getter of Makes Offer * * A pointer to products or services offered by the organization or * person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMakesOffer() { return $this->makesOffer; } /** * Setter of Makes Offer * * A pointer to products or services offered by the organization or * person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of makesOffer */ public function setMakesOffer($value) { $this->makesOffer = $value; } /** * Getter of Member * * A member of this organization. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMember() { return $this->member; } /** * Setter of Member * * A member of this organization. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of member */ public function setMember($value) { $this->member = $value; } /** * Getter of Members * * A member of this organization (legacy spelling; see singular form, * member). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getMembers() { return $this->members; } /** * Setter of Members * * A member of this organization (legacy spelling; see singular form, * member). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of members */ public function setMembers($value) { $this->members = $value; } /** * Add member to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of member */ public function addMember($value) { $this->members[] = $value; } /** * Remove member to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of member */ public function removeMember($value) { $key = array_search($value, $this->members); if($key !== false) { unset($this->members[$key]); } } /** * Getter of Naics * * The North American Industry Classification System (NAICS) code for a * particular organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getNaics() { return $this->naics; } /** * Setter of Naics * * The North American Industry Classification System (NAICS) code for a * particular organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of naics */ public function setNaics($value) { $this->naics = $value; } /** * Getter of Owns * * Products owned by the organization or person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOwns() { return $this->owns; } /** * Setter of Owns * * Products owned by the organization or person. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of owns */ public function setOwns($value) { $this->owns = $value; } /** * Getter of Payment Accepted * * Cash, credit card, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPaymentAccepted() { return $this->paymentAccepted; } /** * Setter of Payment Accepted * * Cash, credit card, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of paymentAccepted */ public function setPaymentAccepted($value) { $this->paymentAccepted = $value; } /** * Getter of Price Range * * The price range of the business, for example $$$. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPriceRange() { return $this->priceRange; } /** * Setter of Price Range * * The price range of the business, for example $$$. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of priceRange */ public function setPriceRange($value) { $this->priceRange = $value; } /** * Getter of Seeks * * A pointer to products or services sought by the organization or person * (demand). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSeeks() { return $this->seeks; } /** * Setter of Seeks * * A pointer to products or services sought by the organization or person * (demand). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of seeks */ public function setSeeks($value) { $this->seeks = $value; } /** * Getter of Tax ID * * The Tax / Fiscal ID of the organization or person, e.g. the TIN in the * US or the CIF/NIF in Spain. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getTaxID() { return $this->taxID; } /** * Setter of Tax ID * * The Tax / Fiscal ID of the organization or person, e.g. the TIN in the * US or the CIF/NIF in Spain. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of taxID */ public function setTaxID($value) { $this->taxID = $value; } /** * Getter of Vat ID * * The Value-added Tax ID of the organisation or person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getVatID() { return $this->vatID; } /** * Setter of Vat ID * * The Value-added Tax ID of the organisation or person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of vatID */ public function setVatID($value) { $this->vatID = $value; } } <file_sep>/GenerationSamples/MedicalConditionBundle/Model/AbstractMedicalCondition.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\MedicalConditionBundle\Model; use SchemaRepository\Bundle\MedicalConditionBundle\Model\MedicalConditionInterface; use SchemaRepository\Bundle\MedicalEntityBundle\Model\AbstractMedicalEntity; /** * Model of Medical Condition * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MedicalConditionBundle\Model */ abstract class AbstractMedicalCondition extends AbstractMedicalEntity implements MedicalConditionInterface { /** * The anatomy of the underlying organ system or structures associated * with this entity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $associatedAnatomy; /** * An underlying cause. More specifically, one of the causative agent(s) * that are most directly responsible for the pathophysiologic process * that eventually results in the occurrence. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $cause; /** * One of a set of differential diagnoses for the condition. * Specifically, a closely-related or competing diagnosis typically * considered later in the cognitive process whereby this medical * condition is distinguished from others most likely responsible for a * similar collection of signs and symptoms to reach the most * parsimonious diagnosis or diagnoses in a patient. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $differentialDiagnosis; /** * The characteristics of associated patients, such as age, gender, race * etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $epidemiology; /** * The likely outcome in either the short term or long term of the * medical condition. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $expectedPrognosis; /** * The expected progression of the condition if it is not treated and * allowed to progress naturally. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $naturalProgression; /** * Changes in the normal mechanical, physical, and biochemical functions * that are associated with this activity or condition. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $pathophysiology; /** * A possible unexpected and unfavorable evolution of a medical * condition. Complications may include worsening of the signs or * symptoms of the disease, extension of the condition to other organ * systems, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $possibleComplication; /** * A possible treatment to address this condition, sign or symptom. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $possibleTreatment; /** * A preventative therapy used to prevent an initial occurrence of the * medical condition, such as vaccination. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $primaryPrevention; /** * A modifiable or non-modifiable factor that increases the risk of a * patient contracting this condition, e.g. age, coexisting condition. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $riskFactor; /** * A preventative therapy used to prevent reoccurrence of the medical * condition after an initial episode of the condition. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $secondaryPrevention; /** * A sign or symptom of this condition. Signs are objective or physically * observable manifestations of the medical condition while symptoms are * the subjective experienceof the medical condition. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $signOrSymptom; /** * The stage of the condition, if applicable. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $stage; /** * A more specific type of the condition, where applicable, for example * 'Type 1 Diabetes', 'Type 2 Diabetes', or 'Gestational Diabetes' for * Diabetes. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $subtype; /** * A medical test typically performed given this condition. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $typicalTest; /** * Getter of Associated Anatomy * * The anatomy of the underlying organ system or structures associated * with this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAssociatedAnatomy() { return $this->associatedAnatomy; } /** * Setter of Associated Anatomy * * The anatomy of the underlying organ system or structures associated * with this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of associatedAnatomy */ public function setAssociatedAnatomy($value) { $this->associatedAnatomy = $value; } /** * Getter of Cause * * An underlying cause. More specifically, one of the causative agent(s) * that are most directly responsible for the pathophysiologic process * that eventually results in the occurrence. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCause() { return $this->cause; } /** * Setter of Cause * * An underlying cause. More specifically, one of the causative agent(s) * that are most directly responsible for the pathophysiologic process * that eventually results in the occurrence. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of cause */ public function setCause($value) { $this->cause = $value; } /** * Getter of Differential Diagnosis * * One of a set of differential diagnoses for the condition. 
* Specifically, a closely-related or competing diagnosis typically * considered later in the cognitive process whereby this medical * condition is distinguished from others most likely responsible for a * similar collection of signs and symptoms to reach the most * parsimonious diagnosis or diagnoses in a patient. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDifferentialDiagnosis() { return $this->differentialDiagnosis; } /** * Setter of Differential Diagnosis * * One of a set of differential diagnoses for the condition. * Specifically, a closely-related or competing diagnosis typically * considered later in the cognitive process whereby this medical * condition is distinguished from others most likely responsible for a * similar collection of signs and symptoms to reach the most * parsimonious diagnosis or diagnoses in a patient. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of differentialDiagnosis */ public function setDifferentialDiagnosis($value) { $this->differentialDiagnosis = $value; } /** * Getter of Epidemiology * * The characteristics of associated patients, such as age, gender, race * etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEpidemiology() { return $this->epidemiology; } /** * Setter of Epidemiology * * The characteristics of associated patients, such as age, gender, race * etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of epidemiology */ public function setEpidemiology($value) { $this->epidemiology = $value; } /** * Getter of Expected Prognosis * * The likely outcome in either the short term or long term of the * medical condition. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getExpectedPrognosis() { return $this->expectedPrognosis; } /** * Setter of Expected Prognosis * * The likely outcome in either the short term or long term of the * medical condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of expectedPrognosis */ public function setExpectedPrognosis($value) { $this->expectedPrognosis = $value; } /** * Getter of Natural Progression * * The expected progression of the condition if it is not treated and * allowed to progress naturally. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getNaturalProgression() { return $this->naturalProgression; } /** * Setter of Natural Progression * * The expected progression of the condition if it is not treated and * allowed to progress naturally. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of naturalProgression */ public function setNaturalProgression($value) { $this->naturalProgression = $value; } /** * Getter of Pathophysiology * * Changes in the normal mechanical, physical, and biochemical functions * that are associated with this activity or condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPathophysiology() { return $this->pathophysiology; } /** * Setter of Pathophysiology * * Changes in the normal mechanical, physical, and biochemical functions * that are associated with this activity or condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of pathophysiology */ public function setPathophysiology($value) { $this->pathophysiology = $value; } /** * Getter of Possible Complication * * A possible unexpected and unfavorable evolution of a medical * condition. 
Complications may include worsening of the signs or * symptoms of the disease, extension of the condition to other organ * systems, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPossibleComplication() { return $this->possibleComplication; } /** * Setter of Possible Complication * * A possible unexpected and unfavorable evolution of a medical * condition. Complications may include worsening of the signs or * symptoms of the disease, extension of the condition to other organ * systems, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of possibleComplication */ public function setPossibleComplication($value) { $this->possibleComplication = $value; } /** * Getter of Possible Treatment * * A possible treatment to address this condition, sign or symptom. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPossibleTreatment() { return $this->possibleTreatment; } /** * Setter of Possible Treatment * * A possible treatment to address this condition, sign or symptom. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of possibleTreatment */ public function setPossibleTreatment($value) { $this->possibleTreatment = $value; } /** * Getter of Primary Prevention * * A preventative therapy used to prevent an initial occurrence of the * medical condition, such as vaccination. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPrimaryPrevention() { return $this->primaryPrevention; } /** * Setter of Primary Prevention * * A preventative therapy used to prevent an initial occurrence of the * medical condition, such as vaccination. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of primaryPrevention */ public function setPrimaryPrevention($value) { $this->primaryPrevention = $value; } /** * Getter of Risk Factor * * A modifiable or non-modifiable factor that increases the risk of a * patient contracting this condition, e.g. age, coexisting condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRiskFactor() { return $this->riskFactor; } /** * Setter of Risk Factor * * A modifiable or non-modifiable factor that increases the risk of a * patient contracting this condition, e.g. age, coexisting condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of riskFactor */ public function setRiskFactor($value) { $this->riskFactor = $value; } /** * Getter of Secondary Prevention * * A preventative therapy used to prevent reoccurrence of the medical * condition after an initial episode of the condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSecondaryPrevention() { return $this->secondaryPrevention; } /** * Setter of Secondary Prevention * * A preventative therapy used to prevent reoccurrence of the medical * condition after an initial episode of the condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of secondaryPrevention */ public function setSecondaryPrevention($value) { $this->secondaryPrevention = $value; } /** * Getter of Sign or Symptom * * A sign or symptom of this condition. Signs are objective or physically * observable manifestations of the medical condition while symptoms are * the subjective experienceof the medical condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSignOrSymptom() { return $this->signOrSymptom; } /** * Setter of Sign or Symptom * * A sign or symptom of this condition. 
Signs are objective or physically * observable manifestations of the medical condition while symptoms are * the subjective experienceof the medical condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of signOrSymptom */ public function setSignOrSymptom($value) { $this->signOrSymptom = $value; } /** * Getter of Stage * * The stage of the condition, if applicable. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStage() { return $this->stage; } /** * Setter of Stage * * The stage of the condition, if applicable. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of stage */ public function setStage($value) { $this->stage = $value; } /** * Getter of Subtype * * A more specific type of the condition, where applicable, for example * 'Type 1 Diabetes', 'Type 2 Diabetes', or 'Gestational Diabetes' for * Diabetes. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSubtype() { return $this->subtype; } /** * Setter of Subtype * * A more specific type of the condition, where applicable, for example * 'Type 1 Diabetes', 'Type 2 Diabetes', or 'Gestational Diabetes' for * Diabetes. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of subtype */ public function setSubtype($value) { $this->subtype = $value; } /** * Getter of Typical Test * * A medical test typically performed given this condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTypicalTest() { return $this->typicalTest; } /** * Setter of Typical Test * * A medical test typically performed given this condition. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of typicalTest */ public function setTypicalTest($value) { $this->typicalTest = $value; } } <file_sep>/GenerationSamples/ThingBundle/Model/ThingInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\ThingBundle\Model; /** * Interface of Thing Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\ThingBundle\Model */ interface ThingInterface { /** * Getter of Id * * Id of the model * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long */ public function getId(); /** * Getter of Additional Type * * An additional type for the item, typically used for adding more * specific types from external vocabularies in microdata syntax. This is * a relationship between something and a class that the thing is in. 
In * RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' * attribute - for multiple types. Schema.org tools may have only weaker * understanding of extra types, in particular those defined externally. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAdditionalType(); /** * Getter of Description * * A short description of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDescription(); /** * Getter of Image * * URL of an image of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getImage(); /** * Getter of Name * * The name of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getName(); /** * Getter of Same As * * URL of a reference Web page that unambiguously indicates the item's * identity. E.g. the URL of the item's Wikipedia page, Freebase page, or * official website. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSameAs(); /** * Getter of URL * * URL of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getUrl(); } <file_sep>/GenerationSamples/DietBundle/Model/DietInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\DietBundle\Model; use SchemaRepository\Bundle\CreativeWorkBundle\Model\CreativeWorkInterface; /** * Interface of Diet Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\DietBundle\Model */ interface DietInterface extends CreativeWorkInterface { /** * Getter of Adverse Outcome * * A possible complication and/or side effect of this therapy. If it is * known that an adverse outcome is serious (resulting in death, * disability, or permanent damage; requiring hospitalization; or is * otherwise life-threatening or requires immediate medical attention), * tag it as a seriouseAdverseOutcome instead. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAdverseOutcome(); /** * Getter of Alternate Name * * Any alternate name for this medical entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAlternateName(); /** * Getter of Code * * A medical code for the entity, taken from a controlled vocabulary or * ontology such as ICD-9, DiseasesDB, MeSH, SNOMED-CT, RxNorm, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCode(); /** * Getter of Contraindication * * A contraindication for this therapy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContraindication(); /** * Getter of Diet Features * * Nutritional information specific to the dietary plan. 
May include * dietary recommendations on what foods to avoid, what foods to consume, * and specific alterations/deviations from the USDA or other regulatory * body's approved dietary guidelines. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDietFeatures(); /** * Getter of Duplicate Therapy * * A therapy that duplicates or overlaps this one. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDuplicateTherapy(); /** * Getter of Endorsers * * People or organizations that endorse the plan. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEndorsers(); /** * Getter of Expert Considerations * * Medical expert advice related to the plan. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getExpertConsiderations(); /** * Getter of Guideline * * A medical guideline related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getGuideline(); /** * Getter of Indication * * A factor that indicates use of this therapy for treatment and/or * prevention of a condition, symptom, etc. For therapies such as drugs, * indications can include both officially-approved indications as well * as off-label uses. These can be distinguished by using the * ApprovedIndication subtype of MedicalIndication. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIndication(); /** * Getter of Medicine System * * The system of medicine that includes this MedicalEntity, for example * 'evidence-based', 'homeopathic', 'chiropractic', etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMedicineSystem(); /** * Getter of Overview * * Descriptive information establishing the overarching theory/philosophy * of the plan. 
May include the rationale for the name, the population * where the plan first came to prominence, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getOverview(); /** * Getter of Physiological Benefits * * Specific physiologic benefits associated to the plan. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPhysiologicalBenefits(); /** * Getter of Proprietary Name * * Proprietary name given to the diet plan, typically by its originator * or creator. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getProprietaryName(); /** * Getter of Recognizing Authority * * If applicable, the organization that officially recognizes this entity * as part of its endorsed system of medicine. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRecognizingAuthority(); /** * Getter of Relevant Specialty * * If applicable, a medical specialty in which this entity is relevant. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelevantSpecialty(); /** * Getter of Risks * * Specific physiologic risks associated to the plan. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getRisks(); /** * Getter of Serious Adverse Outcome * * A possible serious complication and/or serious side effect of this * therapy. Serious adverse outcomes include those that are * life-threatening; result in death, disability, or permanent damage; * require hospitalization or prolong existing hospitalization; cause * congenital anomalies or birth defects; or jeopardize the patient and * may require medical or surgical intervention to prevent one of the * outcomes in this definition. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSeriousAdverseOutcome(); /** * Getter of Study * * A medical study or trial related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStudy(); } <file_sep>/GenerationSamples/PlaceBundle/Model/PlaceInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\PlaceBundle\Model; use SchemaRepository\Bundle\ThingBundle\Model\ThingInterface; /** * Interface of Place Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\PlaceBundle\Model */ interface PlaceInterface extends ThingInterface { /** * Getter of Address * * Physical address of the item. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAddress(); /** * Getter of Aggregate Rating * * The overall rating, based on a collection of reviews or ratings, of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAggregateRating(); /** * Getter of Contained in * * The basic containment relation between places. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContainedIn(); /** * Getter of Event * * Upcoming or past event associated with this place or organization. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEvent(); /** * Getter of Events * * Upcoming or past events associated with this place or organization * (legacy spelling; see singular form, event). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getEvents(); /** * Getter of Fax Number * * The fax number. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getFaxNumber(); /** * Getter of Geo * * The geo coordinates of the place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getGeo(); /** * Getter of Global Location Number * * The Global Location Number (GLN, sometimes also referred to as * International Location Number or ILN) of the respective organization, * person, or place. The GLN is a 13-digit number used to identify * parties and physical locations. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGlobalLocationNumber(); /** * Getter of Interaction Count * * A count of a specific user interactions with this item—for example, * 20 UserLikes, 5 UserComments, or 300 UserDownloads. The user * interaction type should be one of the sub types of UserInteraction. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getInteractionCount(); /** * Getter of Isic V4 * * The International Standard of Industrial Classification of All * Economic Activities (ISIC), Revision 4 code for a particular * organization, business person, or place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getIsicV4(); /** * Getter of Logo * * URL of an image for the logo of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getLogo(); /** * Getter of Map * * A URL to a map of the place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getMap(); /** * Getter of Maps * * A URL to a map of the place (legacy spelling; see singular form, map). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getMaps(); /** * Getter of Opening Hours Specification * * The opening hours of a certain place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOpeningHoursSpecification(); /** * Getter of Photo * * A photograph of this place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPhoto(); /** * Getter of Photos * * Photographs of this place (legacy spelling; see singular form, photo). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getPhotos(); /** * Getter of Review * * A review of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getReview(); /** * Getter of Reviews * * Review of the item (legacy spelling; see singular form, review). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getReviews(); /** * Getter of Telephone * * The telephone number. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getTelephone(); } <file_sep>/GenerationSamples/ActionBundle/Model/ActionInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\ActionBundle\Model; use SchemaRepository\Bundle\ThingBundle\Model\ThingInterface; /** * Interface of Action Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\ActionBundle\Model */ interface ActionInterface extends ThingInterface { /** * Getter of Agent * * The direct performer or driver of the action (animate or inanimate). * e.g. *John* wrote a book. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAgent(); /** * Getter of End Time * * When the Action was performed: end time. This is for actions that span * a period of time. e.g. 
John wrote a book from January to *December*. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return \DateTime */ public function getEndTime(); /** * Getter of Instrument * * The object that helped the agent perform the action. e.g. John wrote a * book with *a pen*. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getInstrument(); /** * Getter of Location * * The location of the event, organization or action. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getLocation(); /** * Getter of Object * * The object upon the action is carried out, whose state is kept intact * or changed. Also known as the semantic roles patient, affected or * undergoer (which change their state) or theme (which doesn't). e.g. * John read *a book*. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getObject(); /** * Getter of Participant * * Other co-agents that participated in the action indirectly. e.g. John * wrote a book with *Steve*. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getParticipant(); /** * Getter of Result * * The result produced in the action. e.g. John wrote *a book*. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getResult(); /** * Getter of Start Time * * When the Action was performed: start time. This is for actions that * span a period of time. e.g. John wrote a book from *January* to * December. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return \DateTime */ public function getStartTime(); } <file_sep>/GenerationSamples/BlogBundle/Model/AbstractBlog.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\BlogBundle\Model; use SchemaRepository\Bundle\BlogBundle\Model\BlogInterface; use SchemaRepository\Bundle\CreativeWorkBundle\Model\AbstractCreativeWork; /** * Model of Blog * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\BlogBundle\Model */ abstract class AbstractBlog extends AbstractCreativeWork implements BlogInterface { /** * A posting that is part of this blog. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $blogPost; /** * The postings that are part of this blog (legacy spelling; see singular * form, blogPost). 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $blogPosts; /** * Getter of Blog Post * * A posting that is part of this blog. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBlogPost() { return $this->blogPost; } /** * Setter of Blog Post * * A posting that is part of this blog. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of blogPost */ public function setBlogPost($value) { $this->blogPost = $value; } /** * Getter of Blog Posts * * The postings that are part of this blog (legacy spelling; see singular * form, blogPost). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getBlogPosts() { return $this->blogPosts; } /** * Setter of Blog Posts * * The postings that are part of this blog (legacy spelling; see singular * form, blogPost). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of blogPosts */ public function setBlogPosts($value) { $this->blogPosts = $value; } /** * Add blogPost to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of blogPost */ public function addBlogPost($value) { $this->blogPosts[] = $value; } /** * Remove blogPost to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of blogPost */ public function removeBlogPost($value) { $key = array_search($value, $this->blogPosts); if($key !== false) { unset($this->blogPosts[$key]); } } } <file_sep>/GenerationSamples/MusicPlaylistBundle/Model/AbstractMusicPlaylist.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

namespace SchemaRepository\Bundle\MusicPlaylistBundle\Model;

use SchemaRepository\Bundle\MusicPlaylistBundle\Model\MusicPlaylistInterface;
use SchemaRepository\Bundle\CreativeWorkBundle\Model\AbstractCreativeWork;

/**
 * Base model of a Music Playlist (see schema.org/MusicPlaylist).
 *
 * Carries the track count, the singular track property and the legacy
 * plural tracks collection, with accessors and collection helpers.
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\MusicPlaylistBundle\Model
 */
abstract class AbstractMusicPlaylist extends AbstractCreativeWork implements MusicPlaylistInterface
{
    /**
     * The number of tracks in this album or playlist.
     *
     * @var integer|long
     */
    protected $numTracks;

    /**
     * A music recording (track)&#x2014;usually a single song.
     *
     * @var mixed
     */
    protected $track;

    /**
     * A music recording (track)&#x2014;usually a single song (legacy
     * spelling; see singular form, track).
     *
     * @var array
     */
    protected $tracks;

    /**
     * Returns the number of tracks in this album or playlist.
     *
     * @return integer|long
     */
    public function getNumTracks()
    {
        return $this->numTracks;
    }

    /**
     * Sets the number of tracks in this album or playlist.
     *
     * @param integer|long $value Value of numTracks
     */
    public function setNumTracks($value)
    {
        $this->numTracks = $value;
    }

    /**
     * Returns the single music recording (track).
     *
     * @return mixed
     */
    public function getTrack()
    {
        return $this->track;
    }

    /**
     * Sets the single music recording (track).
     *
     * @param mixed $value Value of track
     */
    public function setTrack($value)
    {
        $this->track = $value;
    }

    /**
     * Returns the legacy plural collection of tracks.
     *
     * @return array
     */
    public function getTracks()
    {
        return $this->tracks;
    }

    /**
     * Replaces the legacy plural collection of tracks.
     *
     * @param array $value Value of tracks
     */
    public function setTracks($value)
    {
        $this->tracks = $value;
    }

    /**
     * Appends one track to the tracks collection.
     *
     * @param mixed $value Track to add
     */
    public function addTrack($value)
    {
        $this->tracks[] = $value;
    }

    /**
     * Removes one track from the tracks collection.
     *
     * Uses array_search's default loose comparison, matching the other
     * generated models; a missing value is silently ignored.
     *
     * @param mixed $value Track to remove
     */
    public function removeTrack($value)
    {
        $idx = array_search($value, $this->tracks);
        if (false !== $idx) {
            unset($this->tracks[$idx]);
        }
    }
}
<file_sep>/GenerationSamples/NutritionInformationBundle/Model/AbstractNutritionInformation.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
*/ namespace SchemaRepository\Bundle\NutritionInformationBundle\Model; use SchemaRepository\Bundle\NutritionInformationBundle\Model\NutritionInformationInterface; use SchemaRepository\Bundle\StructuredValueBundle\Model\AbstractStructuredValue; /** * Model of Nutrition Information * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\NutritionInformationBundle\Model */ abstract class AbstractNutritionInformation extends AbstractStructuredValue implements NutritionInformationInterface { /** * The number of calories * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $calories; /** * The number of grams of carbohydrates. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $carbohydrateContent; /** * The number of milligrams of cholesterol. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $cholesterolContent; /** * The number of grams of fat. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $fatContent; /** * The number of grams of fiber. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $fiberContent; /** * The number of grams of protein. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $proteinContent; /** * The number of grams of saturated fat. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $saturatedFatContent; /** * The serving size, in terms of the number of volume or mass * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $servingSize; /** * The number of milligrams of sodium. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $sodiumContent; /** * The number of grams of sugar. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $sugarContent; /** * The number of grams of trans fat. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $transFatContent; /** * The number of grams of unsaturated fat. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $unsaturatedFatContent; /** * Getter of Calories * * The number of calories * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCalories() { return $this->calories; } /** * Setter of Calories * * The number of calories * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of calories */ public function setCalories($value) { $this->calories = $value; } /** * Getter of Carbohydrate Content * * The number of grams of carbohydrates. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCarbohydrateContent() { return $this->carbohydrateContent; } /** * Setter of Carbohydrate Content * * The number of grams of carbohydrates. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of carbohydrateContent */ public function setCarbohydrateContent($value) { $this->carbohydrateContent = $value; } /** * Getter of Cholesterol Content * * The number of milligrams of cholesterol. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCholesterolContent() { return $this->cholesterolContent; } /** * Setter of Cholesterol Content * * The number of milligrams of cholesterol. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of cholesterolContent */ public function setCholesterolContent($value) { $this->cholesterolContent = $value; } /** * Getter of Fat Content * * The number of grams of fat. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getFatContent() { return $this->fatContent; } /** * Setter of Fat Content * * The number of grams of fat. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of fatContent */ public function setFatContent($value) { $this->fatContent = $value; } /** * Getter of Fiber Content * * The number of grams of fiber. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getFiberContent() { return $this->fiberContent; } /** * Setter of Fiber Content * * The number of grams of fiber. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of fiberContent */ public function setFiberContent($value) { $this->fiberContent = $value; } /** * Getter of Protein Content * * The number of grams of protein. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getProteinContent() { return $this->proteinContent; } /** * Setter of Protein Content * * The number of grams of protein. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of proteinContent */ public function setProteinContent($value) { $this->proteinContent = $value; } /** * Getter of Saturated Fat Content * * The number of grams of saturated fat. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSaturatedFatContent() { return $this->saturatedFatContent; } /** * Setter of Saturated Fat Content * * The number of grams of saturated fat. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of saturatedFatContent */ public function setSaturatedFatContent($value) { $this->saturatedFatContent = $value; } /** * Getter of Serving Size * * The serving size, in terms of the number of volume or mass * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getServingSize() { return $this->servingSize; } /** * Setter of Serving Size * * The serving size, in terms of the number of volume or mass * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of servingSize */ public function setServingSize($value) { $this->servingSize = $value; } /** * Getter of Sodium Content * * The number of milligrams of sodium. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSodiumContent() { return $this->sodiumContent; } /** * Setter of Sodium Content * * The number of milligrams of sodium. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of sodiumContent */ public function setSodiumContent($value) { $this->sodiumContent = $value; } /** * Getter of Sugar Content * * The number of grams of sugar. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSugarContent() { return $this->sugarContent; } /** * Setter of Sugar Content * * The number of grams of sugar. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of sugarContent */ public function setSugarContent($value) { $this->sugarContent = $value; } /** * Getter of Trans Fat Content * * The number of grams of trans fat. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTransFatContent() { return $this->transFatContent; } /** * Setter of Trans Fat Content * * The number of grams of trans fat. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of transFatContent */ public function setTransFatContent($value) { $this->transFatContent = $value; } /** * Getter of Unsaturated Fat Content * * The number of grams of unsaturated fat. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getUnsaturatedFatContent() { return $this->unsaturatedFatContent; } /** * Setter of Unsaturated Fat Content * * The number of grams of unsaturated fat. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of unsaturatedFatContent */ public function setUnsaturatedFatContent($value) { $this->unsaturatedFatContent = $value; } } <file_sep>/GenerationSamples/PlaceBundle/Model/AbstractPlace.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\PlaceBundle\Model; use SchemaRepository\Bundle\PlaceBundle\Model\PlaceInterface; use SchemaRepository\Bundle\ThingBundle\Model\AbstractThing; /** * Model of Place * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\PlaceBundle\Model */ abstract class AbstractPlace extends AbstractThing implements PlaceInterface { /** * Physical address of the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $address; /** * The overall rating, based on a collection of reviews or ratings, of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $aggregateRating; /** * The basic containment relation between places. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $containedIn; /** * Upcoming or past event associated with this place or organization. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $event; /** * Upcoming or past events associated with this place or organization * (legacy spelling; see singular form, event). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $events; /** * The fax number. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $faxNumber; /** * The geo coordinates of the place. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $geo; /** * The Global Location Number (GLN, sometimes also referred to as * International Location Number or ILN) of the respective organization, * person, or place. The GLN is a 13-digit number used to identify * parties and physical locations. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $globalLocationNumber; /** * A count of a specific user interactions with this item—for example, * 20 UserLikes, 5 UserComments, or 300 UserDownloads. 
The user * interaction type should be one of the sub types of UserInteraction. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $interactionCount; /** * The International Standard of Industrial Classification of All * Economic Activities (ISIC), Revision 4 code for a particular * organization, business person, or place. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $isicV4; /** * URL of an image for the logo of the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $logo; /** * A URL to a map of the place. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $map; /** * A URL to a map of the place (legacy spelling; see singular form, map). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $maps; /** * The opening hours of a certain place. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $openingHoursSpecification; /** * A photograph of this place. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $photo; /** * Photographs of this place (legacy spelling; see singular form, photo). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $photos; /** * A review of the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $review; /** * Review of the item (legacy spelling; see singular form, review). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $reviews; /** * The telephone number. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $telephone; /** * Getter of Address * * Physical address of the item. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAddress() { return $this->address; } /** * Setter of Address * * Physical address of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of address */ public function setAddress($value) { $this->address = $value; } /** * Getter of Aggregate Rating * * The overall rating, based on a collection of reviews or ratings, of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAggregateRating() { return $this->aggregateRating; } /** * Setter of Aggregate Rating * * The overall rating, based on a collection of reviews or ratings, of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of aggregateRating */ public function setAggregateRating($value) { $this->aggregateRating = $value; } /** * Getter of Contained in * * The basic containment relation between places. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContainedIn() { return $this->containedIn; } /** * Setter of Contained in * * The basic containment relation between places. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of containedIn */ public function setContainedIn($value) { $this->containedIn = $value; } /** * Getter of Event * * Upcoming or past event associated with this place or organization. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEvent() { return $this->event; } /** * Setter of Event * * Upcoming or past event associated with this place or organization. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of event */ public function setEvent($value) { $this->event = $value; } /** * Getter of Events * * Upcoming or past events associated with this place or organization * (legacy spelling; see singular form, event). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getEvents() { return $this->events; } /** * Setter of Events * * Upcoming or past events associated with this place or organization * (legacy spelling; see singular form, event). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of events */ public function setEvents($value) { $this->events = $value; } /** * Add event to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of event */ public function addEvent($value) { $this->events[] = $value; } /** * Remove event to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of event */ public function removeEvent($value) { $key = array_search($value, $this->events); if($key !== false) { unset($this->events[$key]); } } /** * Getter of Fax Number * * The fax number. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getFaxNumber() { return $this->faxNumber; } /** * Setter of Fax Number * * The fax number. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of faxNumber */ public function setFaxNumber($value) { $this->faxNumber = $value; } /** * Getter of Geo * * The geo coordinates of the place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getGeo() { return $this->geo; } /** * Setter of Geo * * The geo coordinates of the place. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of geo */ public function setGeo($value) { $this->geo = $value; } /** * Getter of Global Location Number * * The Global Location Number (GLN, sometimes also referred to as * International Location Number or ILN) of the respective organization, * person, or place. The GLN is a 13-digit number used to identify * parties and physical locations. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGlobalLocationNumber() { return $this->globalLocationNumber; } /** * Setter of Global Location Number * * The Global Location Number (GLN, sometimes also referred to as * International Location Number or ILN) of the respective organization, * person, or place. The GLN is a 13-digit number used to identify * parties and physical locations. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of globalLocationNumber */ public function setGlobalLocationNumber($value) { $this->globalLocationNumber = $value; } /** * Getter of Interaction Count * * A count of a specific user interactions with this item—for example, * 20 UserLikes, 5 UserComments, or 300 UserDownloads. The user * interaction type should be one of the sub types of UserInteraction. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getInteractionCount() { return $this->interactionCount; } /** * Setter of Interaction Count * * A count of a specific user interactions with this item—for example, * 20 UserLikes, 5 UserComments, or 300 UserDownloads. The user * interaction type should be one of the sub types of UserInteraction. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of interactionCount */ public function setInteractionCount($value) { $this->interactionCount = $value; } /** * Getter of Isic V4 * * The International Standard of Industrial Classification of All * Economic Activities (ISIC), Revision 4 code for a particular * organization, business person, or place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getIsicV4() { return $this->isicV4; } /** * Setter of Isic V4 * * The International Standard of Industrial Classification of All * Economic Activities (ISIC), Revision 4 code for a particular * organization, business person, or place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of isicV4 */ public function setIsicV4($value) { $this->isicV4 = $value; } /** * Getter of Logo * * URL of an image for the logo of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getLogo() { return $this->logo; } /** * Setter of Logo * * URL of an image for the logo of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of logo */ public function setLogo($value) { $this->logo = $value; } /** * Getter of Map * * A URL to a map of the place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getMap() { return $this->map; } /** * Setter of Map * * A URL to a map of the place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of map */ public function setMap($value) { $this->map = $value; } /** * Getter of Maps * * A URL to a map of the place (legacy spelling; see singular form, map). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getMaps() { return $this->maps; } /** * Setter of Maps * * A URL to a map of the place (legacy spelling; see singular form, map). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of maps */ public function setMaps($value) { $this->maps = $value; } /** * Getter of Opening Hours Specification * * The opening hours of a certain place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOpeningHoursSpecification() { return $this->openingHoursSpecification; } /** * Setter of Opening Hours Specification * * The opening hours of a certain place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of openingHoursSpecification */ public function setOpeningHoursSpecification($value) { $this->openingHoursSpecification = $value; } /** * Getter of Photo * * A photograph of this place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPhoto() { return $this->photo; } /** * Setter of Photo * * A photograph of this place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of photo */ public function setPhoto($value) { $this->photo = $value; } /** * Getter of Photos * * Photographs of this place (legacy spelling; see singular form, photo). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getPhotos() { return $this->photos; } /** * Setter of Photos * * Photographs of this place (legacy spelling; see singular form, photo). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of photos */ public function setPhotos($value) { $this->photos = $value; } /** * Add photo to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of photo */ public function addPhoto($value) { $this->photos[] = $value; } /** * Remove photo to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of photo */ public function removePhoto($value) { $key = array_search($value, $this->photos); if($key !== false) { unset($this->photos[$key]); } } /** * Getter of Review * * A review of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getReview() { return $this->review; } /** * Setter of Review * * A review of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of review */ public function setReview($value) { $this->review = $value; } /** * Getter of Reviews * * Review of the item (legacy spelling; see singular form, review). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getReviews() { return $this->reviews; } /** * Setter of Reviews * * Review of the item (legacy spelling; see singular form, review). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of reviews */ public function setReviews($value) { $this->reviews = $value; } /** * Add review to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of review */ public function addReview($value) { $this->reviews[] = $value; } /** * Remove review to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of review */ public function removeReview($value) { $key = <EMAIL>_search($value, $this->reviews); if($key !== false) { unset($this->reviews[$key]); } } /** * Getter of Telephone * * The telephone number. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getTelephone() { return $this->telephone; } /** * Setter of Telephone * * The telephone number. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of telephone */ public function setTelephone($value) { $this->telephone = $value; } } <file_sep>/GenerationSamples/PersonBundle/Model/AbstractPerson.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\PersonBundle\Model; use SchemaRepository\Bundle\PersonBundle\Model\PersonInterface; use SchemaRepository\Bundle\ThingBundle\Model\AbstractThing; /** * Model of Person * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\PersonBundle\Model */ abstract class AbstractPerson extends AbstractThing implements PersonInterface { /** * An additional name for a Person, can be used for a middle name. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $additionalName; /** * Physical address of the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $address; /** * An organization that this person is affiliated with. For example, a * school/university, a club, or a team. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $affiliation; /** * An educational organizations that the person is an alumni of. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $alumniOf; /** * An award won by this person or for this creative work. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $award; /** * Awards won by this person or for this creative work. (legacy spelling; * see singular form, award) * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $awards; /** * Date of birth. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $birthDate; /** * The brand(s) associated with a product or service, or the brand(s) * maintained by an organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $brand; /** * A child of the person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $children; /** * A colleague of the person. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $colleague; /** * A colleague of the person (legacy spelling; see singular form, * colleague). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $colleagues; /** * A contact point for a person or organization. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $contactPoint; /** * A contact point for a person or organization (legacy spelling; see * singular form, contactPoint). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $contactPoints; /** * Date of death. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $deathDate; /** * The Dun & Bradstreet DUNS number for identifying an organization or * business person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $duns; /** * Email address. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $email; /** * Family name. In the U.S., the last name of an Person. This can be used * along with givenName instead of the Name property. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $familyName; /** * The fax number. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $faxNumber; /** * The most generic uni-directional social relation. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $follows; /** * Gender of the person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $gender; /** * Given name. In the U.S., the first name of a Person. This can be used * along with familyName instead of the Name property. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $givenName; /** * The Global Location Number (GLN, sometimes also referred to as * International Location Number or ILN) of the respective organization, * person, or place. The GLN is a 13-digit number used to identify * parties and physical locations. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $globalLocationNumber; /** * Points-of-Sales operated by the organization or person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $hasPOS; /** * A contact location for a person's residence. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $homeLocation; /** * An honorific prefix preceding a Person's name such as Dr/Mrs/Mr. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $honorificPrefix; /** * An honorific suffix preceding a Person's name such as M.D. /PhD/MSCSW. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $honorificSuffix; /** * A count of a specific user interactions with this item—for example, * 20 UserLikes, 5 UserComments, or 300 UserDownloads. The user * interaction type should be one of the sub types of UserInteraction. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $interactionCount; /** * The International Standard of Industrial Classification of All * Economic Activities (ISIC), Revision 4 code for a particular * organization, business person, or place. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $isicV4; /** * The job title of the person (for example, Financial Manager). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $jobTitle; /** * The most generic bi-directional social/work relation. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $knows; /** * A pointer to products or services offered by the organization or * person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $makesOffer; /** * An organization to which the person belongs. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $memberOf; /** * The North American Industry Classification System (NAICS) code for a * particular organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $naics; /** * Nationality of the person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $nationality; /** * Products owned by the organization or person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $owns; /** * A parent of this person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $parent; /** * A parents of the person (legacy spelling; see singular form, parent). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $parents; /** * Event that this person is a performer or participant in. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $performerIn; /** * The most generic familial relation. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $relatedTo; /** * A pointer to products or services sought by the organization or person * (demand). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $seeks; /** * A sibling of the person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $sibling; /** * A sibling of the person (legacy spelling; see singular form, sibling). 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $siblings; /** * The person's spouse. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $spouse; /** * The Tax / Fiscal ID of the organization or person, e.g. the TIN in the * US or the CIF/NIF in Spain. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $taxID; /** * The telephone number. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $telephone; /** * The Value-added Tax ID of the organisation or person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $vatID; /** * A contact location for a person's place of work. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $workLocation; /** * Organizations that the person works for. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $worksFor; /** * Getter of Additional Name * * An additional name for a Person, can be used for a middle name. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAdditionalName() { return $this->additionalName; } /** * Setter of Additional Name * * An additional name for a Person, can be used for a middle name. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of additionalName */ public function setAdditionalName($value) { $this->additionalName = $value; } /** * Getter of Address * * Physical address of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAddress() { return $this->address; } /** * Setter of Address * * Physical address of the item. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of address */ public function setAddress($value) { $this->address = $value; } /** * Getter of Affiliation * * An organization that this person is affiliated with. For example, a * school/university, a club, or a team. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAffiliation() { return $this->affiliation; } /** * Setter of Affiliation * * An organization that this person is affiliated with. For example, a * school/university, a club, or a team. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of affiliation */ public function setAffiliation($value) { $this->affiliation = $value; } /** * Getter of Alumni of * * An educational organizations that the person is an alumni of. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAlumniOf() { return $this->alumniOf; } /** * Setter of Alumni of * * An educational organizations that the person is an alumni of. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of alumniOf */ public function setAlumniOf($value) { $this->alumniOf = $value; } /** * Getter of Award * * An award won by this person or for this creative work. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAward() { return $this->award; } /** * Setter of Award * * An award won by this person or for this creative work. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of award */ public function setAward($value) { $this->award = $value; } /** * Getter of Awards * * Awards won by this person or for this creative work. 
(legacy spelling; * see singular form, award) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAwards() { return $this->awards; } /** * Setter of Awards * * Awards won by this person or for this creative work. (legacy spelling; * see singular form, award) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of awards */ public function setAwards($value) { $this->awards = $value; } /** * Getter of Birth Date * * Date of birth. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBirthDate() { return $this->birthDate; } /** * Setter of Birth Date * * Date of birth. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of birthDate */ public function setBirthDate($value) { $this->birthDate = $value; } /** * Getter of Brand * * The brand(s) associated with a product or service, or the brand(s) * maintained by an organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBrand() { return $this->brand; } /** * Setter of Brand * * The brand(s) associated with a product or service, or the brand(s) * maintained by an organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of brand */ public function setBrand($value) { $this->brand = $value; } /** * Getter of Children * * A child of the person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getChildren() { return $this->children; } /** * Setter of Children * * A child of the person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of children */ public function setChildren($value) { $this->children = $value; } /** * Getter of Colleague * * A colleague of the person. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getColleague() { return $this->colleague; } /** * Setter of Colleague * * A colleague of the person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of colleague */ public function setColleague($value) { $this->colleague = $value; } /** * Getter of Colleagues * * A colleague of the person (legacy spelling; see singular form, * colleague). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getColleagues() { return $this->colleagues; } /** * Setter of Colleagues * * A colleague of the person (legacy spelling; see singular form, * colleague). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of colleagues */ public function setColleagues($value) { $this->colleagues = $value; } /** * Add colleague to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of colleague */ public function addColleague($value) { $this->colleagues[] = $value; } /** * Remove colleague to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of colleague */ public function removeColleague($value) { $key = array_search($value, $this->colleagues); if($key !== false) { unset($this->colleagues[$key]); } } /** * Getter of Contact Point * * A contact point for a person or organization. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContactPoint() { return $this->contactPoint; } /** * Setter of Contact Point * * A contact point for a person or organization. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of contactPoint */ public function setContactPoint($value) { $this->contactPoint = $value; } /** * Getter of Contact Points * * A contact point for a person or organization (legacy spelling; see * singular form, contactPoint). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getContactPoints() { return $this->contactPoints; } /** * Setter of Contact Points * * A contact point for a person or organization (legacy spelling; see * singular form, contactPoint). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of contactPoints */ public function setContactPoints($value) { $this->contactPoints = $value; } /** * Add contactPoint to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of contactPoint */ public function addContactPoint($value) { $this->contactPoints[] = $value; } /** * Remove contactPoint to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of contactPoint */ public function removeContactPoint($value) { $key = array_search($value, $this->contactPoints); if($key !== false) { unset($this->contactPoints[$key]); } } /** * Getter of Death Date * * Date of death. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDeathDate() { return $this->deathDate; } /** * Setter of Death Date * * Date of death. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of deathDate */ public function setDeathDate($value) { $this->deathDate = $value; } /** * Getter of Duns * * The Dun & Bradstreet DUNS number for identifying an organization or * business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDuns() { return $this->duns; } /** * Setter of Duns * * The Dun & Bradstreet DUNS number for identifying an organization or * business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of duns */ public function setDuns($value) { $this->duns = $value; } /** * Getter of Email * * Email address. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEmail() { return $this->email; } /** * Setter of Email * * Email address. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of email */ public function setEmail($value) { $this->email = $value; } /** * Getter of Family Name * * Family name. In the U.S., the last name of an Person. This can be used * along with givenName instead of the Name property. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getFamilyName() { return $this->familyName; } /** * Setter of Family Name * * Family name. In the U.S., the last name of an Person. This can be used * along with givenName instead of the Name property. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of familyName */ public function setFamilyName($value) { $this->familyName = $value; } /** * Getter of Fax Number * * The fax number. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getFaxNumber() { return $this->faxNumber; } /** * Setter of Fax Number * * The fax number. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of faxNumber */ public function setFaxNumber($value) { $this->faxNumber = $value; } /** * Getter of Follows * * The most generic uni-directional social relation. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getFollows() { return $this->follows; } /** * Setter of Follows * * The most generic uni-directional social relation. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of follows */ public function setFollows($value) { $this->follows = $value; } /** * Getter of Gender * * Gender of the person. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGender() { return $this->gender; } /** * Setter of Gender * * Gender of the person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of gender */ public function setGender($value) { $this->gender = $value; } /** * Getter of Given Name * * Given name. In the U.S., the first name of a Person. This can be used * along with familyName instead of the Name property. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGivenName() { return $this->givenName; } /** * Setter of Given Name * * Given name. In the U.S., the first name of a Person. This can be used * along with familyName instead of the Name property. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of givenName */ public function setGivenName($value) { $this->givenName = $value; } /** * Getter of Global Location Number * * The Global Location Number (GLN, sometimes also referred to as * International Location Number or ILN) of the respective organization, * person, or place. The GLN is a 13-digit number used to identify * parties and physical locations. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGlobalLocationNumber() { return $this->globalLocationNumber; } /** * Setter of Global Location Number * * The Global Location Number (GLN, sometimes also referred to as * International Location Number or ILN) of the respective organization, * person, or place. The GLN is a 13-digit number used to identify * parties and physical locations. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of globalLocationNumber */ public function setGlobalLocationNumber($value) { $this->globalLocationNumber = $value; } /** * Getter of Has POS * * Points-of-Sales operated by the organization or person. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getHasPOS() { return $this->hasPOS; } /** * Setter of Has POS * * Points-of-Sales operated by the organization or person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of hasPOS */ public function setHasPOS($value) { $this->hasPOS = $value; } /** * Getter of Home Location * * A contact location for a person's residence. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getHomeLocation() { return $this->homeLocation; } /** * Setter of Home Location * * A contact location for a person's residence. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of homeLocation */ public function setHomeLocation($value) { $this->homeLocation = $value; } /** * Getter of Honorific Prefix * * An honorific prefix preceding a Person's name such as Dr/Mrs/Mr. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getHonorificPrefix() { return $this->honorificPrefix; } /** * Setter of Honorific Prefix * * An honorific prefix preceding a Person's name such as Dr/Mrs/Mr. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of honorificPrefix */ public function setHonorificPrefix($value) { $this->honorificPrefix = $value; } /** * Getter of Honorific Suffix * * An honorific suffix preceding a Person's name such as M.D. /PhD/MSCSW. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getHonorificSuffix() { return $this->honorificSuffix; } /** * Setter of Honorific Suffix * * An honorific suffix preceding a Person's name such as M.D. /PhD/MSCSW. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of honorificSuffix */ public function setHonorificSuffix($value) { $this->honorificSuffix = $value; } /** * Getter of Interaction Count * * A count of a specific user interactions with this item—for example, * 20 UserLikes, 5 UserComments, or 300 UserDownloads. The user * interaction type should be one of the sub types of UserInteraction. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getInteractionCount() { return $this->interactionCount; } /** * Setter of Interaction Count * * A count of a specific user interactions with this item—for example, * 20 UserLikes, 5 UserComments, or 300 UserDownloads. The user * interaction type should be one of the sub types of UserInteraction. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of interactionCount */ public function setInteractionCount($value) { $this->interactionCount = $value; } /** * Getter of Isic V4 * * The International Standard of Industrial Classification of All * Economic Activities (ISIC), Revision 4 code for a particular * organization, business person, or place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getIsicV4() { return $this->isicV4; } /** * Setter of Isic V4 * * The International Standard of Industrial Classification of All * Economic Activities (ISIC), Revision 4 code for a particular * organization, business person, or place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of isicV4 */ public function setIsicV4($value) { $this->isicV4 = $value; } /** * Getter of Job Title * * The job title of the person (for example, Financial Manager). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getJobTitle() { return $this->jobTitle; } /** * Setter of Job Title * * The job title of the person (for example, Financial Manager). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of jobTitle */ public function setJobTitle($value) { $this->jobTitle = $value; } /** * Getter of Knows * * The most generic bi-directional social/work relation. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getKnows() { return $this->knows; } /** * Setter of Knows * * The most generic bi-directional social/work relation. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of knows */ public function setKnows($value) { $this->knows = $value; } /** * Getter of Makes Offer * * A pointer to products or services offered by the organization or * person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMakesOffer() { return $this->makesOffer; } /** * Setter of Makes Offer * * A pointer to products or services offered by the organization or * person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of makesOffer */ public function setMakesOffer($value) { $this->makesOffer = $value; } /** * Getter of Member of * * An organization to which the person belongs. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMemberOf() { return $this->memberOf; } /** * Setter of Member of * * An organization to which the person belongs. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of memberOf */ public function setMemberOf($value) { $this->memberOf = $value; } /** * Getter of Naics * * The North American Industry Classification System (NAICS) code for a * particular organization or business person. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getNaics() { return $this->naics; } /** * Setter of Naics * * The North American Industry Classification System (NAICS) code for a * particular organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of naics */ public function setNaics($value) { $this->naics = $value; } /** * Getter of Nationality * * Nationality of the person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getNationality() { return $this->nationality; } /** * Setter of Nationality * * Nationality of the person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of nationality */ public function setNationality($value) { $this->nationality = $value; } /** * Getter of Owns * * Products owned by the organization or person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOwns() { return $this->owns; } /** * Setter of Owns * * Products owned by the organization or person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of owns */ public function setOwns($value) { $this->owns = $value; } /** * Getter of Parent * * A parent of this person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getParent() { return $this->parent; } /** * Setter of Parent * * A parent of this person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of parent */ public function setParent($value) { $this->parent = $value; } /** * Getter of Parents * * A parents of the person (legacy spelling; see singular form, parent). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getParents() { return $this->parents; } /** * Setter of Parents * * A parents of the person (legacy spelling; see singular form, parent). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of parents */ public function setParents($value) { $this->parents = $value; } /** * Add parent to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of parent */ public function addParent($value) { $this->parents[] = $value; } /** * Remove parent to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of parent */ public function removeParent($value) { $key = array_search($value, $this->parents); if($key !== false) { unset($this->parents[$key]); } } /** * Getter of Performer in * * Event that this person is a performer or participant in. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPerformerIn() { return $this->performerIn; } /** * Setter of Performer in * * Event that this person is a performer or participant in. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of performerIn */ public function setPerformerIn($value) { $this->performerIn = $value; } /** * Getter of Related to * * The most generic familial relation. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelatedTo() { return $this->relatedTo; } /** * Setter of Related to * * The most generic familial relation. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of relatedTo */ public function setRelatedTo($value) { $this->relatedTo = $value; } /** * Getter of Seeks * * A pointer to products or services sought by the organization or person * (demand). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSeeks() { return $this->seeks; } /** * Setter of Seeks * * A pointer to products or services sought by the organization or person * (demand). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of seeks */ public function setSeeks($value) { $this->seeks = $value; } /** * Getter of Sibling * * A sibling of the person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSibling() { return $this->sibling; } /** * Setter of Sibling * * A sibling of the person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of sibling */ public function setSibling($value) { $this->sibling = $value; } /** * Getter of Siblings * * A sibling of the person (legacy spelling; see singular form, sibling). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getSiblings() { return $this->siblings; } /** * Setter of Siblings * * A sibling of the person (legacy spelling; see singular form, sibling). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of siblings */ public function setSiblings($value) { $this->siblings = $value; } /** * Add sibling to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of sibling */ public function addSibling($value) { $this->siblings[] = $value; } /** * Remove sibling to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of sibling */ public function removeSibling($value) { $key = array_search($value, $this->siblings); if($key !== false) { unset($this->siblings[$key]); } } /** * Getter of Spouse * * The person's spouse. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSpouse() { return $this->spouse; } /** * Setter of Spouse * * The person's spouse. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of spouse */ public function setSpouse($value) { $this->spouse = $value; } /** * Getter of Tax ID * * The Tax / Fiscal ID of the organization or person, e.g. the TIN in the * US or the CIF/NIF in Spain. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getTaxID() { return $this->taxID; } /** * Setter of Tax ID * * The Tax / Fiscal ID of the organization or person, e.g. the TIN in the * US or the CIF/NIF in Spain. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of taxID */ public function setTaxID($value) { $this->taxID = $value; } /** * Getter of Telephone * * The telephone number. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getTelephone() { return $this->telephone; } /** * Setter of Telephone * * The telephone number. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of telephone */ public function setTelephone($value) { $this->telephone = $value; } /** * Getter of Vat ID * * The Value-added Tax ID of the organisation or person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getVatID() { return $this->vatID; } /** * Setter of Vat ID * * The Value-added Tax ID of the organisation or person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of vatID */ public function setVatID($value) { $this->vatID = $value; } /** * Getter of Work Location * * A contact location for a person's place of work. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getWorkLocation() { return $this->workLocation; } /** * Setter of Work Location * * A contact location for a person's place of work. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of workLocation */ public function setWorkLocation($value) { $this->workLocation = $value; } /** * Getter of Works for * * Organizations that the person works for. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getWorksFor() { return $this->worksFor; } /** * Setter of Works for * * Organizations that the person works for. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of worksFor */ public function setWorksFor($value) { $this->worksFor = $value; } } <file_sep>/functions.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

include_once 'GeneratorClass/DataType.php';
include_once 'GeneratorClass/Property.php';
include_once 'GeneratorClass/ClassType.php';

/**
 * Get the schema from schema.org (with a 24h local file cache).
 *
 * The schema is only re-downloaded when the cache file is missing or older
 * than 24 hours. A failed download no longer poisons the cache: the payload
 * is cached only on success, a stale cache is reused as fallback when the
 * download fails, and a \RuntimeException is thrown when neither is
 * available (previously a failed download cached `false`, i.e. wrote an
 * empty cache file that was then served for the next 24 hours).
 *
 * @return mixed Decoded schema as returned by json_decode()
 *
 * @throws \RuntimeException When the schema can neither be downloaded nor
 *                           read from an existing cache file.
 */
function loadSchema()
{
    $cacheFileName = __DIR__.'/all.json.cache';

    // 24h cache window
    $cacheIsFresh = file_exists($cacheFileName)
        && filemtime($cacheFileName) >= time() - 24 * 60 * 60;

    if ($cacheIsFresh) {
        echo "From Cache...\n";
        $schema = file_get_contents($cacheFileName);
    } else {
        echo "Downloading...\n";
        $schema = file_get_contents('http://schema.rdfs.org/all.json');

        if ($schema !== false) {
            echo "Caching...\n";
            file_put_contents($cacheFileName, $schema);
        } elseif (file_exists($cacheFileName)) {
            // Download failed: fall back to the stale cache instead of
            // overwriting it with an empty payload.
            echo "Download failed, using stale cache...\n";
            $schema = file_get_contents($cacheFileName);
        } else {
            throw new \RuntimeException(
                'Unable to download the schema and no cache file is available'
            );
        }
    }

    echo "Loaded !\n";

    return json_decode($schema);
}

/**
 * Default PHP file header (MIT license banner) for generated files.
 *
 * @return string
 */
function phpFileHeader()
{
    $header = "<?php\n\n"
        . "/**\n"
        . " * The MIT License (MIT)\n"
        . " * \n"
        . " * Copyright (c) " . date('Y') . " <NAME> <<EMAIL>>\n"
        . " * \n"
        . " * Permission is hereby granted, free of charge, to any person obtaining a copy\n"
        . " * of this software and associated documentation files (the \"Software\"), to deal\n"
        . " * in the Software without restriction, including without limitation the rights\n"
        . " * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n"
        . " * copies of the Software, and to permit persons to whom the Software is\n"
        . " * furnished to do so, subject to the following conditions:\n"
        . " * \n"
        . " * The above copyright notice and this permission notice shall be included in\n"
        . " * all copies or substantial portions of the Software.\n"
        . " * \n"
        . " * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n"
        . " * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n" .
" * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n"
        . " * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n"
        . " * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n"
        . " * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n"
        . " * THE SOFTWARE.\n"
        . " */\n\n";

    return $header;
}

/**
 * Build the namespace of a generated bundle class.
 *
 * e.g. classTypeNameSpace('Person', 'Model', 'PersonInterface')
 *      => 'SchemaRepository\Bundle\PersonBundle\Model\PersonInterface'
 *
 * @param string $classTypeName Schema type name (without the "Bundle" suffix)
 * @param string $folder        Optional sub-folder (e.g. 'Model')
 * @param string $classLoaded   Optional class name inside the folder
 *
 * @return string
 */
function classTypeNameSpace($classTypeName, $folder = '', $classLoaded = '')
{
    $namespace = 'SchemaRepository\\Bundle\\'.$classTypeName.'Bundle';

    if (!empty($folder)) {
        $namespace .= '\\'.$folder;
    }

    if (!empty($classLoaded)) {
        $namespace .= '\\'.$classLoaded;
    }

    return $namespace;
}

/**
 * Recursively delete a file or the contents of a directory.
 *
 * Fixes the original return value: with $deleteRoot === false the old
 * expression "... && ($deleteRoot && rmdir($path))" always evaluated to
 * false, so a fully successful top-level deletion was reported as failure.
 *
 * NOTE(review): glob('dir/*') does not match dot-files, so hidden entries
 * are left behind (same behaviour as before) and rmdir() can then fail on
 * a non-empty directory.
 *
 * @param string $path       File or directory to delete
 * @param bool   $deleteRoot Also remove the directory $path itself
 *
 * @return bool True when everything requested was deleted
 */
function deleteDir($path, $deleteRoot = false)
{
    if (empty($path)) {
        // Guard: an empty path would make glob() scan the filesystem root.
        return false;
    }

    if (is_file($path)) {
        return @unlink($path);
    }

    $allDeleted = true;
    foreach (glob($path.'/*') ?: array() as $entry) {
        $allDeleted = deleteDir($entry, true) && $allDeleted;
    }

    return $allDeleted && (!$deleteRoot || rmdir($path));
}
<file_sep>/GenerationSamples/UnitPriceSpecificationBundle/Model/UnitPriceSpecificationInterface.php
<?php

/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\UnitPriceSpecificationBundle\Model; use SchemaRepository\Bundle\PriceSpecificationBundle\Model\PriceSpecificationInterface; /** * Interface of Unit Price Specification Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\UnitPriceSpecificationBundle\Model */ interface UnitPriceSpecificationInterface extends PriceSpecificationInterface { /** * Getter of Billing Increment * * This property specifies the minimal quantity and rounding increment * that will be the basis for the billing. The unit of measurement is * specified by the unitCode property. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getBillingIncrement(); /** * Getter of Price Type * * A short text or acronym indicating multiple price specifications for * the same offer, e.g. SRP for the suggested retail price or INVOICE for * the invoice price, mostly used in the car industry. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPriceType(); /** * Getter of Unit Code * * The unit of measurement given using the UN/CEFACT Common Code (3 * characters). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getUnitCode(); } <file_sep>/GenerationSamples/MusicGroupBundle/Model/AbstractMusicGroup.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\MusicGroupBundle\Model; use SchemaRepository\Bundle\MusicGroupBundle\Model\MusicGroupInterface; use SchemaRepository\Bundle\PerformingGroupBundle\Model\AbstractPerformingGroup; /** * Model of Music Group * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MusicGroupBundle\Model */ abstract class AbstractMusicGroup extends AbstractPerformingGroup implements MusicGroupInterface { /** * A music album. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $album; /** * A collection of music albums (legacy spelling; see singular form, * album). 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $albums; /** * A member of the music group—for example, John, Paul, George, or * Ringo. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $musicGroupMember; /** * A music recording (track)—usually a single song. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $track; /** * A music recording (track)—usually a single song (legacy spelling; * see singular form, track). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $tracks; /** * Getter of Album * * A music album. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAlbum() { return $this->album; } /** * Setter of Album * * A music album. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of album */ public function setAlbum($value) { $this->album = $value; } /** * Getter of Albums * * A collection of music albums (legacy spelling; see singular form, * album). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getAlbums() { return $this->albums; } /** * Setter of Albums * * A collection of music albums (legacy spelling; see singular form, * album). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of albums */ public function setAlbums($value) { $this->albums = $value; } /** * Add album to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of album */ public function addAlbum($value) { $this->albums[] = $value; } /** * Remove album to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of album */ public function removeAlbum($value) { $key = array_search($value, $this->albums); if($key !== false) { unset($this->albums[$key]); } } /** * Getter of Music Group Member * * A member of the music group—for example, John, Paul, George, or * Ringo. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMusicGroupMember() { return $this->musicGroupMember; } /** * Setter of Music Group Member * * A member of the music group—for example, John, Paul, George, or * Ringo. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of musicGroupMember */ public function setMusicGroupMember($value) { $this->musicGroupMember = $value; } /** * Getter of Track * * A music recording (track)—usually a single song. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTrack() { return $this->track; } /** * Setter of Track * * A music recording (track)—usually a single song. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of track */ public function setTrack($value) { $this->track = $value; } /** * Getter of Tracks * * A music recording (track)—usually a single song (legacy spelling; * see singular form, track). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getTracks() { return $this->tracks; } /** * Setter of Tracks * * A music recording (track)—usually a single song (legacy spelling; * see singular form, track). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of tracks */ public function setTracks($value) { $this->tracks = $value; } /** * Add track to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of track */ public function addTrack($value) { $this->tracks[] = $value; } /** * Remove track to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of track */ public function removeTrack($value) { $key = array_search($value, $this->tracks); if($key !== false) { unset($this->tracks[$key]); } } } <file_sep>/GenerationSamples/TVSeriesBundle/Model/TVSeriesInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\TVSeriesBundle\Model; use SchemaRepository\Bundle\CreativeWorkBundle\Model\CreativeWorkInterface; /** * Interface of TV Series Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\TVSeriesBundle\Model */ interface TVSeriesInterface extends CreativeWorkInterface { /** * Getter of Actor * * A cast member of the movie, TV series, season, or episode, or video. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getActor(); /** * Getter of Actors * * A cast member of the movie, TV series, season, or episode, or video. * (legacy spelling; see singular form, actor) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getActors(); /** * Getter of Director * * The director of the movie, TV episode, or series. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDirector(); /** * Getter of End Date * * The end date and time of the event (in ISO 8601 date format). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEndDate(); /** * Getter of Episode * * An episode of a TV series or season. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEpisode(); /** * Getter of Episodes * * The episode of a TV series or season (legacy spelling; see singular * form, episode). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getEpisodes(); /** * Getter of Music by * * The composer of the movie or TV soundtrack. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMusicBy(); /** * Getter of Number of Episodes * * The number of episodes in this season or series. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getNumberOfEpisodes(); /** * Getter of Producer * * The producer of the movie, TV series, season, or episode, or video. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getProducer(); /** * Getter of Production Company * * The production company or studio that made the movie, TV series, * season, or episode, or video. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getProductionCompany(); /** * Getter of Season * * A season of a TV series. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSeason(); /** * Getter of Seasons * * The seasons of the TV series (legacy spelling; see singular form, * season). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getSeasons(); /** * Getter of Start Date * * The start date and time of the event (in ISO 8601 date format). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStartDate(); /** * Getter of Trailer * * The trailer of the movie or TV series, season, or episode. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getTrailer(); } <file_sep>/GenerationSamples/MedicalStudyBundle/Model/AbstractMedicalStudy.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\MedicalStudyBundle\Model; use SchemaRepository\Bundle\MedicalStudyBundle\Model\MedicalStudyInterface; use SchemaRepository\Bundle\MedicalEntityBundle\Model\AbstractMedicalEntity; /** * Model of Medical Study * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MedicalStudyBundle\Model */ abstract class AbstractMedicalStudy extends AbstractMedicalEntity implements MedicalStudyInterface { /** * Expected or actual outcomes of the study. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $outcome; /** * Any characteristics of the population used in the study, e.g. 'males * under 65'. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $population; /** * Sponsor of the study. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $sponsor; /** * The status of the study (enumerated). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $status; /** * The location in which the study is taking/took place. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $studyLocation; /** * A subject of the study, i.e. one of the medical conditions, therapies, * devices, drugs, etc. investigated by the study. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $studySubject; /** * Getter of Outcome * * Expected or actual outcomes of the study. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getOutcome() { return $this->outcome; } /** * Setter of Outcome * * Expected or actual outcomes of the study. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of outcome */ public function setOutcome($value) { $this->outcome = $value; } /** * Getter of Population * * Any characteristics of the population used in the study, e.g. 'males * under 65'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPopulation() { return $this->population; } /** * Setter of Population * * Any characteristics of the population used in the study, e.g. 'males * under 65'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of population */ public function setPopulation($value) { $this->population = $value; } /** * Getter of Sponsor * * Sponsor of the study. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSponsor() { return $this->sponsor; } /** * Setter of Sponsor * * Sponsor of the study. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of sponsor */ public function setSponsor($value) { $this->sponsor = $value; } /** * Getter of Status * * The status of the study (enumerated). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStatus() { return $this->status; } /** * Setter of Status * * The status of the study (enumerated). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of status */ public function setStatus($value) { $this->status = $value; } /** * Getter of Study Location * * The location in which the study is taking/took place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStudyLocation() { return $this->studyLocation; } /** * Setter of Study Location * * The location in which the study is taking/took place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of studyLocation */ public function setStudyLocation($value) { $this->studyLocation = $value; } /** * Getter of Study Subject * * A subject of the study, i.e. one of the medical conditions, therapies, * devices, drugs, etc. investigated by the study. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStudySubject() { return $this->studySubject; } /** * Setter of Study Subject * * A subject of the study, i.e. one of the medical conditions, therapies, * devices, drugs, etc. investigated by the study. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of studySubject */ public function setStudySubject($value) { $this->studySubject = $value; } } <file_sep>/GenerationSamples/APIReferenceBundle/Model/AbstractAPIReference.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\APIReferenceBundle\Model; use SchemaRepository\Bundle\APIReferenceBundle\Model\APIReferenceInterface; use SchemaRepository\Bundle\TechArticleBundle\Model\AbstractTechArticle; /** * Model of API Reference * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\APIReferenceBundle\Model */ abstract class AbstractAPIReference extends AbstractTechArticle implements APIReferenceInterface { /** * Library file name e.g., mscorlib.dll, system.web.dll * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $assembly; /** * Associated product/technology version. e.g., .NET Framework 4.5 * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $assemblyVersion; /** * Indicates whether API is managed or unmanaged. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $programmingModel; /** * Type of app development: phone, Metro style, desktop, XBox, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $targetPlatform; /** * Getter of Assembly * * Library file name e.g., mscorlib.dll, system.web.dll * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAssembly() { return $this->assembly; } /** * Setter of Assembly * * Library file name e.g., mscorlib.dll, system.web.dll * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of assembly */ public function setAssembly($value) { $this->assembly = $value; } /** * Getter of Assembly Version * * Associated product/technology version. e.g., .NET Framework 4.5 * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAssemblyVersion() { return $this->assemblyVersion; } /** * Setter of Assembly Version * * Associated product/technology version. 
e.g., .NET Framework 4.5 * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of assemblyVersion */ public function setAssemblyVersion($value) { $this->assemblyVersion = $value; } /** * Getter of Programming Model * * Indicates whether API is managed or unmanaged. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getProgrammingModel() { return $this->programmingModel; } /** * Setter of Programming Model * * Indicates whether API is managed or unmanaged. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of programmingModel */ public function setProgrammingModel($value) { $this->programmingModel = $value; } /** * Getter of Target Platform * * Type of app development: phone, Metro style, desktop, XBox, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getTargetPlatform() { return $this->targetPlatform; } /** * Setter of Target Platform * * Type of app development: phone, Metro style, desktop, XBox, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of targetPlatform */ public function setTargetPlatform($value) { $this->targetPlatform = $value; } } <file_sep>/GenerationSamples/DemandBundle/Model/AbstractDemand.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\DemandBundle\Model; use SchemaRepository\Bundle\DemandBundle\Model\DemandInterface; use SchemaRepository\Bundle\IntangibleBundle\Model\AbstractIntangible; /** * Model of Demand * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\DemandBundle\Model */ abstract class AbstractDemand extends AbstractIntangible implements DemandInterface { /** * The payment method(s) accepted by seller for this offer. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $acceptedPaymentMethod; /** * The amount of time that is required between accepting the offer and * the actual usage of the resource or service. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $advanceBookingRequirement; /** * The availability of this item—for example In stock, Out of stock, * Pre-order, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $availability; /** * The end of the availability of the product or service included in the * offer. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var \DateTime */ protected $availabilityEnds; /** * The beginning of the availability of the product or service included * in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var \DateTime */ protected $availabilityStarts; /** * The place(s) from which the offer can be obtained (e.g. store * locations). 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $availableAtOrFrom; /** * The delivery method(s) available for this offer. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $availableDeliveryMethod; /** * The business function (e.g. sell, lease, repair, dispose) of the offer * or component of a bundle (TypeAndQuantityNode). The default is * http://purl.org/goodrelations/v1#Sell. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $businessFunction; /** * The typical delay between the receipt of the order and the goods * leaving the warehouse. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $deliveryLeadTime; /** * The type(s) of customers for which the given offer is valid. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $eligibleCustomerType; /** * The duration for which the given offer is valid. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $eligibleDuration; /** * The interval and unit of measurement of ordering quantities for which * the offer or price specification is valid. This allows e.g. specifying * that a certain freight charge is valid only for a certain quantity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $eligibleQuantity; /** * The ISO 3166-1 (ISO 3166-1 alpha-2) or ISO 3166-2 code, or the * GeoShape for the geo-political region(s) for which the offer or * delivery charge specification is valid. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $eligibleRegion; /** * The transaction volume, in a monetary unit, for which the offer or * price specification is valid, e.g. 
for indicating a minimal purchasing * volume, to express free shipping above a certain order volume, or to * limit the acceptance of credit cards to purchases to a certain minimal * amount. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $eligibleTransactionVolume; /** * The GTIN-13 code of the product, or the product to which the offer * refers. This is equivalent to 13-digit ISBN codes and EAN UCC-13. * Former 12-digit UPC codes can be converted into a GTIN-13 code by * simply adding a preceeding zero. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $gtin13; /** * The GTIN-14 code of the product, or the product to which the offer * refers. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $gtin14; /** * The GTIN-8 code of the product, or the product to which the offer * refers. This code is also known as EAN/UCC-8 or 8-digit EAN. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $gtin8; /** * This links to a node or nodes indicating the exact quantity of the * products included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $includesObject; /** * The current approximate inventory level for the item or items. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $inventoryLevel; /** * A predefined value from OfferItemCondition or a textual description of * the condition of the product or service, or the products or services * included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $itemCondition; /** * The item being sold. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $itemOffered; /** * The Manufacturer Part Number (MPN) of the product, or the product to * which the offer refers. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $mpn; /** * One or more detailed price specifications, indicating the unit price * and delivery or payment charges. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $priceSpecification; /** * The seller. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $seller; /** * The serial number or any alphanumeric identifier of a particular * product. When attached to an offer, it is a shortcut for the serial * number of the product included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $serialNumber; /** * The Stock Keeping Unit (SKU), i.e. a merchant-specific identifier for * a product or service, or the product to which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $sku; /** * The beginning of the validity of offer, price specification, or * opening hours data. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var \DateTime */ protected $validFrom; /** * The end of the validity of offer, price specification, or opening * hours data. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var \DateTime */ protected $validThrough; /** * The warranty promise(s) included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $warranty; /** * Getter of Accepted Payment Method * * The payment method(s) accepted by seller for this offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAcceptedPaymentMethod() { return $this->acceptedPaymentMethod; } /** * Setter of Accepted Payment Method * * The payment method(s) accepted by seller for this offer. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of acceptedPaymentMethod */ public function setAcceptedPaymentMethod($value) { $this->acceptedPaymentMethod = $value; } /** * Getter of Advance Booking Requirement * * The amount of time that is required between accepting the offer and * the actual usage of the resource or service. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAdvanceBookingRequirement() { return $this->advanceBookingRequirement; } /** * Setter of Advance Booking Requirement * * The amount of time that is required between accepting the offer and * the actual usage of the resource or service. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of advanceBookingRequirement */ public function setAdvanceBookingRequirement($value) { $this->advanceBookingRequirement = $value; } /** * Getter of Availability * * The availability of this item—for example In stock, Out of stock, * Pre-order, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAvailability() { return $this->availability; } /** * Setter of Availability * * The availability of this item—for example In stock, Out of stock, * Pre-order, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of availability */ public function setAvailability($value) { $this->availability = $value; } /** * Getter of Availability Ends * * The end of the availability of the product or service included in the * offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return \DateTime */ public function getAvailabilityEnds() { return $this->availabilityEnds; } /** * Setter of Availability Ends * * The end of the availability of the product or service included in the * offer. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param \DateTime $value Value of availabilityEnds */ public function setAvailabilityEnds($value) { $this->availabilityEnds = $value; } /** * Getter of Availability Starts * * The beginning of the availability of the product or service included * in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return \DateTime */ public function getAvailabilityStarts() { return $this->availabilityStarts; } /** * Setter of Availability Starts * * The beginning of the availability of the product or service included * in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param \DateTime $value Value of availabilityStarts */ public function setAvailabilityStarts($value) { $this->availabilityStarts = $value; } /** * Getter of Available At or From * * The place(s) from which the offer can be obtained (e.g. store * locations). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAvailableAtOrFrom() { return $this->availableAtOrFrom; } /** * Setter of Available At or From * * The place(s) from which the offer can be obtained (e.g. store * locations). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of availableAtOrFrom */ public function setAvailableAtOrFrom($value) { $this->availableAtOrFrom = $value; } /** * Getter of Available Delivery Method * * The delivery method(s) available for this offer. * * @author SchemaGenerator <<EMAIL>.fr> * * @access public * * @return mixed */ public function getAvailableDeliveryMethod() { return $this->availableDeliveryMethod; } /** * Setter of Available Delivery Method * * The delivery method(s) available for this offer. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of availableDeliveryMethod */ public function setAvailableDeliveryMethod($value) { $this->availableDeliveryMethod = $value; } /** * Getter of Business Function * * The business function (e.g. sell, lease, repair, dispose) of the offer * or component of a bundle (TypeAndQuantityNode). The default is * http://purl.org/goodrelations/v1#Sell. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBusinessFunction() { return $this->businessFunction; } /** * Setter of Business Function * * The business function (e.g. sell, lease, repair, dispose) of the offer * or component of a bundle (TypeAndQuantityNode). The default is * http://purl.org/goodrelations/v1#Sell. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of businessFunction */ public function setBusinessFunction($value) { $this->businessFunction = $value; } /** * Getter of Delivery Lead Time * * The typical delay between the receipt of the order and the goods * leaving the warehouse. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDeliveryLeadTime() { return $this->deliveryLeadTime; } /** * Setter of Delivery Lead Time * * The typical delay between the receipt of the order and the goods * leaving the warehouse. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of deliveryLeadTime */ public function setDeliveryLeadTime($value) { $this->deliveryLeadTime = $value; } /** * Getter of Eligible Customer Type * * The type(s) of customers for which the given offer is valid. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEligibleCustomerType() { return $this->eligibleCustomerType; } /** * Setter of Eligible Customer Type * * The type(s) of customers for which the given offer is valid. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of eligibleCustomerType */ public function setEligibleCustomerType($value) { $this->eligibleCustomerType = $value; } /** * Getter of Eligible Duration * * The duration for which the given offer is valid. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEligibleDuration() { return $this->eligibleDuration; } /** * Setter of Eligible Duration * * The duration for which the given offer is valid. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of eligibleDuration */ public function setEligibleDuration($value) { $this->eligibleDuration = $value; } /** * Getter of Eligible Quantity * * The interval and unit of measurement of ordering quantities for which * the offer or price specification is valid. This allows e.g. specifying * that a certain freight charge is valid only for a certain quantity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEligibleQuantity() { return $this->eligibleQuantity; } /** * Setter of Eligible Quantity * * The interval and unit of measurement of ordering quantities for which * the offer or price specification is valid. This allows e.g. specifying * that a certain freight charge is valid only for a certain quantity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of eligibleQuantity */ public function setEligibleQuantity($value) { $this->eligibleQuantity = $value; } /** * Getter of Eligible Region * * The ISO 3166-1 (ISO 3166-1 alpha-2) or ISO 3166-2 code, or the * GeoShape for the geo-political region(s) for which the offer or * delivery charge specification is valid. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEligibleRegion() { return $this->eligibleRegion; } /** * Setter of Eligible Region * * The ISO 3166-1 (ISO 3166-1 alpha-2) or ISO 3166-2 code, or the * GeoShape for the geo-political region(s) for which the offer or * delivery charge specification is valid. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of eligibleRegion */ public function setEligibleRegion($value) { $this->eligibleRegion = $value; } /** * Getter of Eligible Transaction Volume * * The transaction volume, in a monetary unit, for which the offer or * price specification is valid, e.g. for indicating a minimal purchasing * volume, to express free shipping above a certain order volume, or to * limit the acceptance of credit cards to purchases to a certain minimal * amount. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEligibleTransactionVolume() { return $this->eligibleTransactionVolume; } /** * Setter of Eligible Transaction Volume * * The transaction volume, in a monetary unit, for which the offer or * price specification is valid, e.g. for indicating a minimal purchasing * volume, to express free shipping above a certain order volume, or to * limit the acceptance of credit cards to purchases to a certain minimal * amount. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of eligibleTransactionVolume */ public function setEligibleTransactionVolume($value) { $this->eligibleTransactionVolume = $value; } /** * Getter of Gtin13 * * The GTIN-13 code of the product, or the product to which the offer * refers. This is equivalent to 13-digit ISBN codes and EAN UCC-13. * Former 12-digit UPC codes can be converted into a GTIN-13 code by * simply adding a preceeding zero. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGtin13() { return $this->gtin13; } /** * Setter of Gtin13 * * The GTIN-13 code of the product, or the product to which the offer * refers. This is equivalent to 13-digit ISBN codes and EAN UCC-13. * Former 12-digit UPC codes can be converted into a GTIN-13 code by * simply adding a preceeding zero. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of gtin13 */ public function setGtin13($value) { $this->gtin13 = $value; } /** * Getter of Gtin14 * * The GTIN-14 code of the product, or the product to which the offer * refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGtin14() { return $this->gtin14; } /** * Setter of Gtin14 * * The GTIN-14 code of the product, or the product to which the offer * refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of gtin14 */ public function setGtin14($value) { $this->gtin14 = $value; } /** * Getter of Gtin8 * * The GTIN-8 code of the product, or the product to which the offer * refers. This code is also known as EAN/UCC-8 or 8-digit EAN. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGtin8() { return $this->gtin8; } /** * Setter of Gtin8 * * The GTIN-8 code of the product, or the product to which the offer * refers. This code is also known as EAN/UCC-8 or 8-digit EAN. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of gtin8 */ public function setGtin8($value) { $this->gtin8 = $value; } /** * Getter of Includes Object * * This links to a node or nodes indicating the exact quantity of the * products included in the offer. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIncludesObject() { return $this->includesObject; } /** * Setter of Includes Object * * This links to a node or nodes indicating the exact quantity of the * products included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of includesObject */ public function setIncludesObject($value) { $this->includesObject = $value; } /** * Getter of Inventory Level * * The current approximate inventory level for the item or items. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getInventoryLevel() { return $this->inventoryLevel; } /** * Setter of Inventory Level * * The current approximate inventory level for the item or items. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of inventoryLevel */ public function setInventoryLevel($value) { $this->inventoryLevel = $value; } /** * Getter of Item Condition * * A predefined value from OfferItemCondition or a textual description of * the condition of the product or service, or the products or services * included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getItemCondition() { return $this->itemCondition; } /** * Setter of Item Condition * * A predefined value from OfferItemCondition or a textual description of * the condition of the product or service, or the products or services * included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of itemCondition */ public function setItemCondition($value) { $this->itemCondition = $value; } /** * Getter of Item Offered * * The item being sold. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getItemOffered() { return $this->itemOffered; } /** * Setter of Item Offered * * The item being sold. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of itemOffered */ public function setItemOffered($value) { $this->itemOffered = $value; } /** * Getter of Mpn * * The Manufacturer Part Number (MPN) of the product, or the product to * which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getMpn() { return $this->mpn; } /** * Setter of Mpn * * The Manufacturer Part Number (MPN) of the product, or the product to * which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of mpn */ public function setMpn($value) { $this->mpn = $value; } /** * Getter of Price Specification * * One or more detailed price specifications, indicating the unit price * and delivery or payment charges. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPriceSpecification() { return $this->priceSpecification; } /** * Setter of Price Specification * * One or more detailed price specifications, indicating the unit price * and delivery or payment charges. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of priceSpecification */ public function setPriceSpecification($value) { $this->priceSpecification = $value; } /** * Getter of Seller * * The seller. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSeller() { return $this->seller; } /** * Setter of Seller * * The seller. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of seller */ public function setSeller($value) { $this->seller = $value; } /** * Getter of Serial Number * * The serial number or any alphanumeric identifier of a particular * product. When attached to an offer, it is a shortcut for the serial * number of the product included in the offer. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSerialNumber() { return $this->serialNumber; } /** * Setter of Serial Number * * The serial number or any alphanumeric identifier of a particular * product. When attached to an offer, it is a shortcut for the serial * number of the product included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of serialNumber */ public function setSerialNumber($value) { $this->serialNumber = $value; } /** * Getter of Sku * * The Stock Keeping Unit (SKU), i.e. a merchant-specific identifier for * a product or service, or the product to which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSku() { return $this->sku; } /** * Setter of Sku * * The Stock Keeping Unit (SKU), i.e. a merchant-specific identifier for * a product or service, or the product to which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of sku */ public function setSku($value) { $this->sku = $value; } /** * Getter of Valid From * * The beginning of the validity of offer, price specification, or * opening hours data. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return \DateTime */ public function getValidFrom() { return $this->validFrom; } /** * Setter of Valid From * * The beginning of the validity of offer, price specification, or * opening hours data. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param \DateTime $value Value of validFrom */ public function setValidFrom($value) { $this->validFrom = $value; } /** * Getter of Valid Through * * The end of the validity of offer, price specification, or opening * hours data. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return \DateTime */ public function getValidThrough() { return $this->validThrough; } /** * Setter of Valid Through * * The end of the validity of offer, price specification, or opening * hours data. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param \DateTime $value Value of validThrough */ public function setValidThrough($value) { $this->validThrough = $value; } /** * Getter of Warranty * * The warranty promise(s) included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getWarranty() { return $this->warranty; } /** * Setter of Warranty * * The warranty promise(s) included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of warranty */ public function setWarranty($value) { $this->warranty = $value; } } <file_sep>/GenerationSamples/JobPostingBundle/Entity/AbstractJobPosting.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\JobPostingBundle\Entity; use SchemaRepository\Bundle\JobPostingBundle\Model\AbstractJobPosting; use Doctrine\ORM\Mapping as ORM; use Symfony\Component\Validator\Constraints as Assert; /** * Job Posting Entity * * @ODM\MappedSuperclass() */ abstract class AbstractJobPosting extends AbstractJobPosting { /** * {@inheritdoc} * * @ORM\Column(name="id", type="integer") * @ORM\Id * @ORM\GeneratedValue(strategy="AUTO") */ protected $id; /** * {@inheritdoc} * * @ORM\Column(name="additional_type", type="string", nullable=true) * @Assert\Type(type="string") */ protected $additionalType; /** * {@inheritdoc} * * @ORM\Column(name="description", type="string", nullable=true) * @Assert\Type(type="string") */ protected $description; /** * {@inheritdoc} * * @ORM\Column(name="image", type="string", nullable=true) * @Assert\Type(type="string") */ protected $image; /** * {@inheritdoc} * * @ORM\Column(name="name", type="string", nullable=true) * @Assert\Type(type="string") */ protected $name; /** * {@inheritdoc} * * @ORM\Column(name="same_as", type="string", nullable=true) * @Assert\Type(type="string") */ protected $sameAs; /** * {@inheritdoc} * * @ORM\Column(name="url", type="string", nullable=true) * @Assert\Type(type="string") */ protected $url; /** * {@inheritdoc} * * @ORM\Column(name="base_salary", type="float", nullable=true) */ protected $baseSalary; /** * {@inheritdoc} * * @ORM\Column(name="benefits", type="string", nullable=true) * @Assert\Type(type="string") */ protected $benefits; /** * {@inheritdoc} * * @ORM\Column(name="date_posted", type="string", nullable=true) */ protected $datePosted; /** * {@inheritdoc} * * @ORM\Column(name="education_requirements", type="string", 
nullable=true) * @Assert\Type(type="string") */ protected $educationRequirements; /** * {@inheritdoc} * * @ORM\Column(name="employment_type", type="string", nullable=true) * @Assert\Type(type="string") */ protected $employmentType; /** * {@inheritdoc} * * @ORM\Column(name="experience_requirements", type="string", nullable=true) * @Assert\Type(type="string") */ protected $experienceRequirements; /** * {@inheritdoc} * * @ORM\Column(name="hiring_organization", type="string", nullable=true) */ protected $hiringOrganization; /** * {@inheritdoc} * * @ORM\Column(name="incentives", type="string", nullable=true) * @Assert\Type(type="string") */ protected $incentives; /** * {@inheritdoc} * * @ORM\Column(name="industry", type="string", nullable=true) * @Assert\Type(type="string") */ protected $industry; /** * {@inheritdoc} * * @ORM\Column(name="job_location", type="string", nullable=true) */ protected $jobLocation; /** * {@inheritdoc} * * @ORM\Column(name="occupational_category", type="string", nullable=true) * @Assert\Type(type="string") */ protected $occupationalCategory; /** * {@inheritdoc} * * @ORM\Column(name="qualifications", type="string", nullable=true) * @Assert\Type(type="string") */ protected $qualifications; /** * {@inheritdoc} * * @ORM\Column(name="responsibilities", type="string", nullable=true) * @Assert\Type(type="string") */ protected $responsibilities; /** * {@inheritdoc} * * @ORM\Column(name="salary_currency", type="string", nullable=true) * @Assert\Type(type="string") */ protected $salaryCurrency; /** * {@inheritdoc} * * @ORM\Column(name="skills", type="string", nullable=true) * @Assert\Type(type="string") */ protected $skills; /** * {@inheritdoc} * * @ORM\Column(name="special_commitments", type="string", nullable=true) * @Assert\Type(type="string") */ protected $specialCommitments; /** * {@inheritdoc} * * @ORM\Column(name="title", type="string", nullable=true) * @Assert\Type(type="string") */ protected $title; /** * {@inheritdoc} * * 
@ORM\Column(name="work_hours", type="string", nullable=true) * @Assert\Type(type="string") */ protected $workHours; } <file_sep>/GenerationSamples/FoodEstablishmentBundle/Model/AbstractFoodEstablishment.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\FoodEstablishmentBundle\Model; use SchemaRepository\Bundle\FoodEstablishmentBundle\Model\FoodEstablishmentInterface; use SchemaRepository\Bundle\LocalBusinessBundle\Model\AbstractLocalBusiness; /** * Model of Food Establishment * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\FoodEstablishmentBundle\Model */ abstract class AbstractFoodEstablishment extends AbstractLocalBusiness implements FoodEstablishmentInterface { /** * Either Yes/No, or a URL at which reservations can be made. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string|string */ protected $acceptsReservations; /** * Either the actual menu or a URL of the menu. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string|string */ protected $menu; /** * The cuisine of the restaurant. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $servesCuisine; /** * Getter of Accepts Reservations * * Either Yes/No, or a URL at which reservations can be made. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string|string */ public function getAcceptsReservations() { return $this->acceptsReservations; } /** * Setter of Accepts Reservations * * Either Yes/No, or a URL at which reservations can be made. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string|string $value Value of acceptsReservations */ public function setAcceptsReservations($value) { $this->acceptsReservations = $value; } /** * Getter of Menu * * Either the actual menu or a URL of the menu. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string|string */ public function getMenu() { return $this->menu; } /** * Setter of Menu * * Either the actual menu or a URL of the menu. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string|string $value Value of menu */ public function setMenu($value) { $this->menu = $value; } /** * Getter of Serves Cuisine * * The cuisine of the restaurant. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getServesCuisine() { return $this->servesCuisine; } /** * Setter of Serves Cuisine * * The cuisine of the restaurant. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of servesCuisine */ public function setServesCuisine($value) { $this->servesCuisine = $value; } } <file_sep>/GenerationSamples/AnatomicalStructureBundle/Model/AnatomicalStructureInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\AnatomicalStructureBundle\Model; use SchemaRepository\Bundle\MedicalEntityBundle\Model\MedicalEntityInterface; /** * Interface of Anatomical Structure Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\AnatomicalStructureBundle\Model */ interface AnatomicalStructureInterface extends MedicalEntityInterface { /** * Getter of Associated Pathophysiology * * If applicable, a description of the pathophysiology associated with * the anatomical system, including potential abnormal changes in the * mechanical, physical, and biochemical functions of the system. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAssociatedPathophysiology(); /** * Getter of Body Location * * Location in the body of the anatomical structure. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getBodyLocation(); /** * Getter of Connected to * * Other anatomical structures to which this structure is connected. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getConnectedTo(); /** * Getter of Diagram * * An image containing a diagram that illustrates the structure and/or * its component substructures and/or connections with other structures. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDiagram(); /** * Getter of Function * * Function of the anatomical structure. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getFunction(); /** * Getter of Part of System * * The anatomical or organ system that this structure is part of. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPartOfSystem(); /** * Getter of Related Condition * * A medical condition associated with this anatomy. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelatedCondition(); /** * Getter of Related Therapy * * A medical therapy related to this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelatedTherapy(); /** * Getter of Sub Structure * * Component (sub-)structure(s) that comprise this anatomical structure. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSubStructure(); } <file_sep>/GenerationSamples/MusicGroupBundle/Model/MusicGroupInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/
namespace SchemaRepository\Bundle\MusicGroupBundle\Model;

use SchemaRepository\Bundle\PerformingGroupBundle\Model\PerformingGroupInterface;

/**
 * Interface of Music Group Model.
 *
 * Generated read accessors mirroring the schema-repository MusicGroup
 * type (one getter per schema property).
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\MusicGroupBundle\Model
 */
interface MusicGroupInterface extends PerformingGroupInterface
{
    /**
     * Getter of Album.
     *
     * A music album.
     *
     * @return mixed
     */
    public function getAlbum();

    /**
     * Getter of Albums.
     *
     * A collection of music albums (legacy spelling; see singular form,
     * album).
     *
     * @return array
     */
    public function getAlbums();

    /**
     * Getter of Music Group Member.
     *
     * A member of the music group—for example, John, Paul, George, or
     * Ringo.
     *
     * @return mixed
     */
    public function getMusicGroupMember();

    /**
     * Getter of Track.
     *
     * A music recording (track)—usually a single song.
     *
     * @return mixed
     */
    public function getTrack();

    /**
     * Getter of Tracks.
     *
     * A music recording (track)—usually a single song (legacy spelling;
     * see singular form, track).
     *
     * @return array
     */
    public function getTracks();
}
<file_sep>/GenerationSamples/DrugBundle/Entity/AbstractDrug.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
*/
namespace SchemaRepository\Bundle\DrugBundle\Entity;

// FIX: the generator imported Model\AbstractDrug under its own short name and
// then declared "class AbstractDrug extends AbstractDrug" — a PHP fatal error
// ("Cannot declare class … name is already in use" / self-extension). Alias
// the parent model class so the entity can extend it.
use SchemaRepository\Bundle\DrugBundle\Model\AbstractDrug as BaseDrug;
use Doctrine\ORM\Mapping as ORM;
use Symfony\Component\Validator\Constraints as Assert;

/**
 * Drug mapped superclass: Doctrine ORM column mappings for the generated
 * Drug model fields.
 *
 * FIX: the annotation used an @ODM alias that is never imported; only
 * Doctrine\ORM\Mapping (as ORM) is, and every field below is mapped with
 * @ORM\Column — corrected to @ORM\MappedSuperclass.
 *
 * @ORM\MappedSuperclass()
 */
abstract class AbstractDrug extends BaseDrug
{
    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="id", type="integer")
     * @ORM\Id
     * @ORM\GeneratedValue(strategy="AUTO")
     */
    protected $id;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="additional_type", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $additionalType;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="description", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $description;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="image", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $image;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="name", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $name;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="same_as", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $sameAs;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="url", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $url;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="alternate_name", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $alternateName;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="code", type="string", nullable=true)
     */
    protected $code;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="guideline", type="string", nullable=true)
     */
    protected $guideline;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="medicine_system", type="string", nullable=true)
     */
    protected $medicineSystem;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="recognizing_authority", type="string", nullable=true)
     */
    protected $recognizingAuthority;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="relevant_specialty", type="string", nullable=true)
     */
    protected $relevantSpecialty;

    /**
     *
{@inheritdoc}
     *
     * @ORM\Column(name="study", type="string", nullable=true)
     */
    protected $study;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="adverse_outcome", type="string", nullable=true)
     */
    protected $adverseOutcome;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="contraindication", type="string", nullable=true)
     */
    protected $contraindication;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="duplicate_therapy", type="string", nullable=true)
     */
    protected $duplicateTherapy;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="indication", type="string", nullable=true)
     */
    protected $indication;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="serious_adverse_outcome", type="string", nullable=true)
     */
    protected $seriousAdverseOutcome;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="active_ingredient", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $activeIngredient;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="administration_route", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $administrationRoute;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="alcohol_warning", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $alcoholWarning;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="available_strength", type="string", nullable=true)
     */
    protected $availableStrength;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="breastfeeding_warning", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $breastfeedingWarning;

    /**
     * {@inheritdoc}
     *
     * NOTE(review): "clincal" spelling is kept deliberately — presumably it
     * mirrors the (historically misspelled) schema.org property name; confirm
     * against the generator's schema source before "fixing" it.
     *
     * @ORM\Column(name="clincal_pharmacology", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $clincalPharmacology;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="cost", type="string", nullable=true)
     */
    protected $cost;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="dosage_form", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $dosageForm;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="dose_schedule", type="string", nullable=true)
     */
    protected $doseSchedule;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="drug_class", type="string", nullable=true)
     */
    protected $drugClass;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="food_warning", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $foodWarning;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="interacting_drug", type="string", nullable=true)
     */
    protected $interactingDrug;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="is_available_generically", type="boolean", nullable=true)
     */
    protected $isAvailableGenerically;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="is_proprietary", type="boolean", nullable=true)
     */
    protected $isProprietary;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="label_details", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $labelDetails;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="legal_status", type="string", nullable=true)
     */
    protected $legalStatus;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="manufacturer", type="string", nullable=true)
     */
    protected $manufacturer;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="mechanism_of_action", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $mechanismOfAction;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="non_proprietary_name", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $nonProprietaryName;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="overdosage", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $overdosage;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="pregnancy_category", type="string", nullable=true)
     */
    protected $pregnancyCategory;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="pregnancy_warning", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $pregnancyWarning;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="prescribing_info", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $prescribingInfo;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="prescription_status", type="string", nullable=true)
     */
    protected
$prescriptionStatus;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="related_drug", type="string", nullable=true)
     */
    protected $relatedDrug;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="warning", type="string", nullable=true)
     */
    protected $warning;
}
<file_sep>/GenerationSamples/OfferBundle/Model/OfferInterface.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
namespace SchemaRepository\Bundle\OfferBundle\Model;

use SchemaRepository\Bundle\IntangibleBundle\Model\IntangibleInterface;

/**
 * Interface of Offer Model.
 *
 * Generated read accessors mirroring the schema-repository Offer type
 * (one getter per schema property).
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\OfferBundle\Model
 */
interface OfferInterface extends IntangibleInterface
{
    /**
     * The payment method(s) accepted by seller for this offer.
     *
     * @return mixed
     */
    public function getAcceptedPaymentMethod();

    /**
     * An additional offer that can only be obtained in combination with the
     * first base offer (e.g. supplements and extensions that are available
     * for a surcharge).
     *
     * @return mixed
     */
    public function getAddOn();

    /**
     * The amount of time that is required between accepting the offer and
     * the actual usage of the resource or service.
     *
     * @return mixed
     */
    public function getAdvanceBookingRequirement();

    /**
     * The overall rating, based on a collection of reviews or ratings, of
     * the item.
     *
     * @return mixed
     */
    public function getAggregateRating();

    /**
     * The availability of this item—for example In stock, Out of stock,
     * Pre-order, etc.
     *
     * @return mixed
     */
    public function getAvailability();

    /**
     * The end of the availability of the product or service included in the
     * offer.
     *
     * @return \DateTime
     */
    public function getAvailabilityEnds();

    /**
     * The beginning of the availability of the product or service included
     * in the offer.
     *
     * @return \DateTime
     */
    public function getAvailabilityStarts();

    /**
     * The place(s) from which the offer can be obtained (e.g. store
     * locations).
     *
     * @return mixed
     */
    public function getAvailableAtOrFrom();

    /**
     * The delivery method(s) available for this offer.
     *
     * @return mixed
     */
    public function getAvailableDeliveryMethod();

    /**
     * The business function (e.g. sell, lease, repair, dispose) of the offer
     * or component of a bundle (TypeAndQuantityNode). The default is
     * http://purl.org/goodrelations/v1#Sell.
     *
     * @return mixed
     */
    public function getBusinessFunction();

    /**
     * A category for the item. Greater signs or slashes can be used to
     * informally indicate a category hierarchy.
     *
     * @return mixed
     */
    public function getCategory();

    /**
     * The typical delay between the receipt of the order and the goods
     * leaving the warehouse.
     *
     * @return mixed
     */
    public function getDeliveryLeadTime();

    /**
     * The type(s) of customers for which the given offer is valid.
     *
     * @return mixed
     */
    public function getEligibleCustomerType();

    /**
     * The duration for which the given offer is valid.
     *
     * @return mixed
     */
    public function getEligibleDuration();

    /**
     * The interval and unit of measurement of ordering quantities for which
     * the offer or price specification is valid. This allows e.g. specifying
     * that a certain freight charge is valid only for a certain quantity.
     *
     * @return mixed
     */
    public function getEligibleQuantity();

    /**
     * The ISO 3166-1 (ISO 3166-1 alpha-2) or ISO 3166-2 code, or the
     * GeoShape for the geo-political region(s) for which the offer or
     * delivery charge specification is valid.
     *
     * @return mixed
     */
    public function getEligibleRegion();

    /**
     * The transaction volume, in a monetary unit, for which the offer or
     * price specification is valid, e.g. for indicating a minimal purchasing
     * volume, to express free shipping above a certain order volume, or to
     * limit the acceptance of credit cards to purchases to a certain minimal
     * amount.
     *
     * @return mixed
     */
    public function getEligibleTransactionVolume();

    /**
     * The GTIN-13 code of the product, or the product to which the offer
     * refers. This is equivalent to 13-digit ISBN codes and EAN UCC-13.
     * Former 12-digit UPC codes can be converted into a GTIN-13 code by
     * simply adding a preceeding zero.
     *
     * @return string
     */
    public function getGtin13();

    /**
     * The GTIN-14 code of the product, or the product to which the offer
     * refers.
     *
     * @return string
     */
    public function getGtin14();

    /**
     * The GTIN-8 code of the product, or the product to which the offer
     * refers. This code is also known as EAN/UCC-8 or 8-digit EAN.
     *
     * @return string
     */
    public function getGtin8();

    /**
     * This links to a node or nodes indicating the exact quantity of the
     * products included in the offer.
     *
     * @return mixed
     */
    public function getIncludesObject();

    /**
     * The current approximate inventory level for the item or items.
     *
     * @return mixed
     */
    public function getInventoryLevel();

    /**
     * A predefined value from OfferItemCondition or a textual description of
     * the condition of the product or service, or the products or services
     * included in the offer.
     *
     * @return mixed
     */
    public function getItemCondition();

    /**
     * The item being sold.
     *
     * @return mixed
     */
    public function getItemOffered();

    /**
     * The Manufacturer Part Number (MPN) of the product, or the product to
     * which the offer refers.
     *
     * @return string
     */
    public function getMpn();

    /**
     * The offer price of a product, or of a price component when attached to
     * PriceSpecification and its subtypes.
     *
     * @return string|integer|long|float|decimal
     */
    public function getPrice();

    /**
     * The currency (in 3-letter ISO 4217 format) of the offer price or a
     * price component, when attached to PriceSpecification and its subtypes.
     *
     * @return string
     */
    public function getPriceCurrency();

    /**
     * One or more detailed price specifications, indicating the unit price
     * and delivery or payment charges.
     *
     * @return mixed
     */
    public function getPriceSpecification();

    /**
     * The date after which the price is no longer available.
     *
     * @return mixed
     */
    public function getPriceValidUntil();

    /**
     * A review of the item.
     *
     * @return mixed
     */
    public function getReview();

    /**
     * Review of the item (legacy spelling; see singular form, review).
     *
     * @return array
     */
    public function getReviews();

    /**
     * The seller.
     *
     * @return mixed
     */
    public function getSeller();

    /**
     * The serial number or any alphanumeric identifier of a particular
     * product. When attached to an offer, it is a shortcut for the serial
     * number of the product included in the offer.
     *
     * @return string
     */
    public function getSerialNumber();

    /**
     * The Stock Keeping Unit (SKU), i.e. a merchant-specific identifier for
     * a product or service, or the product to which the offer refers.
     *
     * @return string
     */
    public function getSku();

    /**
     * The beginning of the validity of offer, price specification, or
     * opening hours data.
     *
     * @return \DateTime
     */
    public function getValidFrom();

    /**
     * The end of the validity of offer, price specification, or opening
     * hours data.
     *
     * @return \DateTime
     */
    public function getValidThrough();

    /**
     * The warranty promise(s) included in the offer.
     *
     * @return mixed
     */
    public function getWarranty();
}
<file_sep>/GenerationSamples/ContactPageBundle/Entity/AbstractContactPage.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
*/
namespace SchemaRepository\Bundle\ContactPageBundle\Entity;

// FIX: the generator imported Model\AbstractContactPage under its own short
// name and then declared "class AbstractContactPage extends
// AbstractContactPage" — a PHP fatal error ("Cannot declare class … name is
// already in use" / self-extension). Alias the parent model class instead.
use SchemaRepository\Bundle\ContactPageBundle\Model\AbstractContactPage as BaseContactPage;
use Doctrine\ORM\Mapping as ORM;
use Symfony\Component\Validator\Constraints as Assert;

/**
 * Contact Page mapped superclass: Doctrine ORM column mappings for the
 * generated ContactPage model fields.
 *
 * FIX: the annotation used an @ODM alias that is never imported; only
 * Doctrine\ORM\Mapping (as ORM) is, and every field below is mapped with
 * @ORM\Column — corrected to @ORM\MappedSuperclass.
 *
 * @ORM\MappedSuperclass()
 */
abstract class AbstractContactPage extends BaseContactPage
{
    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="id", type="integer")
     * @ORM\Id
     * @ORM\GeneratedValue(strategy="AUTO")
     */
    protected $id;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="additional_type", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $additionalType;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="description", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $description;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="image", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $image;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="name", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $name;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="same_as", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $sameAs;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="url", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $url;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="about", type="string", nullable=true)
     */
    protected $about;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="accountable_person", type="string", nullable=true)
     */
    protected $accountablePerson;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="aggregate_rating", type="string", nullable=true)
     */
    protected $aggregateRating;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="alternative_headline", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $alternativeHeadline;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="associated_media", type="string", nullable=true)
     */
    protected $associatedMedia;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="audience", type="string",
nullable=true)
     */
    protected $audience;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="audio", type="string", nullable=true)
     */
    protected $audio;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="author", type="string", nullable=true)
     */
    protected $author;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="award", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $award;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="awards", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $awards;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="citation", type="string", nullable=true)
     */
    protected $citation;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="comment", type="string", nullable=true)
     */
    protected $comment;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="content_location", type="string", nullable=true)
     */
    protected $contentLocation;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="content_rating", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $contentRating;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="contributor", type="string", nullable=true)
     */
    protected $contributor;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="copyright_holder", type="string", nullable=true)
     */
    protected $copyrightHolder;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="copyright_year", type="float", nullable=true)
     */
    protected $copyrightYear;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="creator", type="string", nullable=true)
     */
    protected $creator;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="date_created", type="string", nullable=true)
     */
    protected $dateCreated;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="date_modified", type="string", nullable=true)
     */
    protected $dateModified;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="date_published", type="string", nullable=true)
     */
    protected $datePublished;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="discussion_url", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $discussionUrl;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="editor", type="string", nullable=true)
     */
    protected $editor;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="educational_alignment", type="string", nullable=true)
     */
    protected $educationalAlignment;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="educational_use", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $educationalUse;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="encoding", type="string", nullable=true)
     */
    protected $encoding;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="encodings", type="array", nullable=true)
     */
    protected $encodings;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="genre", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $genre;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="headline", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $headline;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="in_language", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $inLanguage;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="interaction_count", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $interactionCount;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="interactivity_type", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $interactivityType;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="is_based_on_url", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $isBasedOnUrl;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="is_family_friendly", type="boolean", nullable=true)
     */
    protected $isFamilyFriendly;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="keywords", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $keywords;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="learning_resource_type", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $learningResourceType;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="mentions", type="string", nullable=true)
     */
    protected $mentions;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="offers", type="string", nullable=true)
     */
    protected $offers;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="provider", type="string", nullable=true)
     */
    protected $provider;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="publisher", type="string", nullable=true)
     */
    protected $publisher;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="publishing_principles", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $publishingPrinciples;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="review", type="string", nullable=true)
     */
    protected $review;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="reviews", type="array", nullable=true)
     */
    protected $reviews;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="source_organization", type="string", nullable=true)
     */
    protected $sourceOrganization;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="text", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $text;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="thumbnail_url", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $thumbnailUrl;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="time_required", type="string", nullable=true)
     */
    protected $timeRequired;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="typical_age_range", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $typicalAgeRange;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="version", type="float", nullable=true)
     */
    protected $version;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="video", type="string", nullable=true)
     */
    protected $video;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="breadcrumb", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $breadcrumb;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="is_part_of", type="string", nullable=true)
     */
    protected $isPartOf;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="last_reviewed", type="string", nullable=true)
     */
    protected $lastReviewed;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="main_content_of_page", type="string",
nullable=true) */
    protected $mainContentOfPage;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="primary_image_of_page", type="string", nullable=true)
     */
    protected $primaryImageOfPage;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="related_link", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $relatedLink;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="reviewed_by", type="string", nullable=true)
     */
    protected $reviewedBy;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="significant_link", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $significantLink;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="significant_links", type="string", nullable=true)
     * @Assert\Type(type="string")
     */
    protected $significantLinks;

    /**
     * {@inheritdoc}
     *
     * @ORM\Column(name="specialty", type="string", nullable=true)
     */
    protected $specialty;
}
<file_sep>/GenerationSamples/PriceSpecificationBundle/Model/AbstractPriceSpecification.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
namespace SchemaRepository\Bundle\PriceSpecificationBundle\Model;

use SchemaRepository\Bundle\PriceSpecificationBundle\Model\PriceSpecificationInterface;
use SchemaRepository\Bundle\StructuredValueBundle\Model\AbstractStructuredValue;

/**
 * Model of Price Specification (schema.org "PriceSpecification").
 *
 * Generated value-holder: plain protected properties with one getter/setter
 * pair each; no validation or type coercion happens here.
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\PriceSpecificationBundle\Model
 */
abstract class AbstractPriceSpecification extends AbstractStructuredValue implements PriceSpecificationInterface
{
    /** @var mixed Interval and unit of ordering quantities the price is valid for. */
    protected $eligibleQuantity;

    /** @var mixed Transaction volume (monetary) for which the price is valid. */
    protected $eligibleTransactionVolume;

    /** @var int|float Highest price when the price is a range. */
    protected $maxPrice;

    /** @var int|float Lowest price when the price is a range. */
    protected $minPrice;

    /** @var string|int|float Offer price of a product or price component. */
    protected $price;

    /** @var string Currency of the price (3-letter ISO 4217 code). */
    protected $priceCurrency;

    /** @var \DateTime Beginning of the validity of the price specification. */
    protected $validFrom;

    /** @var \DateTime End of the validity of the price specification. */
    protected $validThrough;

    /** @var bool Whether VAT is included in the price specification. */
    protected $valueAddedTaxIncluded;

    /** @return mixed Eligible ordering quantity interval/unit. */
    public function getEligibleQuantity()
    {
        return $this->eligibleQuantity;
    }

    /** @param mixed $value Eligible ordering quantity interval/unit. */
    public function setEligibleQuantity($value)
    {
        $this->eligibleQuantity = $value;
    }

    /** @return mixed Eligible transaction volume. */
    public function getEligibleTransactionVolume()
    {
        return $this->eligibleTransactionVolume;
    }

    /** @param mixed $value Eligible transaction volume. */
    public function setEligibleTransactionVolume($value)
    {
        $this->eligibleTransactionVolume = $value;
    }

    /** @return int|float Highest price of the range. */
    public function getMaxPrice()
    {
        return $this->maxPrice;
    }

    /** @param int|float $value Highest price of the range. */
    public function setMaxPrice($value)
    {
        $this->maxPrice = $value;
    }

    /** @return int|float Lowest price of the range. */
    public function getMinPrice()
    {
        return $this->minPrice;
    }

    /** @param int|float $value Lowest price of the range. */
    public function setMinPrice($value)
    {
        $this->minPrice = $value;
    }

    /** @return string|int|float Offer price. */
    public function getPrice()
    {
        return $this->price;
    }

    /** @param string|int|float $value Offer price. */
    public function setPrice($value)
    {
        $this->price = $value;
    }

    /** @return string ISO 4217 currency code of the price. */
    public function getPriceCurrency()
    {
        return $this->priceCurrency;
    }

    /** @param string $value ISO 4217 currency code of the price. */
    public function setPriceCurrency($value)
    {
        $this->priceCurrency = $value;
    }

    /** @return \DateTime Start of validity. */
    public function getValidFrom()
    {
        return $this->validFrom;
    }

    /** @param \DateTime $value Start of validity. */
    public function setValidFrom($value)
    {
        $this->validFrom = $value;
    }

    /** @return \DateTime End of validity. */
    public function getValidThrough()
    {
        return $this->validThrough;
    }

    /** @param \DateTime $value End of validity. */
    public function setValidThrough($value)
    {
        $this->validThrough = $value;
    }

    /** @return bool Whether VAT is included. */
    public function getValueAddedTaxIncluded()
    {
        return $this->valueAddedTaxIncluded;
    }

    /**
     * Setter of Value Added Tax Included
     *
     * Specifies whether the applicable value-added tax (VAT) is included in
     * the price specification or not.
*
 * @param bool $value Value of valueAddedTaxIncluded
 */
    public function setValueAddedTaxIncluded($value)
    {
        $this->valueAddedTaxIncluded = $value;
    }
}
<file_sep>/GenerationSamples/DietarySupplementBundle/Model/AbstractDietarySupplement.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
namespace SchemaRepository\Bundle\DietarySupplementBundle\Model;

use SchemaRepository\Bundle\DietarySupplementBundle\Model\DietarySupplementInterface;
use SchemaRepository\Bundle\MedicalTherapyBundle\Model\AbstractMedicalTherapy;

/**
 * Model of Dietary Supplement (schema.org "DietarySupplement").
 *
 * Generated value-holder: plain protected properties with one getter/setter
 * pair each; no validation or type coercion happens here.
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\DietarySupplementBundle\Model
 */
abstract class AbstractDietarySupplement extends AbstractMedicalTherapy implements DietarySupplementInterface
{
    /** @var string Active ingredient (chemical compound and/or biologic substance). */
    protected $activeIngredient;

    /** @var string Historical background of the supplement (name rationale, origin, etc.). */
    protected $background;

    /** @var string Dosage form in which the supplement is available, e.g. 'tablet'. */
    protected $dosageForm;

    /** @var bool True if the item's name is a proprietary/brand name (vs. generic). */
    protected $isProprietary;

    /** @var mixed Legal status, including applicable controlled-substance schedules. */
    protected $legalStatus;

    /** @var mixed Manufacturer of the product. */
    protected $manufacturer;

    /** @var mixed Recommended intake for a population, per a recommending authority. */
    protected $maximumIntake;

    /** @var string Biochemical interaction producing the pharmacological effect. */
    protected $mechanismOfAction;

    /** @var string Generic name of this drug or supplement. */
    protected $nonProprietaryName;

    /** @var mixed Recommended intake for a population, per a recommending authority. */
    protected $recommendedIntake;

    /** @var string Potential safety concerns (interactions, adverse reactions, etc.). */
    protected $safetyConsideration;

    /** @var string Intended/typical population, e.g. 'adults'. */
    protected $targetPopulation;

    /** @return string Active ingredient. */
    public function getActiveIngredient()
    {
        return $this->activeIngredient;
    }

    /** @param string $value Active ingredient. */
    public function setActiveIngredient($value)
    {
        $this->activeIngredient = $value;
    }

    /** @return string Historical background of the supplement. */
    public function getBackground()
    {
        return $this->background;
    }

    /** @param string $value Historical background of the supplement. */
    public function setBackground($value)
    {
        $this->background = $value;
    }

    /** @return string Dosage form, e.g. 'tablet', 'suspension', 'injection'. */
    public function getDosageForm()
    {
        return $this->dosageForm;
    }

    /** @param string $value Dosage form, e.g. 'tablet', 'suspension', 'injection'. */
    public function setDosageForm($value)
    {
        $this->dosageForm = $value;
    }

    /** @return bool Whether the name is proprietary/brand. */
    public function getIsProprietary()
    {
        return $this->isProprietary;
    }

    /** @param bool $value Whether the name is proprietary/brand. */
    public function setIsProprietary($value)
    {
        $this->isProprietary = $value;
    }

    /** @return mixed Legal status. */
    public function getLegalStatus()
    {
        return $this->legalStatus;
    }

    /** @param mixed $value Legal status. */
    public function setLegalStatus($value)
    {
        $this->legalStatus = $value;
    }

    /** @return mixed Manufacturer of the product. */
    public function getManufacturer()
    {
        return $this->manufacturer;
    }

    /** @param mixed $value Manufacturer of the product. */
    public function setManufacturer($value)
    {
        $this->manufacturer = $value;
    }

    /** @return mixed Maximum intake recommendation. */
    public function getMaximumIntake()
    {
        return $this->maximumIntake;
    }

    /** @param mixed $value Maximum intake recommendation. */
    public function setMaximumIntake($value)
    {
        $this->maximumIntake = $value;
    }

    /** @return string Mechanism of action. */
    public function getMechanismOfAction()
    {
        return $this->mechanismOfAction;
    }

    /** @param string $value Mechanism of action. */
    public function setMechanismOfAction($value)
    {
        $this->mechanismOfAction = $value;
    }

    /** @return string Generic (non-proprietary) name. */
    public function getNonProprietaryName()
    {
        return $this->nonProprietaryName;
    }

    /** @param string $value Generic (non-proprietary) name. */
    public function setNonProprietaryName($value)
    {
        $this->nonProprietaryName = $value;
    }

    /** @return mixed Recommended intake. */
    public function getRecommendedIntake()
    {
        return $this->recommendedIntake;
    }

    /** @param mixed $value Recommended intake. */
    public function setRecommendedIntake($value)
    {
        $this->recommendedIntake = $value;
    }

    /** @return string Safety considerations. */
    public function getSafetyConsideration()
    {
        return $this->safetyConsideration;
    }

    /** @param string $value Safety considerations. */
    public function setSafetyConsideration($value)
    {
        $this->safetyConsideration = $value;
    }

    /** @return string Target population, e.g. 'adults'. */
    public function getTargetPopulation()
    {
        return $this->targetPopulation;
    }

    /** @param string $value Target population, e.g. 'adults'. */
    public function setTargetPopulation($value)
    {
        $this->targetPopulation = $value;
    }
}
<file_sep>/GenerationSamples/DrugCostBundle/Model/AbstractDrugCost.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
namespace SchemaRepository\Bundle\DrugCostBundle\Model;

use SchemaRepository\Bundle\DrugCostBundle\Model\DrugCostInterface;
use SchemaRepository\Bundle\MedicalIntangibleBundle\Model\AbstractMedicalIntangible;

/**
 * Model of Drug Cost (schema.org "DrugCost").
 *
 * Generated value-holder: plain protected properties with one getter/setter
 * pair each; no validation or type coercion happens here.
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\DrugCostBundle\Model
 */
abstract class AbstractDrugCost extends AbstractMedicalIntangible implements DrugCostInterface
{
    /** @var mixed Location in which the status applies. */
    protected $applicableLocation;

    /** @var mixed Category of cost (wholesale, retail, reimbursement cap, etc.). */
    protected $costCategory;

    /** @var string Currency of the drug cost (3-letter ISO 4217 code). */
    protected $costCurrency;

    /** @var string Origin of the cost data, e.g. 'Medicare Part B'. */
    protected $costOrigin;

    /** @var int|float|string Cost per unit of the drug. */
    protected $costPerUnit;

    /** @var string Unit in which the drug is measured, e.g. '5 mg tablet'. */
    protected $drugUnit;

    /** @return mixed Applicable location. */
    public function getApplicableLocation()
    {
        return $this->applicableLocation;
    }

    /** @param mixed $value Applicable location. */
    public function setApplicableLocation($value)
    {
        $this->applicableLocation = $value;
    }

    /** @return mixed Cost category. */
    public function getCostCategory()
    {
        return $this->costCategory;
    }

    /** @param mixed $value Cost category. */
    public function setCostCategory($value)
    {
        $this->costCategory = $value;
    }

    /** @return string ISO 4217 currency code of the cost. */
    public function getCostCurrency()
    {
        return $this->costCurrency;
    }

    /** @param string $value ISO 4217 currency code of the cost. */
    public function setCostCurrency($value)
    {
        $this->costCurrency = $value;
    }

    /** @return string Origin of the cost data. */
    public function getCostOrigin()
    {
        return $this->costOrigin;
    }

    /** @param string $value Origin of the cost data. */
    public function setCostOrigin($value)
    {
        $this->costOrigin = $value;
    }

    /** @return int|float|string Cost per unit. */
    public function getCostPerUnit()
    {
        return $this->costPerUnit;
    }

    /** @param int|float|string $value Cost per unit. */
    public function setCostPerUnit($value)
    {
        $this->costPerUnit = $value;
    }

    /** @return string Drug measurement unit. */
    public function getDrugUnit()
    {
        return $this->drugUnit;
    }

    /** @param string $value Drug measurement unit. */
    public function setDrugUnit($value)
    {
        $this->drugUnit = $value;
    }
}
<file_sep>/GenerationSamples/CodeBundle/Model/CodeInterface.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
*
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
namespace SchemaRepository\Bundle\CodeBundle\Model;

use SchemaRepository\Bundle\CreativeWorkBundle\Model\CreativeWorkInterface;

/**
 * Interface of Code Model (schema.org "Code").
 *
 * Read accessors only; generated by SchemaGenerator.
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\CodeBundle\Model
 */
interface CodeInterface extends CreativeWorkInterface
{
    /**
     * @return string Link to the repository holding the human-readable code
     *                (SVN, GitHub, CodePlex).
     */
    public function getCodeRepository();

    /** @return mixed The computer programming language. */
    public function getProgrammingLanguage();

    /**
     * @return string Runtime platform or interpreter dependencies
     *                (e.g. Java v1, Python2.3, .Net Framework 3.0).
     */
    public function getRuntime();

    /**
     * @return string Sample type: full (compile-ready) solution, code
     *                snippet, inline code, scripts, template.
     */
    public function getSampleType();

    /**
     * @return mixed Target OS/product the code applies to; the product name
     *               alone may be used when several versions apply.
     */
    public function getTargetProduct();
}
<file_sep>/GenerationSamples/ExerciseActionBundle/Model/ExerciseActionInterface.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
namespace SchemaRepository\Bundle\ExerciseActionBundle\Model;

use SchemaRepository\Bundle\PlayActionBundle\Model\PlayActionInterface;

/**
 * Interface of Exercise Action Model (schema.org "ExerciseAction").
 *
 * Read accessors only; generated by SchemaGenerator.
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\ExerciseActionBundle\Model
 */
interface ExerciseActionInterface extends PlayActionInterface
{
    /**
     * @return mixed A sub property of location: the course where this action
     *               was taken.
     */
    public function getCourse();

    /**
     * Getter of Diet
     *
     * A sub property of instrument. The diet used in this action.
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDiet(); /** * Getter of Distance * * A sub property of asset. The distance travelled. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDistance(); /** * Getter of Exercise Plan * * A sub property of instrument. The exercise plan used on this action. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getExercisePlan(); /** * Getter of Exercise Type * * Type(s) of exercise or activity, such as strength training, * flexibility training, aerobics, cardiac rehabilitation, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getExerciseType(); /** * Getter of From Location * * A sub property of location. The original location of the object or the * agent before the action. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getFromLocation(); /** * Getter of Oponent * * A sub property of participant. The oponent on this action. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOponent(); /** * Getter of Sports Activity Location * * A sub property of location. The sports activity location where this * action occurred. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSportsActivityLocation(); /** * Getter of Sports Event * * A sub property of location. The sports event where this action * occurred. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSportsEvent(); /** * Getter of Sports Team * * A sub property of participant. The sports team that participated on * this action. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSportsTeam(); /** * Getter of To Location * * A sub property of location. 
The final location of the object or the * agent after the action. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getToLocation(); } <file_sep>/GenerationSamples/MedicalStudyBundle/Model/MedicalStudyInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\MedicalStudyBundle\Model; use SchemaRepository\Bundle\MedicalEntityBundle\Model\MedicalEntityInterface; /** * Interface of Medical Study Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MedicalStudyBundle\Model */ interface MedicalStudyInterface extends MedicalEntityInterface { /** * Getter of Outcome * * Expected or actual outcomes of the study. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getOutcome(); /** * Getter of Population * * Any characteristics of the population used in the study, e.g. 'males * under 65'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPopulation(); /** * Getter of Sponsor * * Sponsor of the study. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSponsor(); /** * Getter of Status * * The status of the study (enumerated). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStatus(); /** * Getter of Study Location * * The location in which the study is taking/took place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStudyLocation(); /** * Getter of Study Subject * * A subject of the study, i.e. one of the medical conditions, therapies, * devices, drugs, etc. investigated by the study. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStudySubject(); } <file_sep>/GenerationSamples/NerveBundle/Model/AbstractNerve.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\NerveBundle\Model; use SchemaRepository\Bundle\NerveBundle\Model\NerveInterface; use SchemaRepository\Bundle\AnatomicalStructureBundle\Model\AbstractAnatomicalStructure; /** * Model of Nerve * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\NerveBundle\Model */ abstract class AbstractNerve extends AbstractAnatomicalStructure implements NerveInterface { /** * The branches that delineate from the nerve bundle. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $branch; /** * The neurological pathway extension that involves muscle control. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $nerveMotor; /** * The neurological pathway extension that inputs and sends information * to the brain or spinal cord. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $sensoryUnit; /** * The neurological pathway that originates the neurons. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $sourcedFrom; /** * Getter of Branch * * The branches that delineate from the nerve bundle. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBranch() { return $this->branch; } /** * Setter of Branch * * The branches that delineate from the nerve bundle. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of branch */ public function setBranch($value) { $this->branch = $value; } /** * Getter of Nerve Motor * * The neurological pathway extension that involves muscle control. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getNerveMotor() { return $this->nerveMotor; } /** * Setter of Nerve Motor * * The neurological pathway extension that involves muscle control. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of nerveMotor */ public function setNerveMotor($value) { $this->nerveMotor = $value; } /** * Getter of Sensory Unit * * The neurological pathway extension that inputs and sends information * to the brain or spinal cord. * * @author SchemaGenerator <<EMAIL>@free.fr> * * @access public * * @return mixed */ public function getSensoryUnit() { return $this->sensoryUnit; } /** * Setter of Sensory Unit * * The neurological pathway extension that inputs and sends information * to the brain or spinal cord. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of sensoryUnit */ public function setSensoryUnit($value) { $this->sensoryUnit = $value; } /** * Getter of Sourced From * * The neurological pathway that originates the neurons. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSourcedFrom() { return $this->sourcedFrom; } /** * Setter of Sourced From * * The neurological pathway that originates the neurons. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of sourcedFrom */ public function setSourcedFrom($value) { $this->sourcedFrom = $value; } } <file_sep>/GenerationSamples/MedicalDeviceBundle/Model/MedicalDeviceInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\MedicalDeviceBundle\Model; use SchemaRepository\Bundle\MedicalEntityBundle\Model\MedicalEntityInterface; /** * Interface of Medical Device Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MedicalDeviceBundle\Model */ interface MedicalDeviceInterface extends MedicalEntityInterface { /** * Getter of Adverse Outcome * * A possible complication and/or side effect of this therapy. 
If it is * known that an adverse outcome is serious (resulting in death, * disability, or permanent damage; requiring hospitalization; or is * otherwise life-threatening or requires immediate medical attention), * tag it as a seriouseAdverseOutcome instead. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAdverseOutcome(); /** * Getter of Contraindication * * A contraindication for this therapy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContraindication(); /** * Getter of Indication * * A factor that indicates use of this therapy for treatment and/or * prevention of a condition, symptom, etc. For therapies such as drugs, * indications can include both officially-approved indications as well * as off-label uses. These can be distinguished by using the * ApprovedIndication subtype of MedicalIndication. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIndication(); /** * Getter of Post Op * * A description of the postoperative procedures, care, and/or followups * for this device. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPostOp(); /** * Getter of Pre Op * * A description of the workup, testing, and other preparations required * before implanting this device. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPreOp(); /** * Getter of Procedure * * A description of the procedure involved in setting up, using, and/or * installing the device. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getProcedure(); /** * Getter of Purpose * * A goal towards an action is taken. Can be concrete or abstract. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPurpose(); /** * Getter of Serious Adverse Outcome * * A possible serious complication and/or serious side effect of this * therapy. Serious adverse outcomes include those that are * life-threatening; result in death, disability, or permanent damage; * require hospitalization or prolong existing hospitalization; cause * congenital anomalies or birth defects; or jeopardize the patient and * may require medical or surgical intervention to prevent one of the * outcomes in this definition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSeriousAdverseOutcome(); } <file_sep>/GeneratorClass/ClassType.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ class ClassType { private $name; private $label; private $properties = array(); private $propertiesList; private $comment; private $parentName = ''; private $parent = null; /** * @param string $name * @param string $comment * @param array $propertiesList */ public function __construct($name, $label, $comment, $parentName, $propertiesList) { $this->name = $name; $this->label = $label; $this->comment = $comment; $this->propertiesList = $propertiesList; $this->parentName = $parentName; } public function setParent(&$parent) { $this->parent = $parent; } public function addProperty($property) { if( ( $this->parent !== null && !$this->parent->propertyExist($property->getName()) || $this->parent === null ) && $this->propertyExist($property->getName()) ) { $this->properties[$property->getName()] = $property; } } public function getParentName() { return $this->parentName; } public function getLabel() { return $this->label; } public function getName() { return $this->name; } public function propertyExist($name) { return in_array($name, $this->propertiesList); } public function getInterface() { $interface = phpFileHeader() . "namespace " . classTypeNameSpace($this->getName(), 'Model') . ";\n\n"; if($this->parent !== null) { $interface .= "use " . classTypeNameSpace($this->parent->getName(), 'Model', $this->parent->getName().'Interface') . ";\n\n"; } $interface .= "/**\n" . " * Interface of {$this->getLabel()} Model\n" . " * \n" . " * @author SchemaGenerator <<EMAIL>>\n" . " * @package " . classTypeNameSpace($this->getName(), 'Model') . "\n" . " */\n" . "interface {$this->getName()}Interface"; if($this->parent !== null) { $interface .= " extends " . $this->parent->getName().'Interface'; } $interface .= "\n{\n"; foreach ($this->properties as $property) { $interface .= $property->getGetterInterface(); } $interface .= "}\n"; return $interface; } public function getModel() { $model = phpFileHeader() . "namespace " . classTypeNameSpace($this->getName(), 'Model') . ";\n\n" . 
"use " . classTypeNameSpace($this->getName(), 'Model', $this->getName().'Interface') . ";\n"; if($this->parent !== null) { $model .= "use " . classTypeNameSpace($this->parent->getName(), 'Model', 'Abstract' . $this->parent->getName()) . ";\n\n"; } $model .= "/**\n" . " * Model of {$this->getLabel()}\n" . " * \n" . " * @author SchemaGenerator <<EMAIL>>\n" . " * @package " . classTypeNameSpace($this->getName(), 'Model') . "\n" . " */\n" . "abstract class Abstract{$this->getName()}"; if($this->parent !== null) { $model .= " extends Abstract" . $this->parent->getName(); } $model .= " implements " . $this->getName() . "Interface"; $model .= "\n{\n"; foreach ($this->properties as $property) { $model .= $property->getAttributeDeclaration(); } foreach ($this->properties as $property) { $model .= $property->getGetter(); $model .= $property->getSetter(); } $model .= "}\n"; return $model; } public function getDocument() { global $properties; $document = phpFileHeader() . "namespace " . classTypeNameSpace($this->getName(), 'Document') . ";\n\n" . "use " . classTypeNameSpace($this->getName(), 'Model', 'Abstract'.$this->getName()) . ";\n" . "use Doctrine\ODM\MongoDB\Mapping\Annotations as ODM;\n" . "use Symfony\Component\Validator\Constraints as Assert;\n\n" . "/**\n" . " * {$this->getLabel()} Document\n" . " *\n" . " * @ODM\MappedSuperclass()\n" . " */\n" . "abstract class Abstract{$this->name} extends Abstract{$this->name}\n" . 
"{\n"; foreach ($this->propertiesList as $property) { $document .= "\n /**\n"; $document .= " * {@inheritdoc}\n"; $document .= " * \n"; switch($properties[$property]->getType()) { case 'string': $document .= " * @ODM\String\n"; $document .= " * @Assert\Type(type=\"string\")\n"; break; case '\\DateTime': $document .= " * @ODM\DateTime\n"; break; case 'array': $document .= " * @ODM\Raw\n"; break; default: if($properties[$property]->getName() !== 'id') { $document .= " * @ODM\Field\n"; } else { $document .= " * @ODM\Id\n"; } } $document .= " */\n"; $document .= " protected \${$property};\n"; } $document .= "}\n"; return $document; } public function getEntity() { global $properties; $entity = phpFileHeader() . "namespace " . classTypeNameSpace($this->getName(), 'Entity') . ";\n\n" . "use " . classTypeNameSpace($this->getName(), 'Model', 'Abstract'.$this->getName()) . ";\n" . "use Doctrine\ORM\Mapping as ORM;\n" . "use Symfony\Component\Validator\Constraints as Assert;\n\n" . "/**\n" . " * {$this->getLabel()} Entity\n" . " *\n" . " * @ODM\MappedSuperclass()\n" . " */\n" . "abstract class Abstract{$this->name} extends Abstract{$this->name}\n" . 
"{\n"; foreach ($this->propertiesList as $property) { $entity .= "\n /**\n"; $entity .= " * {@inheritdoc}\n"; $entity .= " * \n"; $name = strtolower(preg_replace('/\B([A-Z]{1,})/', "_$1", $property)); switch($properties[$property]->getType()) { case 'string': $entity .= " * @ORM\Column(name=\"$name\", type=\"string\", nullable=true)\n"; $entity .= " * @Assert\Type(type=\"string\")\n"; break; case '\\DateTime': $entity .= " * @ORM\Column(name=\"$name\", type=\"datetime\", nullable=true)\n"; break; case 'integer|long|float|decimal': $entity .= " * @ORM\Column(name=\"$name\", type=\"float\", nullable=true)\n"; break; case 'float|decimal': $entity .= " * @ORM\Column(name=\"$name\", type=\"float\", nullable=true)\n"; break; case 'integer|long': if($properties[$property]->getName() !== 'id') { $entity .= " * @ORM\Column(name=\"$name\", type=\"integer\", nullable=true)\n"; } else { $entity .= " * @ORM\Column(name=\"id\", type=\"integer\")\n"; $entity .= " * @ORM\Id\n"; $entity .= " * @ORM\GeneratedValue(strategy=\"AUTO\")\n"; } break; case 'bool': $entity .= " * @ORM\Column(name=\"$name\", type=\"boolean\", nullable=true)\n"; break; case 'array': $entity .= " * @ORM\Column(name=\"$name\", type=\"array\", nullable=true)\n"; break; default: $entity .= " * @ORM\Column(name=\"$name\", type=\"string\", nullable=true)\n"; } $entity .= " */\n"; $entity .= " protected \${$property};\n"; } $entity .= "}\n"; return $entity; } public function getFormLanguage() { global $properties; $language = ''; foreach(array('admin', 'www') as $env) { $language .= "$env:\n" . " {$this->name}:\n"; foreach ($this->propertiesList as $property) { $language .= " $property:\n"; $language .= " text: " . $properties[$property]->getLabel() . 
"\n"; } } return $language; } }<file_sep>/GenerationSamples/InfectiousDiseaseBundle/Model/AbstractInfectiousDisease.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\InfectiousDiseaseBundle\Model; use SchemaRepository\Bundle\InfectiousDiseaseBundle\Model\InfectiousDiseaseInterface; use SchemaRepository\Bundle\MedicalConditionBundle\Model\AbstractMedicalCondition; /** * Model of Infectious Disease * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\InfectiousDiseaseBundle\Model */ abstract class AbstractInfectiousDisease extends AbstractMedicalCondition implements InfectiousDiseaseInterface { /** * The actual infectious agent, such as a specific bacterium. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $infectiousAgent; /** * The class of infectious agent (bacteria, prion, etc.) that causes the * disease. 
* * @author SchemaGenerator <<EMAIL>.fr> * * @access protected * @var mixed */ protected $infectiousAgentClass; /** * How the disease spreads, either as a route or vector, for example * 'direct contact', 'Aedes aegypti', etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $transmissionMethod; /** * Getter of Infectious Agent * * The actual infectious agent, such as a specific bacterium. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getInfectiousAgent() { return $this->infectiousAgent; } /** * Setter of Infectious Agent * * The actual infectious agent, such as a specific bacterium. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of infectiousAgent */ public function setInfectiousAgent($value) { $this->infectiousAgent = $value; } /** * Getter of Infectious Agent Class * * The class of infectious agent (bacteria, prion, etc.) that causes the * disease. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getInfectiousAgentClass() { return $this->infectiousAgentClass; } /** * Setter of Infectious Agent Class * * The class of infectious agent (bacteria, prion, etc.) that causes the * disease. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of infectiousAgentClass */ public function setInfectiousAgentClass($value) { $this->infectiousAgentClass = $value; } /** * Getter of Transmission Method * * How the disease spreads, either as a route or vector, for example * 'direct contact', 'Aedes aegypti', etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getTransmissionMethod() { return $this->transmissionMethod; } /** * Setter of Transmission Method * * How the disease spreads, either as a route or vector, for example * 'direct contact', 'Aedes aegypti', etc. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of transmissionMethod */ public function setTransmissionMethod($value) { $this->transmissionMethod = $value; } } <file_sep>/GenerationSamples/MedicalProcedureBundle/Model/MedicalProcedureInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\MedicalProcedureBundle\Model; use SchemaRepository\Bundle\MedicalEntityBundle\Model\MedicalEntityInterface; /** * Interface of Medical Procedure Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MedicalProcedureBundle\Model */ interface MedicalProcedureInterface extends MedicalEntityInterface { /** * Getter of Followup * * Typical or recommended followup care after the procedure is performed. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getFollowup(); /** * Getter of How Performed * * How the procedure is performed. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getHowPerformed(); /** * Getter of Preparation * * Typical preparation that a patient must undergo before having the * procedure performed. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPreparation(); /** * Getter of Procedure Type * * The type of procedure, for example Surgical, Noninvasive, or * Percutaneous. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getProcedureType(); } <file_sep>/GenerationSamples/DatasetBundle/Model/DatasetInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\DatasetBundle\Model; use SchemaRepository\Bundle\CreativeWorkBundle\Model\CreativeWorkInterface; /** * Interface of Dataset Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\DatasetBundle\Model */ interface DatasetInterface extends CreativeWorkInterface { /** * Getter of Catalog * * A data catalog which contains a dataset. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCatalog(); /** * Getter of Distribution * * A downloadable form of this dataset, at a specific location, in a * specific format. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDistribution(); /** * Getter of Spatial * * The range of spatial applicability of a dataset, e.g. for a dataset of * New York weather, the state of New York. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSpatial(); /** * Getter of Temporal * * The range of temporal applicability of a dataset, e.g. for a 2011 * census dataset, the year 2011 (in ISO 8601 time interval format). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return \DateTime */ public function getTemporal(); } <file_sep>/GenerationSamples/RatingBundle/Model/AbstractRating.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\RatingBundle\Model; use SchemaRepository\Bundle\RatingBundle\Model\RatingInterface; use SchemaRepository\Bundle\IntangibleBundle\Model\AbstractIntangible; /** * Model of Rating * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\RatingBundle\Model */ abstract class AbstractRating extends AbstractIntangible implements RatingInterface { /** * The highest value allowed in this rating system. If bestRating is * omitted, 5 is assumed. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var integer|long|float|decimal|string */ protected $bestRating; /** * The rating for the content. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $ratingValue; /** * The lowest value allowed in this rating system. If worstRating is * omitted, 1 is assumed. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var integer|long|float|decimal|string */ protected $worstRating; /** * Getter of Best Rating * * The highest value allowed in this rating system. If bestRating is * omitted, 5 is assumed. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal|string */ public function getBestRating() { return $this->bestRating; } /** * Setter of Best Rating * * The highest value allowed in this rating system. If bestRating is * omitted, 5 is assumed. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param int|float|string $value Value of bestRating */ public function setBestRating($value) { $this->bestRating = $value; } /** * Getter of Rating Value * * The rating for the content. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getRatingValue() { return $this->ratingValue; } /** * Setter of Rating Value * * The rating for the content. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of ratingValue */ public function setRatingValue($value) { $this->ratingValue = $value; } /** * Getter of Worst Rating * * The lowest value allowed in this rating system. If worstRating is * omitted, 1 is assumed. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return int|float|string */ public function getWorstRating() { return $this->worstRating; } /** * Setter of Worst Rating * * The lowest value allowed in this rating system. If worstRating is * omitted, 1 is assumed.
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param integer|long|float|decimal|string $value Value of worstRating */ public function setWorstRating($value) { $this->worstRating = $value; } } <file_sep>/GenerationSamples/SoftwareApplicationBundle/Model/AbstractSoftwareApplication.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\SoftwareApplicationBundle\Model; use SchemaRepository\Bundle\SoftwareApplicationBundle\Model\SoftwareApplicationInterface; use SchemaRepository\Bundle\CreativeWorkBundle\Model\AbstractCreativeWork; /** * Model of Software Application * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\SoftwareApplicationBundle\Model */ abstract class AbstractSoftwareApplication extends AbstractCreativeWork implements SoftwareApplicationInterface { /** * Type of software application, e.g. "Game, Multimedia". 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string|string */ protected $applicationCategory; /** * Subcategory of the application, e.g. "Arcade Game". * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string|string */ protected $applicationSubCategory; /** * The name of the application suite to which the application belongs * (e.g. Excel belongs to Office) * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $applicationSuite; /** * Countries for which the application is not supported. You can also * provide the two-letter ISO 3166-1 alpha-2 country code. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $countriesNotSupported; /** * Countries for which the application is supported. You can also provide * the two-letter ISO 3166-1 alpha-2 country code. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $countriesSupported; /** * Device required to run the application. Used in cases where a specific * make/model is required to run the application. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $device; /** * If the file can be downloaded, URL to download the binary. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $downloadUrl; /** * Features or modules provided by this application (and possibly * required by other applications). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string|string */ protected $featureList; /** * MIME format of the binary (e.g. application/zip). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $fileFormat; /** * Size of the application / package (e.g. 18MB). In the absence of a * unit (MB, KB etc.), KB will be assumed. 
* * @author SchemaGenerator <<EMAIL>.fr> * * @access protected * @var integer|long */ protected $fileSize; /** * URL at which the app may be installed, if different from the URL of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $installUrl; /** * Minimum memory requirements. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string|string */ protected $memoryRequirements; /** * Operating systems supported (Windows 7, OSX 10.6, Android 1.6). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $operatingSystem; /** * Permission(s) required to run the app (for example, a mobile app may * require full internet access or may run only on wifi). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $permissions; /** * Processor architecture required to run the application (e.g. IA64). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $processorRequirements; /** * Description of what changed in this version. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string|string */ protected $releaseNotes; /** * Component dependency requirements for application. This includes * runtime environments and shared libraries that are not included in the * application distribution package, but required to run the application * (Examples: DirectX, Java or .NET runtime). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string|string */ protected $requirements; /** * A link to a screenshot image of the app. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $screenshot; /** * Version of the software instance. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $softwareVersion; /** * Storage requirements (free space required). 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $storageRequirements; /** * Getter of Application Category * * Type of software application, e.g. "Game, Multimedia". * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getApplicationCategory() { return $this->applicationCategory; } /** * Setter of Application Category * * Type of software application, e.g. "Game, Multimedia". * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of applicationCategory */ public function setApplicationCategory($value) { $this->applicationCategory = $value; } /** * Getter of Application Sub Category * * Subcategory of the application, e.g. "Arcade Game". * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getApplicationSubCategory() { return $this->applicationSubCategory; } /** * Setter of Application Sub Category * * Subcategory of the application, e.g. "Arcade Game". * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of applicationSubCategory */ public function setApplicationSubCategory($value) { $this->applicationSubCategory = $value; } /** * Getter of Application Suite * * The name of the application suite to which the application belongs * (e.g. Excel belongs to Office) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getApplicationSuite() { return $this->applicationSuite; } /** * Setter of Application Suite * * The name of the application suite to which the application belongs * (e.g. Excel belongs to Office) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of applicationSuite */ public function setApplicationSuite($value) { $this->applicationSuite = $value; } /** * Getter of Countries Not Supported * * Countries for which the application is not supported.
You can also * provide the two-letter ISO 3166-1 alpha-2 country code. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getCountriesNotSupported() { return $this->countriesNotSupported; } /** * Setter of Countries Not Supported * * Countries for which the application is not supported. You can also * provide the two-letter ISO 3166-1 alpha-2 country code. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of countriesNotSupported */ public function setCountriesNotSupported($value) { $this->countriesNotSupported = $value; } /** * Getter of Countries Supported * * Countries for which the application is supported. You can also provide * the two-letter ISO 3166-1 alpha-2 country code. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getCountriesSupported() { return $this->countriesSupported; } /** * Setter of Countries Supported * * Countries for which the application is supported. You can also provide * the two-letter ISO 3166-1 alpha-2 country code. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of countriesSupported */ public function setCountriesSupported($value) { $this->countriesSupported = $value; } /** * Getter of Device * * Device required to run the application. Used in cases where a specific * make/model is required to run the application. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDevice() { return $this->device; } /** * Setter of Device * * Device required to run the application. Used in cases where a specific * make/model is required to run the application. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of device */ public function setDevice($value) { $this->device = $value; } /** * Getter of Download Url * * If the file can be downloaded, URL to download the binary. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDownloadUrl() { return $this->downloadUrl; } /** * Setter of Download Url * * If the file can be downloaded, URL to download the binary. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of downloadUrl */ public function setDownloadUrl($value) { $this->downloadUrl = $value; } /** * Getter of Feature List * * Features or modules provided by this application (and possibly * required by other applications). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getFeatureList() { return $this->featureList; } /** * Setter of Feature List * * Features or modules provided by this application (and possibly * required by other applications). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of featureList */ public function setFeatureList($value) { $this->featureList = $value; } /** * Getter of File Format * * MIME format of the binary (e.g. application/zip). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getFileFormat() { return $this->fileFormat; } /** * Setter of File Format * * MIME format of the binary (e.g. application/zip). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of fileFormat */ public function setFileFormat($value) { $this->fileFormat = $value; } /** * Getter of File Size * * Size of the application / package (e.g. 18MB). In the absence of a * unit (MB, KB etc.), KB will be assumed. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return int */ public function getFileSize() { return $this->fileSize; } /** * Setter of File Size * * Size of the application / package (e.g. 18MB). In the absence of a * unit (MB, KB etc.), KB will be assumed.
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param integer|long $value Value of fileSize */ public function setFileSize($value) { $this->fileSize = $value; } /** * Getter of Install Url * * URL at which the app may be installed, if different from the URL of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getInstallUrl() { return $this->installUrl; } /** * Setter of Install Url * * URL at which the app may be installed, if different from the URL of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of installUrl */ public function setInstallUrl($value) { $this->installUrl = $value; } /** * Getter of Memory Requirements * * Minimum memory requirements. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string|string */ public function getMemoryRequirements() { return $this->memoryRequirements; } /** * Setter of Memory Requirements * * Minimum memory requirements. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string|string $value Value of memoryRequirements */ public function setMemoryRequirements($value) { $this->memoryRequirements = $value; } /** * Getter of Operating System * * Operating systems supported (Windows 7, OSX 10.6, Android 1.6). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getOperatingSystem() { return $this->operatingSystem; } /** * Setter of Operating System * * Operating systems supported (Windows 7, OSX 10.6, Android 1.6). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of operatingSystem */ public function setOperatingSystem($value) { $this->operatingSystem = $value; } /** * Getter of Permissions * * Permission(s) required to run the app (for example, a mobile app may * require full internet access or may run only on wifi). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPermissions() { return $this->permissions; } /** * Setter of Permissions * * Permission(s) required to run the app (for example, a mobile app may * require full internet access or may run only on wifi). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of permissions */ public function setPermissions($value) { $this->permissions = $value; } /** * Getter of Processor Requirements * * Processor architecture required to run the application (e.g. IA64). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getProcessorRequirements() { return $this->processorRequirements; } /** * Setter of Processor Requirements * * Processor architecture required to run the application (e.g. IA64). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of processorRequirements */ public function setProcessorRequirements($value) { $this->processorRequirements = $value; } /** * Getter of Release Notes * * Description of what changed in this version. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string|string */ public function getReleaseNotes() { return $this->releaseNotes; } /** * Setter of Release Notes * * Description of what changed in this version. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string|string $value Value of releaseNotes */ public function setReleaseNotes($value) { $this->releaseNotes = $value; } /** * Getter of Requirements * * Component dependency requirements for application. This includes * runtime environments and shared libraries that are not included in the * application distribution package, but required to run the application * (Examples: DirectX, Java or .NET runtime). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string|string */ public function getRequirements() { return $this->requirements; } /** * Setter of Requirements * * Component dependency requirements for application. This includes * runtime environments and shared libraries that are not included in the * application distribution package, but required to run the application * (Examples: DirectX, Java or .NET runtime). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string|string $value Value of requirements */ public function setRequirements($value) { $this->requirements = $value; } /** * Getter of Screenshot * * A link to a screenshot image of the app. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getScreenshot() { return $this->screenshot; } /** * Setter of Screenshot * * A link to a screenshot image of the app. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of screenshot */ public function setScreenshot($value) { $this->screenshot = $value; } /** * Getter of Software Version * * Version of the software instance. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSoftwareVersion() { return $this->softwareVersion; } /** * Setter of Software Version * * Version of the software instance. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of softwareVersion */ public function setSoftwareVersion($value) { $this->softwareVersion = $value; } /** * Getter of Storage Requirements * * Storage requirements (free space required). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string|string */ public function getStorageRequirements() { return $this->storageRequirements; } /** * Setter of Storage Requirements * * Storage requirements (free space required). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string|string $value Value of storageRequirements */ public function setStorageRequirements($value) { $this->storageRequirements = $value; } } <file_sep>/GenerationSamples/MedicalAudienceBundle/Model/AbstractMedicalAudience.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\MedicalAudienceBundle\Model; use SchemaRepository\Bundle\MedicalAudienceBundle\Model\MedicalAudienceInterface; use SchemaRepository\Bundle\AudienceBundle\Model\AbstractAudience; /** * Model of Medical Audience * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MedicalAudienceBundle\Model */ abstract class AbstractMedicalAudience extends AbstractAudience implements MedicalAudienceInterface { /** * Any alternate name for this medical entity. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $alternateName; /** * A medical code for the entity, taken from a controlled vocabulary or * ontology such as ICD-9, DiseasesDB, MeSH, SNOMED-CT, RxNorm, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $code; /** * A medical guideline related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $guideline; /** * Expectations for health conditions of target audience * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $healthCondition; /** * The system of medicine that includes this MedicalEntity, for example * 'evidence-based', 'homeopathic', 'chiropractic', etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $medicineSystem; /** * If applicable, the organization that officially recognizes this entity * as part of its endorsed system of medicine. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $recognizingAuthority; /** * If applicable, a medical specialty in which this entity is relevant. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $relevantSpecialty; /** * A medical study or trial related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $study; /** * The gender of the person or audience. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $suggestedGender; /** * Maximal age recommended for viewing content * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var integer|long|float|decimal */ protected $suggestedMaxAge; /** * Minimal age recommended for viewing content * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var integer|long|float|decimal */ protected $suggestedMinAge; /** * Getter of Alternate Name * * Any alternate name for this medical entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAlternateName() { return $this->alternateName; } /** * Setter of Alternate Name * * Any alternate name for this medical entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of alternateName */ public function setAlternateName($value) { $this->alternateName = $value; } /** * Getter of Code * * A medical code for the entity, taken from a controlled vocabulary or * ontology such as ICD-9, DiseasesDB, MeSH, SNOMED-CT, RxNorm, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCode() { return $this->code; } /** * Setter of Code * * A medical code for the entity, taken from a controlled vocabulary or * ontology such as ICD-9, DiseasesDB, MeSH, SNOMED-CT, RxNorm, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of code */ public function setCode($value) { $this->code = $value; } /** * Getter of Guideline * * A medical guideline related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getGuideline() { return $this->guideline; } /** * Setter of Guideline * * A medical guideline related to this entity. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of guideline */ public function setGuideline($value) { $this->guideline = $value; } /** * Getter of Health Condition * * Expectations for health conditions of target audience * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getHealthCondition() { return $this->healthCondition; } /** * Setter of Health Condition * * Expectations for health conditions of target audience * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of healthCondition */ public function setHealthCondition($value) { $this->healthCondition = $value; } /** * Getter of Medicine System * * The system of medicine that includes this MedicalEntity, for example * 'evidence-based', 'homeopathic', 'chiropractic', etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMedicineSystem() { return $this->medicineSystem; } /** * Setter of Medicine System * * The system of medicine that includes this MedicalEntity, for example * 'evidence-based', 'homeopathic', 'chiropractic', etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of medicineSystem */ public function setMedicineSystem($value) { $this->medicineSystem = $value; } /** * Getter of Recognizing Authority * * If applicable, the organization that officially recognizes this entity * as part of its endorsed system of medicine. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRecognizingAuthority() { return $this->recognizingAuthority; } /** * Setter of Recognizing Authority * * If applicable, the organization that officially recognizes this entity * as part of its endorsed system of medicine. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of recognizingAuthority */ public function setRecognizingAuthority($value) { $this->recognizingAuthority = $value; } /** * Getter of Relevant Specialty * * If applicable, a medical specialty in which this entity is relevant. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelevantSpecialty() { return $this->relevantSpecialty; } /** * Setter of Relevant Specialty * * If applicable, a medical specialty in which this entity is relevant. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of relevantSpecialty */ public function setRelevantSpecialty($value) { $this->relevantSpecialty = $value; } /** * Getter of Study * * A medical study or trial related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStudy() { return $this->study; } /** * Setter of Study * * A medical study or trial related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of study */ public function setStudy($value) { $this->study = $value; } /** * Getter of Suggested Gender * * The gender of the person or audience. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSuggestedGender() { return $this->suggestedGender; } /** * Setter of Suggested Gender * * The gender of the person or audience. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of suggestedGender */ public function setSuggestedGender($value) { $this->suggestedGender = $value; } /** * Getter of Suggested Max Age * * Maximal age recommended for viewing content * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getSuggestedMaxAge() { return $this->suggestedMaxAge; } /** * Setter of Suggested Max Age * * Maximal age recommended for viewing content * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param integer|long|float|decimal $value Value of suggestedMaxAge */ public function setSuggestedMaxAge($value) { $this->suggestedMaxAge = $value; } /** * Getter of Suggested Min Age * * Minimal age recommended for viewing content * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getSuggestedMinAge() { return $this->suggestedMinAge; } /** * Setter of Suggested Min Age * * Minimal age recommended for viewing content * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param integer|long|float|decimal $value Value of suggestedMinAge */ public function setSuggestedMinAge($value) { $this->suggestedMinAge = $value; } } <file_sep>/GenerationSamples/EventBundle/Model/EventInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the 
Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\EventBundle\Model; use SchemaRepository\Bundle\ThingBundle\Model\ThingInterface; /** * Interface of Event Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\EventBundle\Model */ interface EventInterface extends ThingInterface { /** * Getter of Attendee * * A person or organization attending the event. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAttendee(); /** * Getter of Attendees * * A person attending the event (legacy spelling; see singular form, * attendee). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getAttendees(); /** * Getter of Duration * * The duration of the item (movie, audio recording, event, etc.) in ISO * 8601 date format. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDuration(); /** * Getter of End Date * * The end date and time of the event (in ISO 8601 date format). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEndDate(); /** * Getter of Location * * The location of the event, organization or action. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getLocation(); /** * Getter of Offers * * An offer to sell this item—for example, an offer to sell a product, * the DVD of a movie, or tickets to an event. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOffers(); /** * Getter of Performer * * A performer at the event—for example, a presenter, musician, musical * group or actor. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPerformer(); /** * Getter of Performers * * The main performer or performers of the event—for example, a * presenter, musician, or actor (legacy spelling; see singular form, * performer). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getPerformers(); /** * Getter of Start Date * * The start date and time of the event (in ISO 8601 date format). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStartDate(); /** * Getter of Sub Event * * An Event that is part of this event. For example, a conference event * includes many presentations, each are a subEvent of the conference. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSubEvent(); /** * Getter of Sub Events * * Events that are a part of this event. For example, a conference event * includes many presentations, each are subEvents of the conference * (legacy spelling; see singular form, subEvent). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getSubEvents(); /** * Getter of Super Event * * An event that this event is a part of. For example, a collection of * individual music performances might each have a music festival as * their superEvent. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSuperEvent(); } <file_sep>/GenerationSamples/ExercisePlanBundle/Model/AbstractExercisePlan.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\ExercisePlanBundle\Model; use SchemaRepository\Bundle\ExercisePlanBundle\Model\ExercisePlanInterface; use SchemaRepository\Bundle\CreativeWorkBundle\Model\AbstractCreativeWork; /** * Model of Exercise Plan * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\ExercisePlanBundle\Model */ abstract class AbstractExercisePlan extends AbstractCreativeWork implements ExercisePlanInterface { /** * Length of time to engage in the activity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $activityDuration; /** * How often one should engage in the activity. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $activityFrequency; /** * Any additional component of the exercise prescription that may need to * be articulated to the patient. This may include the order of * exercises, the number of repetitions of movement, quantitative * distance, progressions over time, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $additionalVariable; /** * A possible complication and/or side effect of this therapy. If it is * known that an adverse outcome is serious (resulting in death, * disability, or permanent damage; requiring hospitalization; or is * otherwise life-threatening or requires immediate medical attention), * tag it as a seriouseAdverseOutcome instead. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $adverseOutcome; /** * Any alternate name for this medical entity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $alternateName; /** * The anatomy of the underlying organ system or structures associated * with this entity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $associatedAnatomy; /** * A category for the item. Greater signs or slashes can be used to * informally indicate a category hierarchy. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $category; /** * A medical code for the entity, taken from a controlled vocabulary or * ontology such as ICD-9, DiseasesDB, MeSH, SNOMED-CT, RxNorm, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $code; /** * A contraindication for this therapy. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $contraindication; /** * A therapy that duplicates or overlaps this one. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $duplicateTherapy; /** * The characteristics of associated patients, such as age, gender, race * etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $epidemiology; /** * Type(s) of exercise or activity, such as strength training, * flexibility training, aerobics, cardiac rehabilitation, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $exerciseType; /** * A medical guideline related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $guideline; /** * A factor that indicates use of this therapy for treatment and/or * prevention of a condition, symptom, etc. For therapies such as drugs, * indications can include both officially-approved indications as well * as off-label uses. These can be distinguished by using the * ApprovedIndication subtype of MedicalIndication. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $indication; /** * Quantitative measure gauging the degree of force involved in the * exercise, for example, heartbeats per minute. May include the velocity * of the movement. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $intensity; /** * The system of medicine that includes this MedicalEntity, for example * 'evidence-based', 'homeopathic', 'chiropractic', etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $medicineSystem; /** * Changes in the normal mechanical, physical, and biochemical functions * that are associated with this activity or condition. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $pathophysiology; /** * If applicable, the organization that officially recognizes this entity * as part of its endorsed system of medicine. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $recognizingAuthority; /** * If applicable, a medical specialty in which this entity is relevant. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $relevantSpecialty; /** * Number of times one should repeat the activity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var integer|long|float|decimal */ protected $repetitions; /** * How often one should break from the activity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $restPeriods; /** * A possible serious complication and/or serious side effect of this * therapy. Serious adverse outcomes include those that are * life-threatening; result in death, disability, or permanent damage; * require hospitalization or prolong existing hospitalization; cause * congenital anomalies or birth defects; or jeopardize the patient and * may require medical or surgical intervention to prevent one of the * outcomes in this definition. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $seriousAdverseOutcome; /** * A medical study or trial related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $study; /** * Quantitative measure of the physiologic output of the exercise; also * referred to as energy expenditure. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $workload; /** * Getter of Activity Duration * * Length of time to engage in the activity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getActivityDuration() { return $this->activityDuration; } /** * Setter of Activity Duration * * Length of time to engage in the activity. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of activityDuration */ public function setActivityDuration($value) { $this->activityDuration = $value; } /** * Getter of Activity Frequency * * How often one should engage in the activity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getActivityFrequency() { return $this->activityFrequency; } /** * Setter of Activity Frequency * * How often one should engage in the activity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of activityFrequency */ public function setActivityFrequency($value) { $this->activityFrequency = $value; } /** * Getter of Additional Variable * * Any additional component of the exercise prescription that may need to * be articulated to the patient. This may include the order of * exercises, the number of repetitions of movement, quantitative * distance, progressions over time, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAdditionalVariable() { return $this->additionalVariable; } /** * Setter of Additional Variable * * Any additional component of the exercise prescription that may need to * be articulated to the patient. This may include the order of * exercises, the number of repetitions of movement, quantitative * distance, progressions over time, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of additionalVariable */ public function setAdditionalVariable($value) { $this->additionalVariable = $value; } /** * Getter of Adverse Outcome * * A possible complication and/or side effect of this therapy. If it is * known that an adverse outcome is serious (resulting in death, * disability, or permanent damage; requiring hospitalization; or is * otherwise life-threatening or requires immediate medical attention), * tag it as a seriouseAdverseOutcome instead. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAdverseOutcome() { return $this->adverseOutcome; } /** * Setter of Adverse Outcome * * A possible complication and/or side effect of this therapy. If it is * known that an adverse outcome is serious (resulting in death, * disability, or permanent damage; requiring hospitalization; or is * otherwise life-threatening or requires immediate medical attention), * tag it as a seriouseAdverseOutcome instead. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of adverseOutcome */ public function setAdverseOutcome($value) { $this->adverseOutcome = $value; } /** * Getter of Alternate Name * * Any alternate name for this medical entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAlternateName() { return $this->alternateName; } /** * Setter of Alternate Name * * Any alternate name for this medical entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of alternateName */ public function setAlternateName($value) { $this->alternateName = $value; } /** * Getter of Associated Anatomy * * The anatomy of the underlying organ system or structures associated * with this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAssociatedAnatomy() { return $this->associatedAnatomy; } /** * Setter of Associated Anatomy * * The anatomy of the underlying organ system or structures associated * with this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of associatedAnatomy */ public function setAssociatedAnatomy($value) { $this->associatedAnatomy = $value; } /** * Getter of Category * * A category for the item. Greater signs or slashes can be used to * informally indicate a category hierarchy. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCategory() { return $this->category; } /** * Setter of Category * * A category for the item. Greater signs or slashes can be used to * informally indicate a category hierarchy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of category */ public function setCategory($value) { $this->category = $value; } /** * Getter of Code * * A medical code for the entity, taken from a controlled vocabulary or * ontology such as ICD-9, DiseasesDB, MeSH, SNOMED-CT, RxNorm, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCode() { return $this->code; } /** * Setter of Code * * A medical code for the entity, taken from a controlled vocabulary or * ontology such as ICD-9, DiseasesDB, MeSH, SNOMED-CT, RxNorm, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of code */ public function setCode($value) { $this->code = $value; } /** * Getter of Contraindication * * A contraindication for this therapy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContraindication() { return $this->contraindication; } /** * Setter of Contraindication * * A contraindication for this therapy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of contraindication */ public function setContraindication($value) { $this->contraindication = $value; } /** * Getter of Duplicate Therapy * * A therapy that duplicates or overlaps this one. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDuplicateTherapy() { return $this->duplicateTherapy; } /** * Setter of Duplicate Therapy * * A therapy that duplicates or overlaps this one. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of duplicateTherapy */ public function setDuplicateTherapy($value) { $this->duplicateTherapy = $value; } /** * Getter of Epidemiology * * The characteristics of associated patients, such as age, gender, race * etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEpidemiology() { return $this->epidemiology; } /** * Setter of Epidemiology * * The characteristics of associated patients, such as age, gender, race * etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of epidemiology */ public function setEpidemiology($value) { $this->epidemiology = $value; } /** * Getter of Exercise Type * * Type(s) of exercise or activity, such as strength training, * flexibility training, aerobics, cardiac rehabilitation, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getExerciseType() { return $this->exerciseType; } /** * Setter of Exercise Type * * Type(s) of exercise or activity, such as strength training, * flexibility training, aerobics, cardiac rehabilitation, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of exerciseType */ public function setExerciseType($value) { $this->exerciseType = $value; } /** * Getter of Guideline * * A medical guideline related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getGuideline() { return $this->guideline; } /** * Setter of Guideline * * A medical guideline related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of guideline */ public function setGuideline($value) { $this->guideline = $value; } /** * Getter of Indication * * A factor that indicates use of this therapy for treatment and/or * prevention of a condition, symptom, etc. 
For therapies such as drugs, * indications can include both officially-approved indications as well * as off-label uses. These can be distinguished by using the * ApprovedIndication subtype of MedicalIndication. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIndication() { return $this->indication; } /** * Setter of Indication * * A factor that indicates use of this therapy for treatment and/or * prevention of a condition, symptom, etc. For therapies such as drugs, * indications can include both officially-approved indications as well * as off-label uses. These can be distinguished by using the * ApprovedIndication subtype of MedicalIndication. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of indication */ public function setIndication($value) { $this->indication = $value; } /** * Getter of Intensity * * Quantitative measure gauging the degree of force involved in the * exercise, for example, heartbeats per minute. May include the velocity * of the movement. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getIntensity() { return $this->intensity; } /** * Setter of Intensity * * Quantitative measure gauging the degree of force involved in the * exercise, for example, heartbeats per minute. May include the velocity * of the movement. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of intensity */ public function setIntensity($value) { $this->intensity = $value; } /** * Getter of Medicine System * * The system of medicine that includes this MedicalEntity, for example * 'evidence-based', 'homeopathic', 'chiropractic', etc. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getMedicineSystem() { return $this->medicineSystem; } /** * Setter of Medicine System * * The system of medicine that includes this MedicalEntity, for example * 'evidence-based', 'homeopathic', 'chiropractic', etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of medicineSystem */ public function setMedicineSystem($value) { $this->medicineSystem = $value; } /** * Getter of Pathophysiology * * Changes in the normal mechanical, physical, and biochemical functions * that are associated with this activity or condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPathophysiology() { return $this->pathophysiology; } /** * Setter of Pathophysiology * * Changes in the normal mechanical, physical, and biochemical functions * that are associated with this activity or condition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of pathophysiology */ public function setPathophysiology($value) { $this->pathophysiology = $value; } /** * Getter of Recognizing Authority * * If applicable, the organization that officially recognizes this entity * as part of its endorsed system of medicine. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRecognizingAuthority() { return $this->recognizingAuthority; } /** * Setter of Recognizing Authority * * If applicable, the organization that officially recognizes this entity * as part of its endorsed system of medicine. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of recognizingAuthority */ public function setRecognizingAuthority($value) { $this->recognizingAuthority = $value; } /** * Getter of Relevant Specialty * * If applicable, a medical specialty in which this entity is relevant. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelevantSpecialty() { return $this->relevantSpecialty; } /** * Setter of Relevant Specialty * * If applicable, a medical specialty in which this entity is relevant. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of relevantSpecialty */ public function setRelevantSpecialty($value) { $this->relevantSpecialty = $value; } /** * Getter of Repetitions * * Number of times one should repeat the activity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getRepetitions() { return $this->repetitions; } /** * Setter of Repetitions * * Number of times one should repeat the activity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param integer|long|float|decimal $value Value of repetitions */ public function setRepetitions($value) { $this->repetitions = $value; } /** * Getter of Rest Periods * * How often one should break from the activity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getRestPeriods() { return $this->restPeriods; } /** * Setter of Rest Periods * * How often one should break from the activity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of restPeriods */ public function setRestPeriods($value) { $this->restPeriods = $value; } /** * Getter of Serious Adverse Outcome * * A possible serious complication and/or serious side effect of this * therapy. Serious adverse outcomes include those that are * life-threatening; result in death, disability, or permanent damage; * require hospitalization or prolong existing hospitalization; cause * congenital anomalies or birth defects; or jeopardize the patient and * may require medical or surgical intervention to prevent one of the * outcomes in this definition. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSeriousAdverseOutcome() { return $this->seriousAdverseOutcome; } /** * Setter of Serious Adverse Outcome * * A possible serious complication and/or serious side effect of this * therapy. Serious adverse outcomes include those that are * life-threatening; result in death, disability, or permanent damage; * require hospitalization or prolong existing hospitalization; cause * congenital anomalies or birth defects; or jeopardize the patient and * may require medical or surgical intervention to prevent one of the * outcomes in this definition. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of seriousAdverseOutcome */ public function setSeriousAdverseOutcome($value) { $this->seriousAdverseOutcome = $value; } /** * Getter of Study * * A medical study or trial related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getStudy() { return $this->study; } /** * Setter of Study * * A medical study or trial related to this entity. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of study */ public function setStudy($value) { $this->study = $value; } /** * Getter of Workload * * Quantitative measure of the physiologic output of the exercise; also * referred to as energy expenditure. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getWorkload() { return $this->workload; } /** * Setter of Workload * * Quantitative measure of the physiologic output of the exercise; also * referred to as energy expenditure. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of workload */ public function setWorkload($value) { $this->workload = $value; } } <file_sep>/GenerationSamples/AnatomicalSystemBundle/Model/AbstractAnatomicalSystem.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\AnatomicalSystemBundle\Model; use SchemaRepository\Bundle\AnatomicalSystemBundle\Model\AnatomicalSystemInterface; use SchemaRepository\Bundle\MedicalEntityBundle\Model\AbstractMedicalEntity; /** * Model of Anatomical System * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\AnatomicalSystemBundle\Model */ abstract class AbstractAnatomicalSystem extends AbstractMedicalEntity implements AnatomicalSystemInterface { /** * If applicable, a description of the pathophysiology associated with * the anatomical system, including potential abnormal changes in the * mechanical, physical, and biochemical functions of the system. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $associatedPathophysiology; /** * The underlying anatomical structures, such as organs, that comprise * the anatomical system. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $comprisedOf; /** * A medical condition associated with this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $relatedCondition; /** * Related anatomical structure(s) that are not part of the system but * relate or connect to it, such as vascular bundles associated with an * organ system. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $relatedStructure; /** * A medical therapy related to this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $relatedTherapy; /** * Getter of Associated Pathophysiology * * If applicable, a description of the pathophysiology associated with * the anatomical system, including potential abnormal changes in the * mechanical, physical, and biochemical functions of the system. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAssociatedPathophysiology() { return $this->associatedPathophysiology; } /** * Setter of Associated Pathophysiology * * If applicable, a description of the pathophysiology associated with * the anatomical system, including potential abnormal changes in the * mechanical, physical, and biochemical functions of the system. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of associatedPathophysiology */ public function setAssociatedPathophysiology($value) { $this->associatedPathophysiology = $value; } /** * Getter of Comprised of * * The underlying anatomical structures, such as organs, that comprise * the anatomical system. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getComprisedOf() { return $this->comprisedOf; } /** * Setter of Comprised of * * The underlying anatomical structures, such as organs, that comprise * the anatomical system. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of comprisedOf */ public function setComprisedOf($value) { $this->comprisedOf = $value; } /** * Getter of Related Condition * * A medical condition associated with this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelatedCondition() { return $this->relatedCondition; } /** * Setter of Related Condition * * A medical condition associated with this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of relatedCondition */ public function setRelatedCondition($value) { $this->relatedCondition = $value; } /** * Getter of Related Structure * * Related anatomical structure(s) that are not part of the system but * relate or connect to it, such as vascular bundles associated with an * organ system. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelatedStructure() { return $this->relatedStructure; } /** * Setter of Related Structure * * Related anatomical structure(s) that are not part of the system but * relate or connect to it, such as vascular bundles associated with an * organ system. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of relatedStructure */ public function setRelatedStructure($value) { $this->relatedStructure = $value; } /** * Getter of Related Therapy * * A medical therapy related to this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelatedTherapy() { return $this->relatedTherapy; } /** * Setter of Related Therapy * * A medical therapy related to this anatomy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of relatedTherapy */ public function setRelatedTherapy($value) { $this->relatedTherapy = $value; } } <file_sep>/GenerationSamples/GeoShapeBundle/Model/GeoShapeInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\GeoShapeBundle\Model; use SchemaRepository\Bundle\StructuredValueBundle\Model\StructuredValueInterface; /** * Interface of Geo Shape Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\GeoShapeBundle\Model */ interface GeoShapeInterface extends StructuredValueInterface { /** * Getter of Box * * A polygon is the area enclosed by a point-to-point path for which the * starting and ending points are the same. A polygon is expressed as a * series of four or more spacedelimited points where the first and final * points are identical. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getBox(); /** * Getter of Circle * * A circle is the circular region of a specified radius centered at a * specified latitude and longitude. A circle is expressed as a pair * followed by a radius in meters. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getCircle(); /** * Getter of Elevation * * The elevation of a location. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string|integer|long|float|decimal */ public function getElevation(); /** * Getter of Line * * A line is a point-to-point path consisting of two or more points. A * line is expressed as a series of two or more point objects separated * by space. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getLine(); /** * Getter of Polygon * * A polygon is the area enclosed by a point-to-point path for which the * starting and ending points are the same. 
A polygon is expressed as a * series of four or more spacedelimited points where the first and final * points are identical. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPolygon(); } <file_sep>/GenerationSamples/JobPostingBundle/Model/AbstractJobPosting.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\JobPostingBundle\Model; use SchemaRepository\Bundle\JobPostingBundle\Model\JobPostingInterface; use SchemaRepository\Bundle\IntangibleBundle\Model\AbstractIntangible; /** * Model of Job Posting * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\JobPostingBundle\Model */ abstract class AbstractJobPosting extends AbstractIntangible implements JobPostingInterface { /** * The base salary of the job. 
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var integer|long|float|decimal
     */
    protected $baseSalary;

    /**
     * Description of benefits associated with the job.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $benefits;

    /**
     * Publication date for the job posting.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var mixed
     */
    protected $datePosted;

    /**
     * Educational background needed for the position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $educationRequirements;

    /**
     * Type of employment (e.g. full-time, part-time, contract, temporary,
     * seasonal, internship).
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $employmentType;

    /**
     * Description of skills and experience needed for the position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $experienceRequirements;

    /**
     * Organization offering the job position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var mixed
     */
    protected $hiringOrganization;

    /**
     * Description of bonus and commission compensation aspects of the job.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $incentives;

    /**
     * The industry associated with the job position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $industry;

    /**
     * A (typically single) geographic location associated with the job
     * position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var mixed
     */
    protected $jobLocation;

    /**
     * Category or categories describing the job. Use BLS O*NET-SOC taxonomy:
     * http://www.onetcenter.org/taxonomy.html. Ideally includes textual
     * label and formal code, with the property repeated for each applicable
     * value.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $occupationalCategory;

    /**
     * Specific qualifications required for this role.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $qualifications;

    /**
     * Responsibilities associated with this role.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $responsibilities;

    /**
     * The currency (coded using ISO 4217,
     * http://en.wikipedia.org/wiki/ISO_4217) used for the main salary
     * information in this job posting.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $salaryCurrency;

    /**
     * Skills required to fulfill this role.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $skills;

    /**
     * Any special commitments associated with this job posting. Valid
     * entries include VeteranCommit, MilitarySpouseCommit, etc.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $specialCommitments;

    /**
     * The title of the job.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $title;

    /**
     * The typical working hours for this job (e.g. 1st shift, night shift,
     * 8am-5pm).
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $workHours;

    /**
     * Getter of Base Salary
     *
     * The base salary of the job.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return integer|long|float|decimal
     */
    public function getBaseSalary()
    {
        return $this->baseSalary;
    }

    /**
     * Setter of Base Salary
     *
     * The base salary of the job.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param integer|long|float|decimal $value Value of baseSalary
     */
    public function setBaseSalary($value)
    {
        $this->baseSalary = $value;
    }

    /**
     * Getter of Benefits
     *
     * Description of benefits associated with the job.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getBenefits()
    {
        return $this->benefits;
    }

    /**
     * Setter of Benefits
     *
     * Description of benefits associated with the job.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of benefits
     */
    public function setBenefits($value)
    {
        $this->benefits = $value;
    }

    /**
     * Getter of Date Posted
     *
     * Publication date for the job posting.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return mixed
     */
    public function getDatePosted()
    {
        return $this->datePosted;
    }

    /**
     * Setter of Date Posted
     *
     * Publication date for the job posting.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param mixed $value Value of datePosted
     */
    public function setDatePosted($value)
    {
        $this->datePosted = $value;
    }

    /**
     * Getter of Education Requirements
     *
     * Educational background needed for the position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getEducationRequirements()
    {
        return $this->educationRequirements;
    }

    /**
     * Setter of Education Requirements
     *
     * Educational background needed for the position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of educationRequirements
     */
    public function setEducationRequirements($value)
    {
        $this->educationRequirements = $value;
    }

    /**
     * Getter of Employment Type
     *
     * Type of employment (e.g. full-time, part-time, contract, temporary,
     * seasonal, internship).
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getEmploymentType()
    {
        return $this->employmentType;
    }

    /**
     * Setter of Employment Type
     *
     * Type of employment (e.g. full-time, part-time, contract, temporary,
     * seasonal, internship).
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of employmentType
     */
    public function setEmploymentType($value)
    {
        $this->employmentType = $value;
    }

    /**
     * Getter of Experience Requirements
     *
     * Description of skills and experience needed for the position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getExperienceRequirements()
    {
        return $this->experienceRequirements;
    }

    /**
     * Setter of Experience Requirements
     *
     * Description of skills and experience needed for the position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of experienceRequirements
     */
    public function setExperienceRequirements($value)
    {
        $this->experienceRequirements = $value;
    }

    /**
     * Getter of Hiring Organization
     *
     * Organization offering the job position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return mixed
     */
    public function getHiringOrganization()
    {
        return $this->hiringOrganization;
    }

    /**
     * Setter of Hiring Organization
     *
     * Organization offering the job position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param mixed $value Value of hiringOrganization
     */
    public function setHiringOrganization($value)
    {
        $this->hiringOrganization = $value;
    }

    /**
     * Getter of Incentives
     *
     * Description of bonus and commission compensation aspects of the job.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getIncentives()
    {
        return $this->incentives;
    }

    /**
     * Setter of Incentives
     *
     * Description of bonus and commission compensation aspects of the job.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of incentives
     */
    public function setIncentives($value)
    {
        $this->incentives = $value;
    }

    /**
     * Getter of Industry
     *
     * The industry associated with the job position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getIndustry()
    {
        return $this->industry;
    }

    /**
     * Setter of Industry
     *
     * The industry associated with the job position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of industry
     */
    public function setIndustry($value)
    {
        $this->industry = $value;
    }

    /**
     * Getter of Job Location
     *
     * A (typically single) geographic location associated with the job
     * position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return mixed
     */
    public function getJobLocation()
    {
        return $this->jobLocation;
    }

    /**
     * Setter of Job Location
     *
     * A (typically single) geographic location associated with the job
     * position.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param mixed $value Value of jobLocation
     */
    public function setJobLocation($value)
    {
        $this->jobLocation = $value;
    }

    /**
     * Getter of Occupational Category
     *
     * Category or categories describing the job. Use BLS O*NET-SOC taxonomy:
     * http://www.onetcenter.org/taxonomy.html. Ideally includes textual
     * label and formal code, with the property repeated for each applicable
     * value.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getOccupationalCategory()
    {
        return $this->occupationalCategory;
    }

    /**
     * Setter of Occupational Category
     *
     * Category or categories describing the job. Use BLS O*NET-SOC taxonomy:
     * http://www.onetcenter.org/taxonomy.html. Ideally includes textual
     * label and formal code, with the property repeated for each applicable
     * value.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of occupationalCategory
     */
    public function setOccupationalCategory($value)
    {
        $this->occupationalCategory = $value;
    }

    /**
     * Getter of Qualifications
     *
     * Specific qualifications required for this role.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getQualifications()
    {
        return $this->qualifications;
    }

    /**
     * Setter of Qualifications
     *
     * Specific qualifications required for this role.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of qualifications
     */
    public function setQualifications($value)
    {
        $this->qualifications = $value;
    }

    /**
     * Getter of Responsibilities
     *
     * Responsibilities associated with this role.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getResponsibilities()
    {
        return $this->responsibilities;
    }

    /**
     * Setter of Responsibilities
     *
     * Responsibilities associated with this role.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of responsibilities
     */
    public function setResponsibilities($value)
    {
        $this->responsibilities = $value;
    }

    /**
     * Getter of Salary Currency
     *
     * The currency (coded using ISO 4217,
     * http://en.wikipedia.org/wiki/ISO_4217) used for the main salary
     * information in this job posting.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getSalaryCurrency()
    {
        return $this->salaryCurrency;
    }

    /**
     * Setter of Salary Currency
     *
     * The currency (coded using ISO 4217,
     * http://en.wikipedia.org/wiki/ISO_4217) used for the main salary
     * information in this job posting.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of salaryCurrency
     */
    public function setSalaryCurrency($value)
    {
        $this->salaryCurrency = $value;
    }

    /**
     * Getter of Skills
     *
     * Skills required to fulfill this role.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getSkills()
    {
        return $this->skills;
    }

    /**
     * Setter of Skills
     *
     * Skills required to fulfill this role.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of skills
     */
    public function setSkills($value)
    {
        $this->skills = $value;
    }

    /**
     * Getter of Special Commitments
     *
     * Any special commitments associated with this job posting. Valid
     * entries include VeteranCommit, MilitarySpouseCommit, etc.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getSpecialCommitments()
    {
        return $this->specialCommitments;
    }

    /**
     * Setter of Special Commitments
     *
     * Any special commitments associated with this job posting. Valid
     * entries include VeteranCommit, MilitarySpouseCommit, etc.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of specialCommitments
     */
    public function setSpecialCommitments($value)
    {
        $this->specialCommitments = $value;
    }

    /**
     * Getter of Title
     *
     * The title of the job.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getTitle()
    {
        return $this->title;
    }

    /**
     * Setter of Title
     *
     * The title of the job.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of title
     */
    public function setTitle($value)
    {
        $this->title = $value;
    }

    /**
     * Getter of Work Hours
     *
     * The typical working hours for this job (e.g. 1st shift, night shift,
     * 8am-5pm).
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getWorkHours()
    {
        return $this->workHours;
    }

    /**
     * Setter of Work Hours
     *
     * The typical working hours for this job (e.g. 1st shift, night shift,
     * 8am-5pm).
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of workHours */ public function setWorkHours($value) { $this->workHours = $value; } } <file_sep>/GenerationSamples/ReviewBundle/Model/AbstractReview.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\ReviewBundle\Model; use SchemaRepository\Bundle\ReviewBundle\Model\ReviewInterface; use SchemaRepository\Bundle\CreativeWorkBundle\Model\AbstractCreativeWork; /** * Model of Review * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\ReviewBundle\Model */ abstract class AbstractReview extends AbstractCreativeWork implements ReviewInterface { /** * The item that is being reviewed/rated. 
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var mixed
     */
    protected $itemReviewed;

    /**
     * The actual body of the review.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $reviewBody;

    /**
     * The rating given in this review. Note that reviews can themselves be
     * rated. The reviewRating applies to the rating given by the review. The
     * aggregateRating property applies to the review itself, as a creative
     * work.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var mixed
     */
    protected $reviewRating;

    /**
     * Getter of Item Reviewed
     *
     * The item that is being reviewed/rated.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return mixed
     */
    public function getItemReviewed()
    {
        return $this->itemReviewed;
    }

    /**
     * Setter of Item Reviewed
     *
     * The item that is being reviewed/rated.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param mixed $value Value of itemReviewed
     */
    public function setItemReviewed($value)
    {
        $this->itemReviewed = $value;
    }

    /**
     * Getter of Review Body
     *
     * The actual body of the review.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getReviewBody()
    {
        return $this->reviewBody;
    }

    /**
     * Setter of Review Body
     *
     * The actual body of the review.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of reviewBody
     */
    public function setReviewBody($value)
    {
        $this->reviewBody = $value;
    }

    /**
     * Getter of Review Rating
     *
     * The rating given in this review. Note that reviews can themselves be
     * rated. The reviewRating applies to the rating given by the review. The
     * aggregateRating property applies to the review itself, as a creative
     * work.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return mixed
     */
    public function getReviewRating()
    {
        return $this->reviewRating;
    }

    /**
     * Setter of Review Rating
     *
     * The rating given in this review. Note that reviews can themselves be
     * rated.
The reviewRating applies to rating given by the review. The * aggregateRating property applies to the review itself, as a creative * work. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of reviewRating */ public function setReviewRating($value) { $this->reviewRating = $value; } } <file_sep>/GenerationSamples/DrugStrengthBundle/Model/AbstractDrugStrength.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
 */

namespace SchemaRepository\Bundle\DrugStrengthBundle\Model;

use SchemaRepository\Bundle\DrugStrengthBundle\Model\DrugStrengthInterface;
use SchemaRepository\Bundle\MedicalIntangibleBundle\Model\AbstractMedicalIntangible;

/**
 * Model of Drug Strength
 *
 * Generated base model for the schema.org DrugStrength type: the strength
 * of one active ingredient (value + unit) and where it is available.
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\DrugStrengthBundle\Model
 */
abstract class AbstractDrugStrength extends AbstractMedicalIntangible implements DrugStrengthInterface
{
    /**
     * An active ingredient, typically chemical compounds and/or biologic
     * substances.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $activeIngredient;

    /**
     * The location in which the strength is available.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var mixed
     */
    protected $availableIn;

    /**
     * The units of an active ingredient's strength, e.g. mg.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var string
     */
    protected $strengthUnit;

    /**
     * The value of an active ingredient's strength, e.g. 325.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access protected
     * @var integer|long|float|decimal
     */
    protected $strengthValue;

    /**
     * Getter of Active Ingredient
     *
     * An active ingredient, typically chemical compounds and/or biologic
     * substances.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getActiveIngredient()
    {
        return $this->activeIngredient;
    }

    /**
     * Setter of Active Ingredient
     *
     * An active ingredient, typically chemical compounds and/or biologic
     * substances.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of activeIngredient
     */
    public function setActiveIngredient($value)
    {
        $this->activeIngredient = $value;
    }

    /**
     * Getter of Available in
     *
     * The location in which the strength is available.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return mixed
     */
    public function getAvailableIn()
    {
        return $this->availableIn;
    }

    /**
     * Setter of Available in
     *
     * The location in which the strength is available.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param mixed $value Value of availableIn
     */
    public function setAvailableIn($value)
    {
        $this->availableIn = $value;
    }

    /**
     * Getter of Strength Unit
     *
     * The units of an active ingredient's strength, e.g. mg.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return string
     */
    public function getStrengthUnit()
    {
        return $this->strengthUnit;
    }

    /**
     * Setter of Strength Unit
     *
     * The units of an active ingredient's strength, e.g. mg.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @param string $value Value of strengthUnit
     */
    public function setStrengthUnit($value)
    {
        $this->strengthUnit = $value;
    }

    /**
     * Getter of Strength Value
     *
     * The value of an active ingredient's strength, e.g. 325.
     *
     * @author SchemaGenerator <<EMAIL>>
     *
     * @access public
     *
     * @return integer|long|float|decimal
     */
    public function getStrengthValue()
    {
        return $this->strengthValue;
    }

    /**
     * Setter of Strength Value
     *
     * The value of an active ingredient's strength, e.g. 325.
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param integer|long|float|decimal $value Value of strengthValue */ public function setStrengthValue($value) { $this->strengthValue = $value; } } <file_sep>/GenerationSamples/ProductBundle/Model/AbstractProduct.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\ProductBundle\Model; use SchemaRepository\Bundle\ProductBundle\Model\ProductInterface; use SchemaRepository\Bundle\ThingBundle\Model\AbstractThing; /** * Model of Product * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\ProductBundle\Model */ abstract class AbstractProduct extends AbstractThing implements ProductInterface { /** * The overall rating, based on a collection of reviews or ratings, of * the item. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $aggregateRating; /** * The intended audience of the item, i.e. the group for whom the item * was created. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $audience; /** * The brand(s) associated with a product or service, or the brand(s) * maintained by an organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $brand; /** * The color of the product. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $color; /** * The depth of the product. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $depth; /** * The GTIN-13 code of the product, or the product to which the offer * refers. This is equivalent to 13-digit ISBN codes and EAN UCC-13. * Former 12-digit UPC codes can be converted into a GTIN-13 code by * simply adding a preceeding zero. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $gtin13; /** * The GTIN-14 code of the product, or the product to which the offer * refers. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $gtin14; /** * The GTIN-8 code of the product, or the product to which the offer * refers. This code is also known as EAN/UCC-8 or 8-digit EAN. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $gtin8; /** * The height of the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $height; /** * A pointer to another product (or multiple products) for which this * product is an accessory or spare part. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $isAccessoryOrSparePartFor; /** * A pointer to another product (or multiple products) for which this * product is a consumable. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $isConsumableFor; /** * A pointer to another, somehow related product (or multiple products). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $isRelatedTo; /** * A pointer to another, functionally similar product (or multiple * products). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $isSimilarTo; /** * A predefined value from OfferItemCondition or a textual description of * the condition of the product or service, or the products or services * included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $itemCondition; /** * URL of an image for the logo of the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $logo; /** * The manufacturer of the product. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $manufacturer; /** * The model of the product. Use with the URL of a ProductModel or a * textual representation of the model identifier. The URL of the * ProductModel can be from an external source. It is recommended to * additionally provide strong product identifiers via the * gtin8/gtin13/gtin14 and mpn properties. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $model; /** * The Manufacturer Part Number (MPN) of the product, or the product to * which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $mpn; /** * An offer to sell this item—for example, an offer to sell a product, * the DVD of a movie, or tickets to an event. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $offers; /** * The product identifier, such as ISBN. For example: <meta * itemprop='productID' content='isbn:123-456-789'/>. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $productID; /** * The release date of a product or product model. This can be used to * distinguish the exact variant of a product. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $releaseDate; /** * A review of the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $review; /** * Review of the item (legacy spelling; see singular form, review). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $reviews; /** * The Stock Keeping Unit (SKU), i.e. a merchant-specific identifier for * a product or service, or the product to which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $sku; /** * The weight of the product. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $weight; /** * The width of the item. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $width; /** * Getter of Aggregate Rating * * The overall rating, based on a collection of reviews or ratings, of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAggregateRating() { return $this->aggregateRating; } /** * Setter of Aggregate Rating * * The overall rating, based on a collection of reviews or ratings, of * the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of aggregateRating */ public function setAggregateRating($value) { $this->aggregateRating = $value; } /** * Getter of Audience * * The intended audience of the item, i.e. the group for whom the item * was created. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAudience() { return $this->audience; } /** * Setter of Audience * * The intended audience of the item, i.e. 
the group for whom the item * was created. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of audience */ public function setAudience($value) { $this->audience = $value; } /** * Getter of Brand * * The brand(s) associated with a product or service, or the brand(s) * maintained by an organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBrand() { return $this->brand; } /** * Setter of Brand * * The brand(s) associated with a product or service, or the brand(s) * maintained by an organization or business person. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of brand */ public function setBrand($value) { $this->brand = $value; } /** * Getter of Color * * The color of the product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getColor() { return $this->color; } /** * Setter of Color * * The color of the product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of color */ public function setColor($value) { $this->color = $value; } /** * Getter of Depth * * The depth of the product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDepth() { return $this->depth; } /** * Setter of Depth * * The depth of the product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of depth */ public function setDepth($value) { $this->depth = $value; } /** * Getter of Gtin13 * * The GTIN-13 code of the product, or the product to which the offer * refers. This is equivalent to 13-digit ISBN codes and EAN UCC-13. * Former 12-digit UPC codes can be converted into a GTIN-13 code by * simply adding a preceeding zero. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGtin13() { return $this->gtin13; } /** * Setter of Gtin13 * * The GTIN-13 code of the product, or the product to which the offer * refers. This is equivalent to 13-digit ISBN codes and EAN UCC-13. * Former 12-digit UPC codes can be converted into a GTIN-13 code by * simply adding a preceeding zero. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of gtin13 */ public function setGtin13($value) { $this->gtin13 = $value; } /** * Getter of Gtin14 * * The GTIN-14 code of the product, or the product to which the offer * refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGtin14() { return $this->gtin14; } /** * Setter of Gtin14 * * The GTIN-14 code of the product, or the product to which the offer * refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of gtin14 */ public function setGtin14($value) { $this->gtin14 = $value; } /** * Getter of Gtin8 * * The GTIN-8 code of the product, or the product to which the offer * refers. This code is also known as EAN/UCC-8 or 8-digit EAN. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getGtin8() { return $this->gtin8; } /** * Setter of Gtin8 * * The GTIN-8 code of the product, or the product to which the offer * refers. This code is also known as EAN/UCC-8 or 8-digit EAN. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of gtin8 */ public function setGtin8($value) { $this->gtin8 = $value; } /** * Getter of Height * * The height of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getHeight() { return $this->height; } /** * Setter of Height * * The height of the item. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of height */ public function setHeight($value) { $this->height = $value; } /** * Getter of Is Accessory or Spare Part for * * A pointer to another product (or multiple products) for which this * product is an accessory or spare part. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIsAccessoryOrSparePartFor() { return $this->isAccessoryOrSparePartFor; } /** * Setter of Is Accessory or Spare Part for * * A pointer to another product (or multiple products) for which this * product is an accessory or spare part. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of isAccessoryOrSparePartFor */ public function setIsAccessoryOrSparePartFor($value) { $this->isAccessoryOrSparePartFor = $value; } /** * Getter of Is Consumable for * * A pointer to another product (or multiple products) for which this * product is a consumable. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIsConsumableFor() { return $this->isConsumableFor; } /** * Setter of Is Consumable for * * A pointer to another product (or multiple products) for which this * product is a consumable. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of isConsumableFor */ public function setIsConsumableFor($value) { $this->isConsumableFor = $value; } /** * Getter of Is Related to * * A pointer to another, somehow related product (or multiple products). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIsRelatedTo() { return $this->isRelatedTo; } /** * Setter of Is Related to * * A pointer to another, somehow related product (or multiple products). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of isRelatedTo */ public function setIsRelatedTo($value) { $this->isRelatedTo = $value; } /** * Getter of Is Similar to * * A pointer to another, functionally similar product (or multiple * products). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getIsSimilarTo() { return $this->isSimilarTo; } /** * Setter of Is Similar to * * A pointer to another, functionally similar product (or multiple * products). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of isSimilarTo */ public function setIsSimilarTo($value) { $this->isSimilarTo = $value; } /** * Getter of Item Condition * * A predefined value from OfferItemCondition or a textual description of * the condition of the product or service, or the products or services * included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getItemCondition() { return $this->itemCondition; } /** * Setter of Item Condition * * A predefined value from OfferItemCondition or a textual description of * the condition of the product or service, or the products or services * included in the offer. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of itemCondition */ public function setItemCondition($value) { $this->itemCondition = $value; } /** * Getter of Logo * * URL of an image for the logo of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getLogo() { return $this->logo; } /** * Setter of Logo * * URL of an image for the logo of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of logo */ public function setLogo($value) { $this->logo = $value; } /** * Getter of Manufacturer * * The manufacturer of the product. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getManufacturer() { return $this->manufacturer; } /** * Setter of Manufacturer * * The manufacturer of the product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of manufacturer */ public function setManufacturer($value) { $this->manufacturer = $value; } /** * Getter of Model * * The model of the product. Use with the URL of a ProductModel or a * textual representation of the model identifier. The URL of the * ProductModel can be from an external source. It is recommended to * additionally provide strong product identifiers via the * gtin8/gtin13/gtin14 and mpn properties. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getModel() { return $this->model; } /** * Setter of Model * * The model of the product. Use with the URL of a ProductModel or a * textual representation of the model identifier. The URL of the * ProductModel can be from an external source. It is recommended to * additionally provide strong product identifiers via the * gtin8/gtin13/gtin14 and mpn properties. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of model */ public function setModel($value) { $this->model = $value; } /** * Getter of Mpn * * The Manufacturer Part Number (MPN) of the product, or the product to * which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getMpn() { return $this->mpn; } /** * Setter of Mpn * * The Manufacturer Part Number (MPN) of the product, or the product to * which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of mpn */ public function setMpn($value) { $this->mpn = $value; } /** * Getter of Offers * * An offer to sell this item—for example, an offer to sell a product, * the DVD of a movie, or tickets to an event. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOffers() { return $this->offers; } /** * Setter of Offers * * An offer to sell this item—for example, an offer to sell a product, * the DVD of a movie, or tickets to an event. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of offers */ public function setOffers($value) { $this->offers = $value; } /** * Getter of Product ID * * The product identifier, such as ISBN. For example: <meta * itemprop='productID' content='isbn:123-456-789'/>. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getProductID() { return $this->productID; } /** * Setter of Product ID * * The product identifier, such as ISBN. For example: <meta * itemprop='productID' content='isbn:123-456-789'/>. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of productID */ public function setProductID($value) { $this->productID = $value; } /** * Getter of Release Date * * The release date of a product or product model. This can be used to * distinguish the exact variant of a product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getReleaseDate() { return $this->releaseDate; } /** * Setter of Release Date * * The release date of a product or product model. This can be used to * distinguish the exact variant of a product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of releaseDate */ public function setReleaseDate($value) { $this->releaseDate = $value; } /** * Getter of Review * * A review of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getReview() { return $this->review; } /** * Setter of Review * * A review of the item. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of review */ public function setReview($value) { $this->review = $value; } /** * Getter of Reviews * * Review of the item (legacy spelling; see singular form, review). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getReviews() { return $this->reviews; } /** * Setter of Reviews * * Review of the item (legacy spelling; see singular form, review). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of reviews */ public function setReviews($value) { $this->reviews = $value; } /** * Add review to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of review */ public function addReview($value) { $this->reviews[] = $value; } /** * Remove review to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of review */ public function removeReview($value) { $key = array_search($value, $this->reviews); if($key !== false) { unset($this->reviews[$key]); } } /** * Getter of Sku * * The Stock Keeping Unit (SKU), i.e. a merchant-specific identifier for * a product or service, or the product to which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSku() { return $this->sku; } /** * Setter of Sku * * The Stock Keeping Unit (SKU), i.e. a merchant-specific identifier for * a product or service, or the product to which the offer refers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of sku */ public function setSku($value) { $this->sku = $value; } /** * Getter of Weight * * The weight of the product. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getWeight() { return $this->weight; } /** * Setter of Weight * * The weight of the product. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of weight */ public function setWeight($value) { $this->weight = $value; } /** * Getter of Width * * The width of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getWidth() { return $this->width; } /** * Setter of Width * * The width of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of width */ public function setWidth($value) { $this->width = $value; } } <file_sep>/GenerationSamples/MedicalSignBundle/Model/AbstractMedicalSign.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/

namespace SchemaRepository\Bundle\MedicalSignBundle\Model;

use SchemaRepository\Bundle\MedicalSignBundle\Model\MedicalSignInterface;
use SchemaRepository\Bundle\MedicalSignOrSymptomBundle\Model\AbstractMedicalSignOrSymptom;

/**
 * Model of Medical Sign
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\MedicalSignBundle\Model
 */
abstract class AbstractMedicalSign extends AbstractMedicalSignOrSymptom implements MedicalSignInterface
{
    /** @var mixed A physical examination that can identify this sign. */
    protected $identifyingExam;

    /** @var mixed A diagnostic test that can identify this sign. */
    protected $identifyingTest;

    /** @return mixed The physical examination that can identify this sign. */
    public function getIdentifyingExam() { return $this->identifyingExam; }

    /** @param mixed $value The physical examination that can identify this sign. */
    public function setIdentifyingExam($value) { $this->identifyingExam = $value; }

    /** @return mixed The diagnostic test that can identify this sign. */
    public function getIdentifyingTest() { return $this->identifyingTest; }

    /** @param mixed $value The diagnostic test that can identify this sign. */
    public function setIdentifyingTest($value) { $this->identifyingTest = $value; }
}
<file_sep>/GenerationSamples/TVEpisodeBundle/Model/AbstractTVEpisode.php
<?php

/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

namespace SchemaRepository\Bundle\TVEpisodeBundle\Model;

use SchemaRepository\Bundle\TVEpisodeBundle\Model\TVEpisodeInterface;
use SchemaRepository\Bundle\CreativeWorkBundle\Model\AbstractCreativeWork;

/**
 * Model of TV Episode
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\TVEpisodeBundle\Model
 */
abstract class AbstractTVEpisode extends AbstractCreativeWork implements TVEpisodeInterface
{
    /** @var mixed A cast member of the movie, TV series, season, or episode, or video. */
    protected $actor;

    /** @var array Cast members (legacy plural spelling; see singular form, actor). */
    protected $actors;

    /** @var mixed The director of the movie, TV episode, or series. */
    protected $director;

    /** @var integer|long|float|decimal The episode number. */
    protected $episodeNumber;

    /** @var mixed The composer of the movie or TV soundtrack. */
    protected $musicBy;

    /** @var mixed The season to which this episode belongs. */
    protected $partOfSeason;

    /** @var mixed The TV series to which this episode or season belongs. */
    protected $partOfTVSeries;

    /** @var mixed The producer of the movie, TV series, season, or episode, or video. */
    protected $producer;

    /** @var mixed The production company or studio that made the movie, TV series, season, or episode, or video. */
    protected $productionCompany;

    /** @var mixed The trailer of the movie or TV series, season, or episode. */
    protected $trailer;

    /** @return mixed The cast member. */
    public function getActor() { return $this->actor; }

    /** @param mixed $value The cast member. */
    public function setActor($value) { $this->actor = $value; }

    /** @return array The cast members (legacy plural form). */
    public function getActors() { return $this->actors; }

    /** @param array $value The cast members (legacy plural form). */
    public function setActors($value) { $this->actors = $value; }

    /** @param mixed $value Cast member to append to the actors array. */
    public function addActor($value) { $this->actors[] = $value; }

    /**
     * Remove a cast member from the actors array.
     *
     * @param mixed $value Cast member to remove (first match only).
     */
    public function removeActor($value)
    {
        $key = array_search($value, $this->actors);
        if ($key !== false) {
            unset($this->actors[$key]);
        }
    }

    /** @return mixed The director. */
    public function getDirector() { return $this->director; }

    /** @param mixed $value The director. */
    public function setDirector($value) { $this->director = $value; }

    /** @return integer|long|float|decimal The episode number. */
    public function getEpisodeNumber() { return $this->episodeNumber; }

    /** @param integer|long|float|decimal $value The episode number. */
    public function setEpisodeNumber($value) { $this->episodeNumber = $value; }

    /** @return mixed The soundtrack composer. */
    public function getMusicBy() { return $this->musicBy; }

    /** @param mixed $value The soundtrack composer. */
    public function setMusicBy($value) { $this->musicBy = $value; }

    /** @return mixed The season this episode belongs to. */
    public function getPartOfSeason() { return $this->partOfSeason; }

    /** @param mixed $value The season this episode belongs to. */
    public function setPartOfSeason($value) { $this->partOfSeason = $value; }

    /** @return mixed The TV series this episode or season belongs to. */
    public function getPartOfTVSeries() { return $this->partOfTVSeries; }

    /** @param mixed $value The TV series this episode or season belongs to. */
    public function setPartOfTVSeries($value) { $this->partOfTVSeries = $value; }

    /** @return mixed The producer. */
    public function getProducer() { return $this->producer; }

    /** @param mixed $value The producer. */
    public function setProducer($value) { $this->producer = $value; }

    /** @return mixed The production company or studio. */
    public function getProductionCompany() { return $this->productionCompany; }

    /** @param mixed $value The production company or studio. */
    public function setProductionCompany($value) { $this->productionCompany = $value; }

    /** @return mixed The trailer. */
    public function getTrailer() { return $this->trailer; }

    /** @param mixed $value The trailer. */
    public function setTrailer($value) { $this->trailer = $value; }
}
<file_sep>/GenerationSamples/TVSeasonBundle/Model/AbstractTVSeason.php
<?php

/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

namespace SchemaRepository\Bundle\TVSeasonBundle\Model;

use SchemaRepository\Bundle\TVSeasonBundle\Model\TVSeasonInterface;
use SchemaRepository\Bundle\CreativeWorkBundle\Model\AbstractCreativeWork;

/**
 * Model of TV Season
 *
 * @author SchemaGenerator <<EMAIL>>
 * @package SchemaRepository\Bundle\TVSeasonBundle\Model
 */
abstract class AbstractTVSeason extends AbstractCreativeWork implements TVSeasonInterface
{
    /** @var mixed The end date and time of the event (in ISO 8601 date format). */
    protected $endDate;

    /** @var mixed An episode of a TV series or season. */
    protected $episode;

    /** @var array Episodes (legacy plural spelling; see singular form, episode). */
    protected $episodes;

    /** @var integer|long|float|decimal The number of episodes in this season or series. */
    protected $numberOfEpisodes;

    /** @var mixed The TV series to which this episode or season belongs. */
    protected $partOfTVSeries;

    /** @var integer|long The season number. */
    protected $seasonNumber;

    /** @var mixed The start date and time of the event (in ISO 8601 date format). */
    protected $startDate;

    /** @var mixed The trailer of the movie or TV series, season, or episode. */
    protected $trailer;

    /** @return mixed The end date and time (ISO 8601). */
    public function getEndDate() { return $this->endDate; }

    /** @param mixed $value The end date and time (ISO 8601). */
    public function setEndDate($value) { $this->endDate = $value; }

    /** @return mixed The episode. */
    public function getEpisode() { return $this->episode; }

    /** @param mixed $value The episode. */
    public function setEpisode($value) { $this->episode = $value; }

    /** @return array The episodes (legacy plural form). */
    public function getEpisodes() { return $this->episodes; }

    /** @param array $value The episodes (legacy plural form). */
    public function setEpisodes($value) { $this->episodes = $value; }

    /** @param mixed $value Episode to append to the episodes array. */
    public function addEpisode($value) { $this->episodes[] = $value; }

    /**
     * Remove an episode from the episodes array.
     *
     * @param mixed $value Episode to remove (first match only).
     */
    public function removeEpisode($value)
    {
        $key = array_search($value, $this->episodes);
        if ($key !== false) {
            unset($this->episodes[$key]);
        }
    }

    /** @return integer|long|float|decimal The number of episodes. */
    public function getNumberOfEpisodes() { return $this->numberOfEpisodes; }

    /** @param integer|long|float|decimal $value The number of episodes. */
    public function setNumberOfEpisodes($value) { $this->numberOfEpisodes = $value; }

    /** @return mixed The TV series this season belongs to. */
    public function getPartOfTVSeries() { return $this->partOfTVSeries; }

    /** @param mixed $value The TV series this season belongs to. */
    public function setPartOfTVSeries($value) { $this->partOfTVSeries = $value; }

    /** @return integer|long The season number. */
    public function getSeasonNumber() { return $this->seasonNumber; }

    /** @param integer|long $value The season number. */
    public function setSeasonNumber($value) { $this->seasonNumber = $value; }

    /** @return mixed The start date and time (ISO 8601). */
    public function getStartDate() { return $this->startDate; }

    /** @param mixed $value The start date and time (ISO 8601). */
    public function setStartDate($value) { $this->startDate = $value; }

    /** @return mixed The trailer. */
    public function getTrailer() { return $this->trailer; }

    /** @param mixed $value The trailer. */
    public function setTrailer($value) { $this->trailer = $value; }
}
<file_sep>/GenerationSamples/PersonBundle/Entity/AbstractPerson.php
<?php

/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\PersonBundle\Entity; use SchemaRepository\Bundle\PersonBundle\Model\AbstractPerson; use Doctrine\ORM\Mapping as ORM; use Symfony\Component\Validator\Constraints as Assert; /** * Person Entity * * @ODM\MappedSuperclass() */ abstract class AbstractPerson extends AbstractPerson { /** * {@inheritdoc} * * @ORM\Column(name="id", type="integer") * @ORM\Id * @ORM\GeneratedValue(strategy="AUTO") */ protected $id; /** * {@inheritdoc} * * @ORM\Column(name="additional_type", type="string", nullable=true) * @Assert\Type(type="string") */ protected $additionalType; /** * {@inheritdoc} * * @ORM\Column(name="description", type="string", nullable=true) * @Assert\Type(type="string") */ protected $description; /** * {@inheritdoc} * * @ORM\Column(name="image", type="string", nullable=true) * @Assert\Type(type="string") */ protected $image; /** * {@inheritdoc} * * @ORM\Column(name="name", type="string", nullable=true) * @Assert\Type(type="string") */ protected $name; /** * {@inheritdoc} * * @ORM\Column(name="same_as", type="string", nullable=true) * @Assert\Type(type="string") */ protected $sameAs; /** * {@inheritdoc} * * @ORM\Column(name="url", type="string", nullable=true) * @Assert\Type(type="string") */ protected $url; /** * {@inheritdoc} * * @ORM\Column(name="additional_name", type="string", nullable=true) * @Assert\Type(type="string") */ protected $additionalName; /** * {@inheritdoc} * * @ORM\Column(name="address", type="string", nullable=true) */ protected $address; /** * 
{@inheritdoc} * * @ORM\Column(name="affiliation", type="string", nullable=true) */ protected $affiliation; /** * {@inheritdoc} * * @ORM\Column(name="alumni_of", type="string", nullable=true) */ protected $alumniOf; /** * {@inheritdoc} * * @ORM\Column(name="award", type="string", nullable=true) * @Assert\Type(type="string") */ protected $award; /** * {@inheritdoc} * * @ORM\Column(name="awards", type="string", nullable=true) * @Assert\Type(type="string") */ protected $awards; /** * {@inheritdoc} * * @ORM\Column(name="birth_date", type="string", nullable=true) */ protected $birthDate; /** * {@inheritdoc} * * @ORM\Column(name="brand", type="string", nullable=true) */ protected $brand; /** * {@inheritdoc} * * @ORM\Column(name="children", type="string", nullable=true) */ protected $children; /** * {@inheritdoc} * * @ORM\Column(name="colleague", type="string", nullable=true) */ protected $colleague; /** * {@inheritdoc} * * @ORM\Column(name="colleagues", type="array", nullable=true) */ protected $colleagues; /** * {@inheritdoc} * * @ORM\Column(name="contact_point", type="string", nullable=true) */ protected $contactPoint; /** * {@inheritdoc} * * @ORM\Column(name="contact_points", type="array", nullable=true) */ protected $contactPoints; /** * {@inheritdoc} * * @ORM\Column(name="death_date", type="string", nullable=true) */ protected $deathDate; /** * {@inheritdoc} * * @ORM\Column(name="duns", type="string", nullable=true) * @Assert\Type(type="string") */ protected $duns; /** * {@inheritdoc} * * @ORM\Column(name="email", type="string", nullable=true) * @Assert\Type(type="string") */ protected $email; /** * {@inheritdoc} * * @ORM\Column(name="family_name", type="string", nullable=true) * @Assert\Type(type="string") */ protected $familyName; /** * {@inheritdoc} * * @ORM\Column(name="fax_number", type="string", nullable=true) * @Assert\Type(type="string") */ protected $faxNumber; /** * {@inheritdoc} * * @ORM\Column(name="follows", type="string", nullable=true) */ protected 
$follows; /** * {@inheritdoc} * * @ORM\Column(name="gender", type="string", nullable=true) * @Assert\Type(type="string") */ protected $gender; /** * {@inheritdoc} * * @ORM\Column(name="given_name", type="string", nullable=true) * @Assert\Type(type="string") */ protected $givenName; /** * {@inheritdoc} * * @ORM\Column(name="global_location_number", type="string", nullable=true) * @Assert\Type(type="string") */ protected $globalLocationNumber; /** * {@inheritdoc} * * @ORM\Column(name="has_pos", type="string", nullable=true) */ protected $hasPOS; /** * {@inheritdoc} * * @ORM\Column(name="home_location", type="string", nullable=true) */ protected $homeLocation; /** * {@inheritdoc} * * @ORM\Column(name="honorific_prefix", type="string", nullable=true) * @Assert\Type(type="string") */ protected $honorificPrefix; /** * {@inheritdoc} * * @ORM\Column(name="honorific_suffix", type="string", nullable=true) * @Assert\Type(type="string") */ protected $honorificSuffix; /** * {@inheritdoc} * * @ORM\Column(name="interaction_count", type="string", nullable=true) * @Assert\Type(type="string") */ protected $interactionCount; /** * {@inheritdoc} * * @ORM\Column(name="isic_v4", type="string", nullable=true) * @Assert\Type(type="string") */ protected $isicV4; /** * {@inheritdoc} * * @ORM\Column(name="job_title", type="string", nullable=true) * @Assert\Type(type="string") */ protected $jobTitle; /** * {@inheritdoc} * * @ORM\Column(name="knows", type="string", nullable=true) */ protected $knows; /** * {@inheritdoc} * * @ORM\Column(name="makes_offer", type="string", nullable=true) */ protected $makesOffer; /** * {@inheritdoc} * * @ORM\Column(name="member_of", type="string", nullable=true) */ protected $memberOf; /** * {@inheritdoc} * * @ORM\Column(name="naics", type="string", nullable=true) * @Assert\Type(type="string") */ protected $naics; /** * {@inheritdoc} * * @ORM\Column(name="nationality", type="string", nullable=true) */ protected $nationality; /** * {@inheritdoc} * * 
@ORM\Column(name="owns", type="string", nullable=true) */ protected $owns; /** * {@inheritdoc} * * @ORM\Column(name="parent", type="string", nullable=true) */ protected $parent; /** * {@inheritdoc} * * @ORM\Column(name="parents", type="array", nullable=true) */ protected $parents; /** * {@inheritdoc} * * @ORM\Column(name="performer_in", type="string", nullable=true) */ protected $performerIn; /** * {@inheritdoc} * * @ORM\Column(name="related_to", type="string", nullable=true) */ protected $relatedTo; /** * {@inheritdoc} * * @ORM\Column(name="seeks", type="string", nullable=true) */ protected $seeks; /** * {@inheritdoc} * * @ORM\Column(name="sibling", type="string", nullable=true) */ protected $sibling; /** * {@inheritdoc} * * @ORM\Column(name="siblings", type="array", nullable=true) */ protected $siblings; /** * {@inheritdoc} * * @ORM\Column(name="spouse", type="string", nullable=true) */ protected $spouse; /** * {@inheritdoc} * * @ORM\Column(name="tax_id", type="string", nullable=true) * @Assert\Type(type="string") */ protected $taxID; /** * {@inheritdoc} * * @ORM\Column(name="telephone", type="string", nullable=true) * @Assert\Type(type="string") */ protected $telephone; /** * {@inheritdoc} * * @ORM\Column(name="vat_id", type="string", nullable=true) * @Assert\Type(type="string") */ protected $vatID; /** * {@inheritdoc} * * @ORM\Column(name="work_location", type="string", nullable=true) */ protected $workLocation; /** * {@inheritdoc} * * @ORM\Column(name="works_for", type="string", nullable=true) */ protected $worksFor; } <file_sep>/GeneratorClass/Property.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the 
Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ class Property { private $name; private $label; private $type; private $realType; private $comment; /** * @param string $name * @param string $type */ public function __construct($name, $label, $type, $comment) { global $properties; if(substr($name, -1) == 's' && $type[0] !== 'Text' && $type[0] !== 'URL' && isset($properties[substr($name, 0, -1)])) { $type = array('Array'); } $this->name = $name; $this->label = $label; $this->realType = $type; $this->type = DataType::getPhpType($type); $this->comment = $comment; } /** * return the php code of attribute declaration * * @return string */ public function getAttributeDeclaration() { $function = "\n /**\n" . " * " . wordwrap($this->comment, 70, "\n * ", true) . "\n" . " * \n" . " * @author SchemaGenerator <<EMAIL>>\n" . " * \n" . " * @access protected\n" . " * @var {$this->type}\n" . " */\n" . " protected \${$this->name};\n"; return $function; } /** * Return the getter Comment * * @return string */ private function getterComment() { return "\n /**\n" . " * Getter of {$this->label}\n" . " * \n" . " * " . wordwrap($this->comment, 70, "\n * ", true) . "\n" . " * \n" . " * @author SchemaGenerator <<EMAIL>>\n" . " * \n" . " * @access public\n" . " * \n" . " * @return {$this->type}\n" . 
" */\n";
    }

    /**
     * Build the PHP code of the getter method.
     *
     * @return string
     */
    public function getGetter()
    {
        $function = $this->getterComment() .
            " public function get" . ucfirst($this->name) . "()\n" .
            " {\n" .
            " return \$this->{$this->name};\n" .
            " }\n";

        return $function;
    }

    /**
     * Build the PHP code of the getter declaration for an interface.
     *
     * @return string
     */
    public function getGetterInterface()
    {
        $function = $this->getterComment() .
            " public function get" . ucfirst($this->name) . "();\n";

        return $function;
    }

    /**
     * Build the docblock shared by the setter and its interface declaration.
     *
     * @return string
     */
    private function setterComment()
    {
        return "\n /**\n" .
            " * Setter of {$this->label}\n" .
            " * \n" .
            " * " . wordwrap($this->comment, 70, "\n * ", true) . "\n" .
            " * \n" .
            " * @author SchemaGenerator <<EMAIL>>\n" .
            " * \n" .
            " * @access public\n" .
            " * \n" .
            " * @param {$this->type} \$value Value of {$this->name}\n" .
            " */\n";
    }

    /**
     * Build the PHP code of the setter method; for array-typed properties
     * the generated add/remove helpers are appended as well.
     *
     * @return string
     */
    public function getSetter()
    {
        $function = $this->setterComment() .
            " public function set" . ucfirst($this->name) . "(\$value)\n" .
            " {\n" .
            " \$this->{$this->name} = \$value;\n" .
            " }\n";

        if ($this->type === 'array') {
            $function .= $this->getArraySetter();
        }

        return $function;
    }

    /**
     * Build the add/remove helper methods generated for array properties.
     *
     * @return string
     */
    private function getArraySetter()
    {
        $function = "\n /**\n" .
            " * Add " . substr($this->name, 0, -1) . " to Array\n" .
            " * \n" .
            " * @author SchemaGenerator <<EMAIL>>\n" .
            " * \n" .
            " * @access public\n" .
            " * \n" .
            " * @param mixed \$value Value of " . substr($this->name, 0, -1) . "\n" .
            " */\n" .
            " public function add" . ucfirst(substr($this->name, 0, -1)) . "(\$value)\n" .
            " {\n" .
            " \$this->{$this->name}[] = \$value;\n" .
            " }\n" .
            "\n /**\n" .
            // FIX: elements are removed *from* the array; the generated
            // docblock previously read "Remove ... to Array".
            " * Remove " . substr($this->name, 0, -1) . " from Array\n" .
            " * \n" .
            " * @author SchemaGenerator <<EMAIL>>\n" .
            " * \n" .
            " * @access public\n" .
            " * \n" .
            " * @param mixed \$value Value of " . substr($this->name, 0, -1) . "\n" .
            " */\n" .
            " public function remove" . ucfirst(substr($this->name, 0, -1)) . "(\$value)\n" .
            " {\n" .
" \$key = array_search(\$value, \$this->{$this->name});\n" .
            " if(\$key !== false) {\n" .
            " unset(\$this->{$this->name}[\$key]);\n" .
            " }\n" .
            " }\n";

        return $function;
    }

    /**
     * Build the PHP code of the setter declaration for an interface.
     *
     * @return string
     */
    public function getSetterInterface()
    {
        // FIX: setterComment() takes no parameters; the stray "true"
        // argument previously passed here was silently ignored by PHP.
        $function = $this->setterComment() .
            " public function set" . ucfirst($this->name) . "(\$value);\n\n";

        return $function;
    }

    /**
     * Return the label of the property.
     *
     * @return string
     */
    public function getLabel()
    {
        return $this->label;
    }

    /**
     * Return the name of the property.
     *
     * @return string
     */
    public function getName()
    {
        return $this->name;
    }

    /**
     * Return the (PHP) type of the property.
     *
     * @return string
     */
    public function getType()
    {
        return $this->type;
    }
}<file_sep>/GenerationSamples/GeoShapeBundle/Model/AbstractGeoShape.php
<?php
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 <NAME> <<EMAIL>>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
*/ namespace SchemaRepository\Bundle\GeoShapeBundle\Model; use SchemaRepository\Bundle\GeoShapeBundle\Model\GeoShapeInterface; use SchemaRepository\Bundle\StructuredValueBundle\Model\AbstractStructuredValue; /** * Model of Geo Shape * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\GeoShapeBundle\Model */ abstract class AbstractGeoShape extends AbstractStructuredValue implements GeoShapeInterface { /** * A polygon is the area enclosed by a point-to-point path for which the * starting and ending points are the same. A polygon is expressed as a * series of four or more spacedelimited points where the first and final * points are identical. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $box; /** * A circle is the circular region of a specified radius centered at a * specified latitude and longitude. A circle is expressed as a pair * followed by a radius in meters. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $circle; /** * The elevation of a location. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string|integer|long|float|decimal */ protected $elevation; /** * A line is a point-to-point path consisting of two or more points. A * line is expressed as a series of two or more point objects separated * by space. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $line; /** * A polygon is the area enclosed by a point-to-point path for which the * starting and ending points are the same. A polygon is expressed as a * series of four or more spacedelimited points where the first and final * points are identical. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $polygon; /** * Getter of Box * * A polygon is the area enclosed by a point-to-point path for which the * starting and ending points are the same. 
A polygon is expressed as a * series of four or more spacedelimited points where the first and final * points are identical. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getBox() { return $this->box; } /** * Setter of Box * * A polygon is the area enclosed by a point-to-point path for which the * starting and ending points are the same. A polygon is expressed as a * series of four or more spacedelimited points where the first and final * points are identical. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of box */ public function setBox($value) { $this->box = $value; } /** * Getter of Circle * * A circle is the circular region of a specified radius centered at a * specified latitude and longitude. A circle is expressed as a pair * followed by a radius in meters. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getCircle() { return $this->circle; } /** * Setter of Circle * * A circle is the circular region of a specified radius centered at a * specified latitude and longitude. A circle is expressed as a pair * followed by a radius in meters. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of circle */ public function setCircle($value) { $this->circle = $value; } /** * Getter of Elevation * * The elevation of a location. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string|integer|long|float|decimal */ public function getElevation() { return $this->elevation; } /** * Setter of Elevation * * The elevation of a location. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string|integer|long|float|decimal $value Value of elevation */ public function setElevation($value) { $this->elevation = $value; } /** * Getter of Line * * A line is a point-to-point path consisting of two or more points. 
A * line is expressed as a series of two or more point objects separated * by space. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getLine() { return $this->line; } /** * Setter of Line * * A line is a point-to-point path consisting of two or more points. A * line is expressed as a series of two or more point objects separated * by space. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of line */ public function setLine($value) { $this->line = $value; } /** * Getter of Polygon * * A polygon is the area enclosed by a point-to-point path for which the * starting and ending points are the same. A polygon is expressed as a * series of four or more spacedelimited points where the first and final * points are identical. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPolygon() { return $this->polygon; } /** * Setter of Polygon * * A polygon is the area enclosed by a point-to-point path for which the * starting and ending points are the same. A polygon is expressed as a * series of four or more spacedelimited points where the first and final * points are identical. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of polygon */ public function setPolygon($value) { $this->polygon = $value; } } <file_sep>/GenerationSamples/DrugBundle/Model/DrugInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\DrugBundle\Model; use SchemaRepository\Bundle\MedicalTherapyBundle\Model\MedicalTherapyInterface; /** * Interface of Drug Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\DrugBundle\Model */ interface DrugInterface extends MedicalTherapyInterface { /** * Getter of Active Ingredient * * An active ingredient, typically chemical compounds and/or biologic * substances. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getActiveIngredient(); /** * Getter of Administration Route * * A route by which this drug may be administered, e.g. 'oral'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAdministrationRoute(); /** * Getter of Alcohol Warning * * Any precaution, guidance, contraindication, etc. related to * consumption of alcohol while taking this drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getAlcoholWarning(); /** * Getter of Available Strength * * An available dosage strength for the drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAvailableStrength(); /** * Getter of Breastfeeding Warning * * Any precaution, guidance, contraindication, etc. related to this * drug's use by breastfeeding mothers. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getBreastfeedingWarning(); /** * Getter of Clincal Pharmacology * * Description of the absorption and elimination of drugs, including * their concentration (pharmacokinetics, pK) and biological effects * (pharmacodynamics, pD). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getClincalPharmacology(); /** * Getter of Cost * * Cost per unit of the drug, as reported by the source being tagged. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getCost(); /** * Getter of Dosage Form * * A dosage form in which this drug/supplement is available, e.g. * 'tablet', 'suspension', 'injection'. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getDosageForm(); /** * Getter of Dose Schedule * * A dosing schedule for the drug for a given population, either * observed, recommended, or maximum dose based on the type used. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDoseSchedule(); /** * Getter of Drug Class * * The class of drug this belongs to (e.g., statins). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDrugClass(); /** * Getter of Food Warning * * Any precaution, guidance, contraindication, etc. related to * consumption of specific foods while taking this drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getFoodWarning(); /** * Getter of Interacting Drug * * Another drug that is known to interact with this drug in a way that * impacts the effect of this drug or causes a risk to the patient. Note: * disease interactions are typically captured as contraindications. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getInteractingDrug(); /** * Getter of Is Available Generically * * True if the drug is available in a generic form (regardless of name). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return bool */ public function getIsAvailableGenerically(); /** * Getter of Is Proprietary * * True if this item's name is a proprietary/brand name (vs. generic * name). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return bool */ public function getIsProprietary(); /** * Getter of Label Details * * Link to the drug's label details. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getLabelDetails(); /** * Getter of Legal Status * * The drug or supplement's legal status, including any controlled * substance schedules that apply. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getLegalStatus(); /** * Getter of Manufacturer * * The manufacturer of the product. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getManufacturer(); /** * Getter of Mechanism of Action * * The specific biochemical interaction through which this drug or * supplement produces its pharmacological effect. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getMechanismOfAction(); /** * Getter of Non Proprietary Name * * The generic name of this drug or supplement. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getNonProprietaryName(); /** * Getter of Overdosage * * Any information related to overdose on a drug, including signs or * symptoms, treatments, contact information for emergency response. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getOverdosage(); /** * Getter of Pregnancy Category * * Pregnancy category of this drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPregnancyCategory(); /** * Getter of Pregnancy Warning * * Any precaution, guidance, contraindication, etc. related to this * drug's use during pregnancy. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPregnancyWarning(); /** * Getter of Prescribing Info * * Link to prescribing information for the drug. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPrescribingInfo(); /** * Getter of Prescription Status * * Indicates whether this drug is available by prescription or * over-the-counter. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPrescriptionStatus(); /** * Getter of Related Drug * * Any other drug related to this one, for example commonly-prescribed * alternatives. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRelatedDrug(); /** * Getter of Warning * * Any FDA or other warnings about the drug (text or URL). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string|string */ public function getWarning(); } <file_sep>/GenerationSamples/NerveBundle/Model/NerveInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\NerveBundle\Model; use SchemaRepository\Bundle\AnatomicalStructureBundle\Model\AnatomicalStructureInterface; /** * Interface of Nerve Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\NerveBundle\Model */ interface NerveInterface extends AnatomicalStructureInterface { /** * Getter of Branch * * The branches that delineate from the nerve bundle. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBranch(); /** * Getter of Nerve Motor * * The neurological pathway extension that involves muscle control. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getNerveMotor(); /** * Getter of Sensory Unit * * The neurological pathway extension that inputs and sends information * to the brain or spinal cord. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSensoryUnit(); /** * Getter of Sourced From * * The neurological pathway that originates the neurons. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getSourcedFrom(); } <file_sep>/GenerationSamples/MediaObjectBundle/Model/MediaObjectInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ namespace SchemaRepository\Bundle\MediaObjectBundle\Model; use SchemaRepository\Bundle\CreativeWorkBundle\Model\CreativeWorkInterface; /** * Interface of Media Object Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\MediaObjectBundle\Model */ interface MediaObjectInterface extends CreativeWorkInterface { /** * Getter of Associated Article * * A NewsArticle associated with the Media Object. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getAssociatedArticle(); /** * Getter of Bitrate * * The bitrate of the media object. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getBitrate(); /** * Getter of Content Size * * File size in (mega/kilo) bytes. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getContentSize(); /** * Getter of Content Url * * Actual bytes of the media object, for example the image file or video * file. (previous spelling: contentURL) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getContentUrl(); /** * Getter of Duration * * The duration of the item (movie, audio recording, event, etc.) in ISO * 8601 date format. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDuration(); /** * Getter of Embed Url * * A URL pointing to a player for a specific video. In general, this is * the information in the src element of an embed tag and should not be * the same as the content of the loc tag. (previous spelling: embedURL) * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEmbedUrl(); /** * Getter of Encodes Creative Work * * The creative work encoded by this media object * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getEncodesCreativeWork(); /** * Getter of Encoding Format * * mp3, mpeg4, etc. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEncodingFormat(); /** * Getter of Expires * * Date the content expires and is no longer useful or available. Useful * for videos. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getExpires(); /** * Getter of Height * * The height of the item. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getHeight(); /** * Getter of Player Type * * Player type required—for example, Flash or Silverlight. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPlayerType(); /** * Getter of Regions Allowed * * The regions where the media is allowed. If not specified, then it's * assumed to be allowed everywhere. Specify the countries in ISO 3166 * format. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getRegionsAllowed(); /** * Getter of Requires Subscription * * Indicates if use of the media require a subscription (either paid or * free). Allowed values are true or false (note that an earlier version * had 'yes', 'no'). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return bool */ public function getRequiresSubscription(); /** * Getter of Upload Date * * Date when this media object was uploaded to this site. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getUploadDate(); /** * Getter of Width * * The width of the item. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getWidth(); } <file_sep>/GenerationSamples/GeoCoordinatesBundle/Model/AbstractGeoCoordinates.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\GeoCoordinatesBundle\Model; use SchemaRepository\Bundle\GeoCoordinatesBundle\Model\GeoCoordinatesInterface; use SchemaRepository\Bundle\StructuredValueBundle\Model\AbstractStructuredValue; /** * Model of Geo Coordinates * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\GeoCoordinatesBundle\Model */ abstract class AbstractGeoCoordinates extends AbstractStructuredValue implements GeoCoordinatesInterface { /** * The elevation of a location. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string|integer|long|float|decimal */ protected $elevation; /** * The latitude of a location. 
For example 37.42242. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var integer|long|float|decimal|string */ protected $latitude; /** * The longitude of a location. For example -122.08585. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var integer|long|float|decimal|string */ protected $longitude; /** * Getter of Elevation * * The elevation of a location. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string|integer|long|float|decimal */ public function getElevation() { return $this->elevation; } /** * Setter of Elevation * * The elevation of a location. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string|integer|long|float|decimal $value Value of elevation */ public function setElevation($value) { $this->elevation = $value; } /** * Getter of Latitude * * The latitude of a location. For example 37.42242. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal|string */ public function getLatitude() { return $this->latitude; } /** * Setter of Latitude * * The latitude of a location. For example 37.42242. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param integer|long|float|decimal|string $value Value of latitude */ public function setLatitude($value) { $this->latitude = $value; } /** * Getter of Longitude * * The longitude of a location. For example -122.08585. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal|string */ public function getLongitude() { return $this->longitude; } /** * Setter of Longitude * * The longitude of a location. For example -122.08585. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param integer|long|float|decimal|string $value Value of longitude */ public function setLongitude($value) { $this->longitude = $value; } } <file_sep>/GenerationSamples/JobPostingBundle/Model/JobPostingInterface.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\JobPostingBundle\Model; use SchemaRepository\Bundle\IntangibleBundle\Model\IntangibleInterface; /** * Interface of Job Posting Model * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\JobPostingBundle\Model */ interface JobPostingInterface extends IntangibleInterface { /** * Getter of Base Salary * * The base salary of the job. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return integer|long|float|decimal */ public function getBaseSalary(); /** * Getter of Benefits * * Description of benefits associated with the job. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getBenefits(); /** * Getter of Date Posted * * Publication date for the job posting. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getDatePosted(); /** * Getter of Education Requirements * * Educational background needed for the position. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEducationRequirements(); /** * Getter of Employment Type * * Type of employment (e.g. full-time, part-time, contract, temporary, * seasonal, internship). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getEmploymentType(); /** * Getter of Experience Requirements * * Description of skills and experience needed for the position. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getExperienceRequirements(); /** * Getter of Hiring Organization * * Organization offering the job position. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getHiringOrganization(); /** * Getter of Incentives * * Description of bonus and commission compensation aspects of the job. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getIncentives(); /** * Getter of Industry * * The industry associated with the job position. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getIndustry(); /** * Getter of Job Location * * A (typically single) geographic location associated with the job * position. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getJobLocation(); /** * Getter of Occupational Category * * Category or categories describing the job. Use BLS O*NET-SOC taxonomy: * http://www.onetcenter.org/taxonomy.html. Ideally includes textual * label and formal code, with the property repeated for each applicable * value. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getOccupationalCategory(); /** * Getter of Qualifications * * Specific qualifications required for this role. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getQualifications(); /** * Getter of Responsibilities * * Responsibilities associated with this role. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getResponsibilities(); /** * Getter of Salary Currency * * The currency (coded using ISO 4217, * http://en.wikipedia.org/wiki/ISO_4217 used for the main salary * information in this job posting. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSalaryCurrency(); /** * Getter of Skills * * Skills required to fulfill this role. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSkills(); /** * Getter of Special Commitments * * Any special commitments associated with this job posting. Valid * entries include VeteranCommit, MilitarySpouseCommit, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getSpecialCommitments(); /** * Getter of Title * * The title of the job. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getTitle(); /** * Getter of Work Hours * * The typical working hours for this job (e.g. 1st shift, night shift, * 8am-5pm). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getWorkHours(); } <file_sep>/GenerationSamples/LocalBusinessBundle/Model/AbstractLocalBusiness.php <?php /** * The MIT License (MIT) * * Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ namespace SchemaRepository\Bundle\LocalBusinessBundle\Model; use SchemaRepository\Bundle\LocalBusinessBundle\Model\LocalBusinessInterface; use SchemaRepository\Bundle\OrganizationBundle\Model\AbstractOrganization; /** * Model of Local Business * * @author SchemaGenerator <<EMAIL>> * @package SchemaRepository\Bundle\LocalBusinessBundle\Model */ abstract class AbstractLocalBusiness extends AbstractOrganization implements LocalBusinessInterface { /** * The larger organization that this local business is a branch of, if * any. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $branchOf; /** * The basic containment relation between places. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $containedIn; /** * The currency accepted (in ISO 4217 currency format). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $currenciesAccepted; /** * The geo coordinates of the place. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $geo; /** * A URL to a map of the place. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $map; /** * A URL to a map of the place (legacy spelling; see singular form, map). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $maps; /** * The opening hours for a business. Opening hours can be specified as a * weekly time range, starting with days, then times per day. Multiple * days can be listed with commas ',' separating each day. Day or time * ranges are specified using a hyphen '-'.- Days are specified using the * following two-letter combinations: Mo, Tu, We, Th, Fr, Sa, Su.- Times * are specified using 24:00 time. For example, 3pm is specified as * 15:00. - Here is an example: <time itemprop="openingHours" * datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time>. - If * a business is open 7 days a week, then it can be specified as <time * itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all * day</time>. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $openingHours; /** * The opening hours of a certain place. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $openingHoursSpecification; /** * Cash, credit card, etc. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $paymentAccepted; /** * A photograph of this place. 
* * @author SchemaGenerator <<EMAIL>> * * @access protected * @var mixed */ protected $photo; /** * Photographs of this place (legacy spelling; see singular form, photo). * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var array */ protected $photos; /** * The price range of the business, for example $$$. * * @author SchemaGenerator <<EMAIL>> * * @access protected * @var string */ protected $priceRange; /** * Getter of Branch of * * The larger organization that this local business is a branch of, if * any. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getBranchOf() { return $this->branchOf; } /** * Setter of Branch of * * The larger organization that this local business is a branch of, if * any. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of branchOf */ public function setBranchOf($value) { $this->branchOf = $value; } /** * Getter of Contained in * * The basic containment relation between places. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getContainedIn() { return $this->containedIn; } /** * Setter of Contained in * * The basic containment relation between places. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of containedIn */ public function setContainedIn($value) { $this->containedIn = $value; } /** * Getter of Currencies Accepted * * The currency accepted (in ISO 4217 currency format). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getCurrenciesAccepted() { return $this->currenciesAccepted; } /** * Setter of Currencies Accepted * * The currency accepted (in ISO 4217 currency format). 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of currenciesAccepted */ public function setCurrenciesAccepted($value) { $this->currenciesAccepted = $value; } /** * Getter of Geo * * The geo coordinates of the place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getGeo() { return $this->geo; } /** * Setter of Geo * * The geo coordinates of the place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of geo */ public function setGeo($value) { $this->geo = $value; } /** * Getter of Map * * A URL to a map of the place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getMap() { return $this->map; } /** * Setter of Map * * A URL to a map of the place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of map */ public function setMap($value) { $this->map = $value; } /** * Getter of Maps * * A URL to a map of the place (legacy spelling; see singular form, map). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getMaps() { return $this->maps; } /** * Setter of Maps * * A URL to a map of the place (legacy spelling; see singular form, map). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of maps */ public function setMaps($value) { $this->maps = $value; } /** * Getter of Opening Hours * * The opening hours for a business. Opening hours can be specified as a * weekly time range, starting with days, then times per day. Multiple * days can be listed with commas ',' separating each day. Day or time * ranges are specified using a hyphen '-'.- Days are specified using the * following two-letter combinations: Mo, Tu, We, Th, Fr, Sa, Su.- Times * are specified using 24:00 time. For example, 3pm is specified as * 15:00. 
- Here is an example: <time itemprop="openingHours" * datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time>. - If * a business is open 7 days a week, then it can be specified as <time * itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all * day</time>. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOpeningHours() { return $this->openingHours; } /** * Setter of Opening Hours * * The opening hours for a business. Opening hours can be specified as a * weekly time range, starting with days, then times per day. Multiple * days can be listed with commas ',' separating each day. Day or time * ranges are specified using a hyphen '-'.- Days are specified using the * following two-letter combinations: Mo, Tu, We, Th, Fr, Sa, Su.- Times * are specified using 24:00 time. For example, 3pm is specified as * 15:00. - Here is an example: <time itemprop="openingHours" * datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time>. - If * a business is open 7 days a week, then it can be specified as <time * itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all * day</time>. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of openingHours */ public function setOpeningHours($value) { $this->openingHours = $value; } /** * Getter of Opening Hours Specification * * The opening hours of a certain place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getOpeningHoursSpecification() { return $this->openingHoursSpecification; } /** * Setter of Opening Hours Specification * * The opening hours of a certain place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of openingHoursSpecification */ public function setOpeningHoursSpecification($value) { $this->openingHoursSpecification = $value; } /** * Getter of Payment Accepted * * Cash, credit card, etc. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPaymentAccepted() { return $this->paymentAccepted; } /** * Setter of Payment Accepted * * Cash, credit card, etc. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of paymentAccepted */ public function setPaymentAccepted($value) { $this->paymentAccepted = $value; } /** * Getter of Photo * * A photograph of this place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return mixed */ public function getPhoto() { return $this->photo; } /** * Setter of Photo * * A photograph of this place. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of photo */ public function setPhoto($value) { $this->photo = $value; } /** * Getter of Photos * * Photographs of this place (legacy spelling; see singular form, photo). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @return array */ public function getPhotos() { return $this->photos; } /** * Setter of Photos * * Photographs of this place (legacy spelling; see singular form, photo). * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param array $value Value of photos */ public function setPhotos($value) { $this->photos = $value; } /** * Add photo to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of photo */ public function addPhoto($value) { $this->photos[] = $value; } /** * Remove photo to Array * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param mixed $value Value of photo */ public function removePhoto($value) { $key = array_search($value, $this->photos); if($key !== false) { unset($this->photos[$key]); } } /** * Getter of Price Range * * The price range of the business, for example $$$. 
* * @author SchemaGenerator <<EMAIL>> * * @access public * * @return string */ public function getPriceRange() { return $this->priceRange; } /** * Setter of Price Range * * The price range of the business, for example $$$. * * @author SchemaGenerator <<EMAIL>> * * @access public * * @param string $value Value of priceRange */ public function setPriceRange($value) { $this->priceRange = $value; } }
d22b684d87bdf06e68e5658057ce5b32bc85c4f7
[ "Markdown", "PHP" ]
86
PHP
Dallas62/SchemaOrgRepository
32c047ec46e5613cf5494402659bbb18331c7ca8
78021ea7bc9051bf5cc663b0e6d74e7074acb951
refs/heads/master
<repo_name>qu10wenhao/MyList<file_sep>/mylist.cpp #include<iostream> class pop_error{ public: const char* msg(); }; const char* pop_error::msg(){ return "Error: you are trying to pop an empty list!"; } class cross_error{ public: const char* msg(); }; const char* cross_error::msg(){ return "Error: you've crossed the boundary!"; } template<class T> class MyList; template<class T> MyList<T> operator + (const MyList<T> &l1, const MyList<T> &l2); template<class T> MyList<T> operator + (const MyList<T> &l1, const T &item); template<class T> std::ostream & operator << (std::ostream &os, const MyList<T> &obj); template<class T> class MyList{ private: T *a; int size; int pos; void double_space(); public: MyList(){ pos = 0; size = 0; a = new T [size]; } MyList(int num, const T &item); MyList(const MyList<T> &l); MyList(T* arr, int len); void push(const T &item); T pop() throw(pop_error); void insert(int index, const T &item) throw(cross_error); void clean(); int get_size(); void erase(int start, int end) throw(cross_error); T get_item(int index) throw(cross_error); MyList get_item(int start, int end) throw(cross_error); int count(const T &item); void remove(const T &item); friend MyList<T> operator + <T>(const MyList<T> &l1, const MyList<T> &l2); friend MyList<T> operator + <T>(const MyList<T> &l1, const T &item); MyList<T> &operator = (const MyList<T> &l); MyList<T> &operator += (const T &item); MyList<T> &operator += (const MyList<T> &l); T &operator [](int index); friend std::ostream & operator<< <T>(std::ostream &os, const MyList<T> &obj); void sort(bool less=true); void reverse(); ~MyList(){delete [] a;} }; template<class T> void mysort(T* &a, int start, int end){ if(start >= end) return; int i=start,j=end,temp = start; T middle; while(j>i){ while(a[j]>a[temp] && j>start) j--; if(j!= start){ middle = a[j]; a[j] = a[temp]; a[temp] = middle; temp = j; } while(a[i]<a[temp] && i<end) i++; if(i!=end){ middle = a[i]; a[i] = a[temp]; a[i] = middle; temp = i; } } 
mysort(a,start,temp-1); mysort(a,temp+1,end); } using namespace std; int main(){ MyList<int> a, b; int i; for (i=0; i<5; ++i) a.push(i); // a = [0, 1, 2, 3, 4] a[3] = 15; // a = [0, 1, 2, 15, 4] a.sort(); // a = [0, 1, 2, 4, 15] a.reverse(); // a = [15, 4, 2, 1, 0] a += 12; // a = [15, 4, 2, 1, 0, 12] for (i=0; i<a.get_size(); ++i) cout<<a[i]<<endl; b = a.get_item(4, -3); // b = [] b = a.get_item(3, -1); // b = [1, 0, 12] a += b; // a = [15, 4, 2, 1, 0, 12, 1, 0, 12] for (i=0; i<a.get_size(); ++i) cout<<a.get_item(i)<<endl; cout<<a.count(5)<<endl; b.clean(); // b = [] cout<<b.get_size()<<endl; a.erase(2, 6); // a = [15, 4, 0, 12] b = a + a; // b = [15, 4, 0, 12, 15, 4, 0, 12] b.insert(3, 116); // b = [15, 4, 0, 116, 12, 15, 4, 0, 12] b.remove(4); // b = [15, 0, 116, ...] cout<<b<<endl; MyList<double> c(10, 3.14); for (i=0; i<100; ++i) c.push(1.1*i); cout<<c.get_item(100, 105)<<endl; return 0; } template<class T> void MyList<T>::double_space(){ T *b; if(size == 0) size = 1; size *= 2; b = new T [size]; for(int i=0;i<pos;i++) b[i] = a[i]; delete [] a; a = b; } template<class T> MyList<T>::MyList(int num, const T &item){ size = num; pos = 0; a = new T [size]; for(int i=0;i<num;++i) a[pos++] = item; } template<class T> MyList<T>::MyList(const MyList<T> &l){ size = l.size; pos = l.pos; a = new T [size]; for(int i=0;i<pos;++i) a[i] = l.a[i]; } template<class T> MyList<T>::MyList(T* arr, int len){ size = len; pos = len; a = new T [size]; for(int i=0;i<len;++i) a[i] = arr[i]; } template<class T> void MyList<T>::push(const T &item){ if(pos == size) double_space(); a[pos++] = item; } template<class T> T MyList<T>::pop() throw(pop_error){ if(pos == 0) throw pop_error(); T temp; temp = a[--pos]; a[pos].~T(); return temp; } template<class T> void MyList<T>::insert(int index, const T &item) throw(cross_error){ if(index > pos) throw cross_error(); push(item); for(int i=pos-1;i>index;--i){ a[i] = a[i-1]; } a[index] = item; } template<class T> void MyList<T>::clean(){ while(pos != 
0) a[--pos].~T(); } template<class T> int MyList<T>::get_size(){ return pos; } template<class T> void MyList<T>::erase(int start, int end) throw(cross_error){ if(end<0) end += pos; if(start<0) start += pos; if(start < 0 || end < start || end >= pos) throw cross_error(); int middle = pos; pos = start; while(pos+end-start+1 < middle) a[pos++] = a[pos+end-start]; for(int i=pos;i<middle;i++) a[i].~T(); } template<class T> T MyList<T>::get_item(int index) throw(cross_error){ if(index < 0) index += pos; if(index >= pos || index < 0) throw cross_error(); return a[index]; } template<class T> MyList<T> MyList<T>::get_item(int start, int end) throw(cross_error){ if(start < 0) start += pos; if(end < 0) end += pos; if(start < 0 || end >= pos) throw cross_error(); MyList<T> x; if(end < start) return x; MyList<T> y(a+start, end-start+1); return y; } template<class T> int MyList<T>::count(const T &item){ int count = 0; for(int i=0;i<pos;++i){ if(a[i] == item) count++; } return count; } template<class T> void MyList<T>::remove(const T &item){ int i; for(i=0;i<pos;++i){ if(a[i] == item) break; } if(i != pos){ for(int j=i;j<pos-1;++j) a[j] = a[j+1]; a[--pos].~T(); } } template<class T> MyList<T> operator + (const MyList<T> &l1, const MyList<T> &l2){ MyList<T> x(l1); for(int i=0;i<l2.pos;++i) x.push(l2.a[i]); return x; } template<class T> MyList<T> operator + (const MyList<T> &l1, const T &item){ MyList<T> x(l1); x.push(item); return x; } template<class T> MyList<T>& MyList<T>::operator = (const MyList<T> &l){ size = l.size; pos = l.pos; delete [] a; a = new T [size]; for(int i=0;i<pos;++i) a[i] = l.a[i]; return *this; } template<class T> MyList<T>& MyList<T>::operator += (const T &item){ this->push(item); return *this; } template<class T> MyList<T>& MyList<T>::operator += (const MyList<T> &l){ *this = *this + l; return *this; } template<class T> T& MyList<T>::operator [](int index){ return (this->a)[index]; } template<class T> std::ostream & operator<<(std::ostream &os, const 
MyList<T> &obj){ os << '['; int i=0; if(obj.pos == 0){ os << ']'; return os; } for(i=0;i<obj.pos-1;++i) os << obj.a[i] << ", "; os << obj.a[i] << ']'; return os; } template<class T> void MyList<T>::sort(bool less){ mysort(a, 0, pos-1); if(!less) reverse(); } template<class T> void MyList<T>::reverse(){ int i=0,j=pos-1; T middle; while(j>i){ middle = a[i]; a[i] = a[j]; a[j] = middle; j--; i++; } }
0bdf72b10a64cb5c4bbc77daee4b0ef4bdd01620
[ "C++" ]
1
C++
qu10wenhao/MyList
d8e1436109301f9d3b02e956809287d5011cc76b
eef06cd3e3ad29819b48b187f0eb8cb27a931f40
refs/heads/master
<repo_name>davodaslanifakor/validator<file_sep>/regex-rule.js /* eslint-disable */ export const email = new RegExp(/^(([^<>()[\]\\.,;:\s@"]+(\.[^<>()[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/) export const password = new RegExp('^(?=.*[0-9])(?=.{8,})') export const passwordConfirm = (password,re_password)=> password === re_password <file_sep>/form.js import { passwordConfirm, email, password } from './regex-rule' class Errors { constructor () { this.errors = {} } get (field) { if (this.errors[field]) { return this.errors[field][0] } } has (field) { return this.errors.hasOwnProperty(field) } any () { return Object.keys(this.errors).length > 0 } record (errors) { this.errors = errors } clear (field) { if (field) { delete this.errors[field] } // this.errors = {} } } class Form { constructor (data) { this.orginlaData = data this.errors = new Errors() this.state = '' } push (name, item) { this.orginlaData[name].push(JSON.parse(JSON.stringify(item))) } setType ({ name, type }) { this.orginlaData[name].type = type } getValue ({ name }) { return this.orginlaData[name].value ? this.orginlaData[name].value : '' } clearValue ({ name }) { const oldValue = this.getValue({ name }) const newValue = typeof oldValue === 'object' ? 
null : '' this.setValue({ name, data: newValue }) } setRule ({ name, rule }) { this.orginlaData[name].rule = rule } reset () { for (const field in this.orginlaData) { this[field] = '' } this.errors.clear() } setValue ({ name, data }) { this.orginlaData[name].value = data } data () { const data = {} for (const field in this.orginlaData) { data[this.orginlaData[field].key] = this.orginlaData[field].value } return data } validate ({ fields = null }) { const errors = {} const data = this.orginlaData for (const resolveKey in data) { const rule = data[resolveKey].rule || [] const value = data[resolveKey].value const name = data[resolveKey].name for (let i = 0; i < rule.length; i++) { const item = rule[i] if (item === 'require') { if (!value || value === '') { if (!errors[name]) { errors[name] = [] errors[name].push('require') } else { errors[name].push('require') } } } if (item === 'email') { if (!email.test(String(value).toLowerCase())) { if (!errors[name]) { errors[name] = [] errors[name].push(item) } else { errors[name].push(item) } } } if (item === 'password') { if (!password.test(value)) { if (!errors[name]) { errors[name] = [] errors[name].push(item) } else { errors[name].push(item) } } } if (item === 'passwordConfirm') { if (!passwordConfirm(data.password.value, value)) { if (!errors[name]) { errors[name] = [] errors[name].push(item) } else { errors[name].push(item) } } } if (fields) { if (!fields.includes(name)) { delete errors[name] } } } } this.errors.record(errors) if (!this.errors.any()) { return {isValid:true} } else { return {isValid:false,errors} } } setState ({state}) { this.state = state } formState ({ state }) { return this.state === state } } export default Form
3782089eb3d041092cb3f549d42b8d6b66b08158
[ "JavaScript" ]
2
JavaScript
davodaslanifakor/validator
298cffa444053cd32b6744c08c1236d4a54db3d3
cdb5d6fbd2078f9a0b9db9a31acb7a3ad76ec013
refs/heads/master
<repo_name>DAHarder/CodingPractice<file_sep>/Python/Advent_of_code_2022/Day_2/day_2_rockpaperscissors.py data_file = open("data.txt", "r") score = 0 full_array = [] count = 0 values = { "AY": 4, "AX": 3, "AZ": 8, "BY": 5, "BX": 1, "BZ": 9, "CY": 6, "CX": 2, "CZ": 7 } # ROCK 1 # PAPER 2 # SCISSOR 3 # DRAW 3 # LOSE 0 # WIN 6 # a = ROCK # b = PAPER # c = SCISSORS # x = lose # y = draw # z = win for line in data_file: line_array = line.replace(" ", "").strip() full_array.append(line_array) for item in full_array: score += values.get(item) print (score) data_file.close() <file_sep>/Python/Advent_of_code_2022/Day_1/day_1.py data_file = open("data.txt", "r") elf_number = 1 elf_most_calories1 = 0 calorie_count = 0 highest_calorie = 0 highest_calorie2 = 0 highest_calorie3 = 0 data_array = [] for line in data_file: additem = line.strip() if (additem == ""): data_array.append(additem) else: data_array.append(int(additem)) for item in data_array: if (item == ""): if (calorie_count > highest_calorie): highest_calorie = calorie_count calorie_count = 0 else: calorie_count += item calorie_count = 0 for item in data_array: if (item == ""): if (calorie_count > highest_calorie2 and calorie_count != highest_calorie): highest_calorie2 = calorie_count calorie_count = 0 else: calorie_count += item calorie_count = 0 for item in data_array: if (item == ""): if (calorie_count > highest_calorie3 and calorie_count != highest_calorie2 and calorie_count != highest_calorie): highest_calorie3 = calorie_count calorie_count = 0 else: calorie_count += item combined_calorie = highest_calorie + highest_calorie2 + highest_calorie3 print (f"Elf Number: {elf_number} \n \ highest calorie: {highest_calorie} \n \ highest calorie2: {highest_calorie2} \n \ highest calorie3: {highest_calorie3} \n \ combined calorie: {combined_calorie}") data_file.close()<file_sep>/MathEquation.py def mathEquation(): import re i = 0 while i < 5: mathString = input( "Enter a simple math equation in the format xx+xx (example: 12 
+ 44) You may only use the following four operators (+ - * /)") mathString = mathString.replace(" ", "") validationVar = re.search("^\d+[+*/-]\d+$", mathString) divideByZeroVar = re.search("[/][0]", mathString) if validationVar == None: print("That is not a valid equation, please try again") elif divideByZeroVar != None: print("Jesus, are you trying to destroy the universe? Try again fool!") else: # print(validationVar) print("the math equation is", mathString) break #print(i) i += 1 print("The answer is " + str(round(eval(mathString),2))) mathEquation()<file_sep>/Python/Advent_of_code_2022/Day 3/day_3_rucksack.py data_file = open("data.txt", "r") line_array = [] result_value = 0 for line in data_file: line_data = [] mid_point = int(len(line) / 2) line_data.append(line[:mid_point].strip()) line_data.append(line[mid_point:].strip()) line_array.append(line_data) for item in line_array: first_dict = {} for character in item[0]: first_dict[character] = 1 for character2 in item[1]: if (character2 in first_dict): if (first_dict[character2] > 0): if (character2.islower()): result_value += (ord(character2)-96) first_dict[character2] -= 1 else: result_value += (ord(character2)-38) first_dict[character2] -= 1 print (result_value) data_file.close() <file_sep>/Java/Data_Structures/src/com/company/LinkedList.java package com.company; //Linked List public class LinkedList { Node head; Node tail; public void add(int data){ // LL is emtpy if (this.head == null) { Node newNode = new Node(data); this.head = newNode; this.tail = newNode; } //LL not emtpy else { Node newNode = new Node(data); newNode.previous = this.tail; this.tail = newNode; this.tail = newNode; } } public void listTraverse(LinkedList list) { Node curNode = list.head; while (curNode != null){ System.out.println(curNode.data); curNode = curNode.next; } } } //Node class Node { int data; Node next; Node previous; public Node(int data) { this.data = data; } } 
<file_sep>/Java/Data_Structures/src/com/company/ArraysTesting.java package com.company; import java.util.*; public class ArraysTesting { public static int[] sortedSquares(int[] nums) { int n = 0; int j = nums.length - 1; int[] result = new int[nums.length]; for (int i = nums.length - 1; i >= 0; i--) { if (Math.abs(nums[n]) > Math.abs(nums[j])){ result[i] = nums[n] * nums[n]; n++; } else{ result[i] = nums[j]*nums[j]; j--; } } return result; } public static void duplicateZeros(int[] arr) { int[] sArray = new int[arr.length]; int j = 0; for (int i = 0; i < arr.length; i++) { if (arr[j] == 0) { sArray[i] = 0; i++; if (i < arr.length) { sArray[i] = 0; j++; } } else { sArray[i] = arr[j]; j++; } } for (int i = 0; i < arr.length; i++) { arr[i] = sArray[i]; } } public static void merge(int[] nums1, int m, int[] nums2, int n) { int j = m-1; //nums1 int k = n-1; //nums2 int i = m+n-1; //full while (j >= 0 && k >= 0){ if (nums1[j] > nums2[k]) nums1[i--] = nums1[j--]; else nums1[i--] = nums2[k--]; } while (k >= 0) nums1[i--] = nums2[k--]; } public static int removeElement(int[] nums, int val) { int m = 0; for (int i = 0; i < nums.length; i++) { if (nums[i] != val) nums[m++] = nums[i]; } return m; } public static int removeDuplicates(int[] nums) { int i = 0; for (int n : nums) { if (i == 0 || n > nums[i-1]) nums[i++] = n; } return i; } public static boolean checkIfExist(int[] arr) { HashSet<Integer> hashSet = new HashSet<>(); for (int n : arr) { if (hashSet.contains(n*2) || (hashSet.contains(n/2) && n%2 == 0)) return true; else hashSet.add(n); } return false; } public static boolean validMountainArray(int[] arr) { if (arr.length < 3) return false; int start = 0; int end = arr.length - 1; while (start < end) { if (arr[start + 1] > arr[start]) start++; else if (arr[end - 1] > arr[end]) end--; else break; } return start != 0 && end != arr.length - 1 && start == end; } public static int[] replaceElements(int[] arr) { if (arr.length == 1) { arr[0] = -1; return arr; } int mx = -1, n = 
arr.length - 1, a = 0; for (int i = n; i >= 0; i--) { a = arr[i]; arr[i] = mx; mx = Math.max(mx, a); } return arr; } public static void moveZeroes(int[] nums) { if (nums == null || nums.length == 0) { return; } int cur = 0; for (int i = 0; i < nums.length; i++) { if (nums[i] != 0) { int temp = nums[cur]; nums[cur++] = nums[i]; nums[i] = temp; } } } public static int heightChecker(int[] heights) { int result = 0; int n = 0; int[] heightFreq = new int[101]; for (int height: heights) { heightFreq[height]++; } for (int height: heights){ while(heightFreq[n] == 0) n++; if (height != n) result++; heightFreq[n]--; } return result; } public static int thirdMax(int[] nums) { Integer first = null; Integer second = null; Integer third = null; for(Integer n : nums){ if (n.equals(first) || n.equals(second) || n.equals(third)) continue; if (first == null || n > first) { third = second; second = first; first = n; } else if (second == null || n > second) { third = second; second = n; } else if (third == null || n > third) third = n; } if (second == null || third == null) return first.intValue(); return third.intValue(); } public static List<Integer> findDisappearedNumbers(int[] nums) { List<Integer> res = new ArrayList<>(); int n = nums.length; // the numbers go from 1 to the length of array. // this takes the value - 1, goes to that index and // adds the array length to it. // if any of the numbers dont add to an index, that // value wont increase by the array length for (int i = 0; i < nums.length; i ++) nums[(nums[i]-1) % n] += n; // if any values havent increased by the array length, // then that index - 1 is doesnt exist in the original array. for (int i = 0; i < nums.length; i ++) if (nums[i] <= n) res.add(i+1); return res; } public int[] intersect(int[] nums1, int[] nums2) { Map<Integer, Integer> map = new HashMap<>(); List<Integer> resultArray = new ArrayList<>(); for(int n : nums1) { Integer count = map.containsKey(n) ? 
map.get(n) : 0; map.put(n, ++count); } for(int n : nums2) if(map.containsKey(n) && map.get(n) > 0){ Integer count = set.get(n); resultArray.add(n); map.put(n, --count) } int[] result = new int[resultArray.size()]; for(int i = 0; i < resultArray.size(); i++){ result[i] = resultArray.get(i).intValue(); } map. return result; } }
fca23ea02367cf99755299209a9c419b8be77d31
[ "Java", "Python" ]
6
Python
DAHarder/CodingPractice
0110588f2a05afc7991c80994dc05003a22107e5
d233ca1305fc36e22481d3448680b8628fae1209
refs/heads/master
<file_sep>#include <curl/curl.h> #include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <string.h> #include <sys/select.h> #include <sys/time.h> #include <errno.h> #include <assert.h> #if 0 #define VLOG printf #else #define VLOG if (0) printf #endif #define LOG printf #define CURL_CALL(op) \ { \ const CURLcode res = op; \ if (res) { \ fprintf(stderr, "Curl easy failure: %s (%s)\n", \ curl_easy_strerror(res), #op); \ return res; \ } \ } #define CURLM_CALL(op) \ { \ const CURLMcode res = op; \ if (res) { \ fprintf(stderr, "Curl multi failure: %s (%s)\n", \ curl_multi_strerror(res), #op); \ return res; \ } \ } static size_t headerCallback(void * dataPtr, size_t size, size_t nmemb, void *userdata) { size *= nmemb; size_t colon = 0; const char *data = (const char*)dataPtr; while (colon < size) { if (data[colon] == ':') { break; } else if (isspace(data[colon])) { char *httpVersion = (char*)(userdata); if (size >= 8 && !strncmp(data, "HTTP/", 5)) { strncpy(httpVersion, data + 5, 3); } break; } ++colon; } return size; } static size_t dataCallback(void * data, size_t size, size_t nmemb, void * userdata) { *((int*)userdata) += (size * nmemb); VLOG("GOT DATA %d\n", *((int*)userdata)); (void)data; (void)userdata; return size * nmemb; } struct Request { char *url; struct Request *next; char httpVersion[4]; CURL *easy; int written; }; static struct Request *create(struct Request *head, const char *url) { struct Request *req = (struct Request*)calloc(1, sizeof(struct Request)); req->url = strdup(url); if (!head) { head = req; } else { struct Request *tmp = head; while (tmp->next) tmp = tmp->next; tmp->next = req; } return head; } static int addRequest(CURLM *multi, struct Request **reqPtr) { struct Request *req = *reqPtr; req->easy = curl_easy_init(); CURL_CALL(curl_easy_setopt(req->easy, CURLOPT_URL, req->url)); VLOG("SETTING URL %s - %p\n", req->url, req->easy); CURL_CALL(curl_easy_setopt(req->easy, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2_0)); 
CURL_CALL(curl_easy_setopt(req->easy, CURLOPT_HEADERFUNCTION, headerCallback)); CURL_CALL(curl_easy_setopt(req->easy, CURLOPT_HEADERDATA, &req->httpVersion)); CURL_CALL(curl_easy_setopt(req->easy, CURLOPT_WRITEFUNCTION, dataCallback)); CURL_CALL(curl_easy_setopt(req->easy, CURLOPT_SSL_VERIFYHOST, 0)); CURL_CALL(curl_easy_setopt(req->easy, CURLOPT_SSL_VERIFYPEER, 0)); CURL_CALL(curl_easy_setopt(req->easy, CURLOPT_PIPEWAIT, 1)); CURL_CALL(curl_easy_setopt(req->easy, CURLOPT_WRITEDATA, &req->written)); /* CURL_CALL(curl_easy_setopt(req->easy, CURLOPT_VERBOSE, 1)); */ CURLM_CALL(curl_multi_add_handle(multi, req->easy)); *reqPtr = req->next; return CURLE_OK; } static int process(CURLM *multi, struct Request **head) { int done; do { done = 0; fd_set r, w; FD_ZERO(&r); FD_ZERO(&w); int maxFD = 0; CURLM_CALL(curl_multi_fdset(multi, &r, &w, 0, &maxFD)); long maxTime = -1; CURL_CALL(curl_multi_timeout(multi, &maxTime)); struct timeval t = { 0, 1000 }; if (maxTime > 0) { t.tv_sec = maxTime / 1000; t.tv_usec = (maxTime % 1000) * 1000; } VLOG("maxtime %ld\n", maxTime); if (maxFD > 0) { VLOG("SELECTING for %ld.%03ld (%ld)\n", t.tv_sec, (long)(t.tv_usec / 1000), maxTime); const int ret = select(maxFD , &r, &w, 0, &t); if (ret == -1 && errno == EINTR) continue; } CURLMcode m; do { int tmp; m = curl_multi_perform(multi, &tmp); } while (m == CURLM_CALL_MULTI_PERFORM); if (m) { fprintf(stderr, "Curl multi perform failure: %s\n", curl_multi_strerror(m)); return m; } CURLMsg *msg; int tmp; while ((msg = curl_multi_info_read(multi, &tmp)) != 0) { if (msg->msg == CURLMSG_DONE) { struct Request *req = *head; struct Request *prev = 0; while (req && req->easy != msg->easy_handle) { prev = req; req = req->next; } if (prev) { assert(req != *head); prev->next = req->next; } else { assert(req == *head); *head = req->next; } if (msg->data.result) { LOG("Request failed %s %s\n", req->url, curl_easy_strerror(msg->data.result)); } else { long status; CURL_CALL(curl_easy_getinfo(msg->easy_handle, 
CURLINFO_RESPONSE_CODE, &status)); LOG("Request finished %s %s => %ld\n", req->url, req->httpVersion, status); } /* VLOG("ABOUT TO CLEANUP %p\n", ((struct SessionHandle*)easy->easy_conn); */ curl_multi_remove_handle(multi, msg->easy_handle); curl_easy_cleanup(msg->easy_handle); free(req->url); free(req); done = 1; break; } } } while (!done); return CURLE_OK; } int main(int argc, char **argv) { const char *urlPrefix = "https://dtaserver.corp.netflix.com:8081/files"; const char *urls[] = { "/data-1k", "/data-10k", "/data-100k", "/data-1m", "/data-10m", "/data-50m" }; enum { Parallel, Sequential } mode = Parallel; for (int i=1; i<argc; ++i) { if (!strcmp(argv[i], "--parallel") || !strcmp(argv[i], "-p")) { mode = Parallel; } else if (!strcmp(argv[i], "--sequential") || !strcmp(argv[i], "-s")) { mode = Sequential; } else if (!strncmp(argv[i], "--url-prefix=", 13)) { urlPrefix = argv[i] + 13; } } struct Request *head = 0; const size_t count = sizeof(urls) / sizeof(urls[0]); for (size_t i=0; i<count; ++i) { char url[1024]; snprintf(url, sizeof(url), "%s%s", urlPrefix, urls[i]); head = create(head, url); } struct Request *next = head; CURLM *multi = curl_multi_init(); assert(multi); CURLM_CALL(curl_multi_setopt(multi, CURLMOPT_PIPELINING, CURLPIPE_HTTP1|CURLPIPE_MULTIPLEX)); if (mode == Parallel) { while (next) { if (addRequest(multi, &next) != 0) return 1; } while (head) { if (process(multi, &head)) { return 1; } } } else { while (next) { if (addRequest(multi, &next) != 0) return 1; if (process(multi, &head)) return 1; } } curl_multi_cleanup(multi); return 0; } <file_sep>cmake_minimum_required(VERSION 2.8) include_directories(${CMAKE_CURRENT_LIST_DIR}) # set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g") add_executable(curltest main.c) # if (NOT PREFIX) # set(PREFIX /usr/local/i686-netflix-linux-gnu-4.3/netflix) # endif () set(STUFF /Users/abakken/local) include_directories(${STUFF}/include) target_link_libraries(curltest -L${STUFF}/lib/) 
# -Wl,-rpath=${STUFF}/lib/) target_link_libraries(curltest curl)
9617d448498571b913606c51c7cf09351fb0f91e
[ "C", "CMake" ]
2
C
Andersbakken/curltest
75bb4dc9a9d32d222efa4ecde8fcfc831b1e85ab
9e2e50557147e84a1de4a69af2088c76e77bf09e
refs/heads/master
<repo_name>Adian13/PlayForLearnPFL<file_sep>/app/src/main/java/com/example/adi/playforlearnpfl/Alunno/HomeAlunno.java package com.example.adi.playforlearnpfl.Alunno; import android.content.DialogInterface; import android.content.Intent; import android.os.Bundle; import android.support.v7.app.AlertDialog; import android.support.v7.app.AppCompatActivity; import android.view.ContextMenu; import android.view.Menu; import android.view.MenuInflater; import android.view.MenuItem; import android.view.View; import android.widget.Button; import android.widget.ImageButton; import com.example.adi.playforlearnpfl.Games; import com.example.adi.playforlearnpfl.LoginActivity; import com.example.adi.playforlearnpfl.R; import com.example.adi.playforlearnpfl.Record.RecordPersonali; import com.example.adi.playforlearnpfl.Record.RecordTop10; //import static com.example.adi.playforlearn10.Creatore.giochiDaLanciare; public class HomeAlunno extends AppCompatActivity { // credo che queste variabili debbano stare in un'altra classe public static final String PRIMA = "PRIMA"; public static final String ITALIANO = "ITALIANO"; public static final String STORIA = "STORIA"; public static final String GEOGRAFIA = "GEOGRAFIA"; public static final String INGLESE = "INGLESE"; public static final String MATEMATICA = "MATEMATICA"; public static final String MATERIA = "MATERIA"; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.home_alunno); getSupportActionBar().setTitle("Home alunno"); final Bundle utenteBundle =getIntent().getBundleExtra("utente"); // Bind buttons ImageButton account = (ImageButton) findViewById(R.id.btAccount); Button btItaliano= (Button)findViewById(R.id.btItaliano); Button btStoria =(Button)findViewById(R.id.btStoria); Button btMatematica = (Button)findViewById(R.id.btMatematica); Button btGeografia= (Button)findViewById(R.id.btGeografia); Button btInglese= (Button)findViewById(R.id.btInglese); // Bind image 
buttons ImageButton btMyRecord =(ImageButton) findViewById(R.id.myRecord); ImageButton btTop10 = (ImageButton) findViewById(R.id.btTop10); btTop10.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { Intent i = new Intent(getApplicationContext(), RecordTop10.class); i.putExtra("utente", utenteBundle); startActivity(i); finish(); } }); btMyRecord.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { Intent i = new Intent(getApplicationContext(), RecordPersonali.class); i.putExtra("utente", utenteBundle); startActivity(i); finish(); } }); account.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { Intent i = new Intent(getApplicationContext(), AccountAlunno.class); i.putExtra("utente", utenteBundle); startActivity(i); finish(); } }); OnMateriaButtonClick onMateriaButtonClick = new OnMateriaButtonClick(); btItaliano.setOnClickListener(onMateriaButtonClick); btStoria.setOnClickListener(onMateriaButtonClick); btMatematica.setOnClickListener(onMateriaButtonClick); btGeografia.setOnClickListener(onMateriaButtonClick); btInglese.setOnClickListener(onMateriaButtonClick); } /*public ArrayList<Gioco> caricaGioco(Button bt) { if (bt.getText().equals(ITALIANO)) { giochiDaLanciare.addAll(Creatore.giochiItaliano); } else if (bt.getText().equals(STORIA)) { giochiDaLanciare.addAll(Creatore.giochiStoria); } else if (bt.getText().equals(MATEMATICA)) { giochiDaLanciare.addAll(Creatore.giochiMatematica); } else if (bt.getText().equals(GEOGRAFIA)) { giochiDaLanciare.addAll(Creatore.giochiGeografia); } else if (bt.getText().equals(INGLESE)) { giochiDaLanciare.addAll(Creatore.giochiInglese); } return giochiDaLanciare; }*/ @Override public boolean onCreateOptionsMenu(Menu menu) { MenuInflater inflater=getMenuInflater(); inflater.inflate(R.menu.menu_main,menu); return true; } @Override public boolean onOptionsItemSelected(MenuItem item) { Bundle utenteBundle= 
getIntent().getBundleExtra("utente"); int id=item.getItemId(); switch(id) { case R.id.info: Intent i= new Intent(getApplicationContext(), HomeAlunno.class); i.putExtra("utente", utenteBundle); startActivity(i); finish(); /* Codice di gestione della voce MENU_1 */ break; case R.id.impostazioni: /* Codice di gestione della voce MENU_2 */ } return false; } @Override public void onBackPressed() { AlertDialog.Builder builder = new AlertDialog.Builder(HomeAlunno.this); builder.setCancelable(true); builder.setMessage("Uscire?"); builder.setPositiveButton("Si", new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { startActivity(new Intent(getApplicationContext(),LoginActivity.class)); finish(); } }); builder.setNegativeButton("No", new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { dialog.dismiss(); } }); builder.create().show(); } /** * Listener per i pulsanti delle materie */ private class OnMateriaButtonClick implements View.OnClickListener{ @Override public void onClick(View v) { Bundle utenteBundle= getIntent().getBundleExtra("utente"); Intent i = new Intent(getApplicationContext(),Games.class); String materia = ((Button)v).getText().toString(); materia = materia.toUpperCase(); i.putExtra(MATERIA,materia); i.putExtra("utente", utenteBundle); startActivity(i); finish(); } } } <file_sep>/app/src/main/java/com/example/adi/playforlearnpfl/Insegnante/AccountMaestra.java package com.example.adi.playforlearnpfl.Insegnante; import android.content.DialogInterface; import android.content.Intent; import android.content.SharedPreferences; import android.os.Bundle; import android.support.v7.app.AlertDialog; import android.support.v7.app.AppCompatActivity; import android.view.View; import android.widget.TextView; import com.example.adi.playforlearnpfl.Alunno.HomeAlunno; import com.example.adi.playforlearnpfl.LoginActivity; import com.example.adi.playforlearnpfl.R; import 
com.example.adi.playforlearnpfl.Registrazione; import com.example.adi.playforlearnpfl.RegistrazioneInsegnante; //import com.example.adi.playforlearnpfl.Registrazione; /** * AccountMaestra è la classe che consente di visualizzare le informazioni inerenti ad ogni utente amministratore registratosi * nel sistema PlayForLearn. Le informazioni dell'utente sono : nome,cognome,username e password. Inoltre la suddetta classe * ha a disposizione una serie di pulsanti da cui: * -Permette di registrare un account utente. * -Permettere di fare il logout dal sistema e infine permette il logout del sistema. * */ public class AccountMaestra extends AppCompatActivity { AlertDialog.Builder miaAlert; private TextView t1, t2, t3, t4; protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.account_maestra); miaAlert = new AlertDialog.Builder(this); t1 = (TextView) findViewById(R.id.username); t3 = (TextView) findViewById(R.id.nome); t4 = (TextView) findViewById(R.id.cognome); SharedPreferences prefs = getSharedPreferences(RegistrazioneInsegnante.SharedPrefName, 0); this.t1.setText(prefs.getString("username", "username non trovato")); this.t3.setText(prefs.getString("nome", "nome non trovato")); this.t4.setText(prefs.getString("cognome", "cognome non trovato")); getSupportActionBar().setTitle("Account Maestra"); } @Override public void onBackPressed() { super.onBackPressed(); Intent i = new Intent(getApplicationContext(), HomeMaestra.class); startActivity(i); finish(); } /** * Permette di uscire dal login * @param v mostra un dialolg per la scelta di uscire o meno dall'account * */ public void logout(View v){ miaAlert.setTitle("Vuoi davvero uscire dal sistema?"); miaAlert.setCancelable(false); miaAlert.setPositiveButton("No", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { dialog.dismiss(); } }); miaAlert.setNegativeButton("Si", new DialogInterface.OnClickListener() { public void 
onClick(DialogInterface dialog, int id) { Intent i = new Intent(getApplicationContext(), LoginActivity.class); startActivity(i); finish(); } }); AlertDialog alert = miaAlert.create(); alert.show(); } /** * rimanda alla pagina di registrazione * @param v la funzione viene chiamata al click del pulsante */ public void aggiungiUtente(View v) { Intent i = new Intent(getApplicationContext(), Registrazione.class); startActivity(i); finish(); } /** * Rimanda alla pagina principale del sistema * @param v la funzione viene chiamata al click del pulsante */ public void home(View v) { Intent i = new Intent(getApplicationContext(), HomeMaestra.class); startActivity(i); finish(); } } <file_sep>/app/src/main/java/com/example/adi/playforlearnpfl/ListaAlunni.java package com.example.adi.playforlearnpfl; import android.app.AlertDialog; import android.content.DialogInterface; import android.content.Intent; import android.os.AsyncTask; import android.os.Bundle; import android.support.design.widget.FloatingActionButton; import android.support.design.widget.Snackbar; import android.support.v7.app.AppCompatActivity; import android.util.Log; import android.view.View; import android.widget.Button; import android.widget.EditText; import android.widget.ImageButton; import android.widget.ListView; import android.widget.RadioButton; import com.example.adi.playforlearnpfl.Alunno.HomeAlunno; import com.example.adi.playforlearnpfl.Giochi.FineGioco; import com.example.adi.playforlearnpfl.Insegnante.HomeMaestra; import org.json.JSONException; import org.json.JSONObject; import java.io.BufferedWriter; import java.io.IOException; import java.io.OutputStreamWriter; import java.net.HttpURLConnection; import java.net.URL; import java.util.ArrayList; import java.util.Scanner; /** * ListaAlunni è una classe che rappresenta la lista degli alunni della classe "Prima" con i rispettivi record. La classe * viene gestita dal json che è un semplice formato per lo scambio di dati. 
Il json restituirà alla fine l'username e il * record del singolo utente. */ public class ListaAlunni extends AppCompatActivity { private static ArrayList<AlunnoInLista> alunni; public EditText edit; AlunnoInLista alunno; ArrayList<AlunnoInLista> alunniDaVisualizzare; AlunniInListaListAdapter customAdapter, cercati; AlertDialog.Builder miaAlert; private boolean trovato = false; ImageButton lente; String str = "", cognome, username, nome; FloatingActionButton floatingActionButtonSearch; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.lista_alunni); edit = (EditText) findViewById(R.id.edit); String str = edit.getText().toString(); getSupportActionBar().setTitle("Lista Alunni"); miaAlert = new AlertDialog.Builder(this); lente = (ImageButton) findViewById(R.id.lente); lente.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { ricerca(edit.getText().toString()); } }); popola(); } public boolean ricerca(String str) { str = edit.getText().toString(); int i = 0, k = 0; int j = str.length() + i; for (i = 0; i < str.length(); i++) { if (j < str.length()) { alunno = alunni.get(k); if (alunno.getNome().substring(i, j).compareToIgnoreCase(str) == 0) { trovato = true; alunniDaVisualizzare.add(alunni.get(i)); popola(); } else trovato = false; } k++; j++; return trovato; } return trovato; } private void setTextLista() { ListView ll = (ListView) findViewById(R.id.lista); customAdapter = new AlunniInListaListAdapter(getApplicationContext(), R.layout.lista_row_alunni, alunni); ll.setAdapter(customAdapter); //Log.d("DEBUG_alunni", String.valueOf(alunni.size())); } private void popola() { new AsyncTask<Object, Object, Object>() { @Override protected void onPreExecute() { alunni = new ArrayList<AlunnoInLista>(10); } @Override protected Object doInBackground(Object... 
params) { String nome, record; try { String ip = LoginActivity.INDIRIZZO; int porta = 80; String nomeFile = "webservice/getAlunniPerClasse.php"; URL url = new URL("http", ip, porta, nomeFile); HttpURLConnection connection = (HttpURLConnection) url.openConnection(); connection.setDoInput(true); connection.setDoOutput(true); BufferedWriter bufferedWriter = new BufferedWriter(new OutputStreamWriter(connection.getOutputStream())); Log.d("DEBUG_list", getIntent().getStringExtra("classe")); bufferedWriter.write("classe=" + getIntent().getStringExtra("classe")); bufferedWriter.close(); Scanner scanner = new Scanner(connection.getInputStream()); String response = ""; JSONObject obj = null; while (scanner.hasNext()) { response = scanner.nextLine(); Log.d("DEBUG", response); obj = new JSONObject(response); Log.d("DEBUG_response", response); nome = obj.getString("nome"); cognome = obj.getString("cognome"); username = obj.getString("username"); //record = obj.getString("record"); alunni.add(new AlunnoInLista(nome, cognome, username)); /* ,record*/ } } catch (IOException e) { e.printStackTrace(); } catch (JSONException e) { e.printStackTrace(); } return null; } @Override protected void onPostExecute(Object o) { setTextLista(); } }.execute(); } public void onBackPressed() { //super.onBackPressed(); Intent i = new Intent(getApplicationContext(), HomeMaestra.class); startActivity(i); finish(); } /*View.OnClickListener onClickListener = new View.OnClickListener() { @Override public void onClick(View v) { android.support.v7.app.AlertDialog.Builder builder = new android.support.v7.app.AlertDialog.Builder(ListaAlunni.this); builder.setCancelable(true); builder.setMessage("Chi vuoi cercare?"); builder.setView(R.layout.layout_search); final EditText edit = (EditText) findViewById(R.id.edit_name_search); final ImageButton btsrc= (ImageButton) findViewById(R.id.btsrc); builder.setPositiveButton("Si", new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface 
dialog, int which) { ricerca(edit.getText().toString()); } }); builder.setNegativeButton("No", new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { dialog.dismiss(); } }); builder.create().show(); } };*/ public class AlunnoInLista{ String nome, cognome, username/*, record*/; private AlunnoInLista(String nome, String cognome, String username){ /* String record*/ this.nome= nome; this.cognome = cognome; this.username= username; // this.record= record; } // public String getRecord() { return record; } // public void setRecord(String record) { this.record = record; } public String getNome() { return nome; } public void setNome(String nome) { this.nome = nome; } public String getCognome() { return cognome; } public void setCognome(String cognome) { this.cognome = cognome; } public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } } }<file_sep>/app/src/main/java/com/example/adi/playforlearnpfl/RegistrazioneInsegnante.java package com.example.adi.playforlearnpfl; import android.content.Intent; import android.content.SharedPreferences; import android.os.AsyncTask; import android.support.design.widget.TextInputEditText; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.util.Log; import android.view.View; import android.widget.Button; import android.widget.CheckBox; import android.widget.Toast; import java.io.BufferedWriter; import java.io.IOException; import java.io.OutputStreamWriter; import java.net.HttpURLConnection; import java.net.MalformedURLException; import java.net.URL; import java.util.ArrayList; import java.util.Scanner; public class RegistrazioneInsegnante extends AppCompatActivity { private Button btRegistrati, btAnnulla; private CheckBox cbMateriaItaliano, cbMateriaMatematica, cbMateriaInglese, cbMateriaStoria, cbMateriaGeografia; private CheckBox cbPrima, cbSeconda, cbTerza, cbQuarta, cbQuinta; private 
TextInputEditText tietNome, tietCognome, tietUsername, tietPassword; private String nome, cognome, username, password; private ArrayList<String> classi, materie; public static final String SharedPrefName = "Shared"; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_registrazione_insegnante); getSupportActionBar().setTitle("Registrazione insegnante"); cbPrima = (CheckBox)findViewById(R.id.cbPrima); cbSeconda = (CheckBox)findViewById(R.id.cbSeconda); cbTerza = (CheckBox)findViewById(R.id.cbTerza); cbQuarta = (CheckBox)findViewById(R.id.cbQuarta); cbQuinta = (CheckBox)findViewById(R.id.cbQuinta); cbMateriaItaliano = (CheckBox)findViewById(R.id.cbMateriaItaliano); cbMateriaMatematica = (CheckBox)findViewById(R.id.cbMateriaMatematica); cbMateriaInglese = (CheckBox)findViewById(R.id.cbMateriaInglese); cbMateriaStoria = (CheckBox)findViewById(R.id.cbMateriaStoria); cbMateriaGeografia = (CheckBox)findViewById(R.id.cbMateriaGeografia); tietNome = (TextInputEditText)findViewById(R.id.tietNome); tietCognome = (TextInputEditText)findViewById(R.id.tietCognome); tietUsername = (TextInputEditText)findViewById(R.id.tietUsername); tietPassword = (TextInputEditText)findViewById(R.id.tietPassword); classi = new ArrayList<>(6); materie = new ArrayList<>(6); btAnnulla = (Button)findViewById(R.id.btAnnulla); btAnnulla.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { startActivity(new Intent(RegistrazioneInsegnante.this, LoginActivity.class)); finish(); } }); btRegistrati = (Button)findViewById(R.id.btRegistrati); btRegistrati.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { // Controllo i campi tietNome.setError(null); tietCognome.setError(null); tietUsername.setError(null); tietPassword.setError(null); if(tietNome.length() != 0){ nome = tietNome.getText().toString(); if(nome.trim().length() == 0){ tietNome.setError("Non può 
essere vuoto"); return; } }else{ tietNome.setError("Non può essere vuoto"); return; } if(tietCognome.length() != 0){ cognome = tietCognome.getText().toString(); if(cognome.trim().length() == 0){ tietCognome.setError("Non può essere vuoto"); return; } }else{ tietCognome.setError("Non può essere vuoto"); return; } if(tietUsername.length() != 0){ username = tietUsername.getText().toString(); if(username.trim().length() == 0){ tietUsername.setError("Non può essere vuoto"); return; } }else{ tietUsername.setError("Non può essere vuoto"); return; } if(tietPassword.length() != 0){ password = tietPassword.getText().toString(); if(password.trim().length() == 0){ tietPassword.setError("Non può essere vuoto"); return; } }else{ tietPassword.setError("Non può essere vuoto"); return; } if(cbMateriaItaliano.isChecked()) materie.add("Italiano"); if(cbMateriaMatematica.isChecked()) materie.add("Matematica"); if(cbMateriaInglese.isChecked()) materie.add("Inglese"); if(cbMateriaStoria.isChecked()) materie.add("Storia"); if(cbMateriaGeografia.isChecked()) materie.add("Geografia"); if(materie.isEmpty()){ Toast.makeText(RegistrazioneInsegnante.this, "Bisogna selezionare almeno una materia.", Toast.LENGTH_SHORT).show(); return; } if(cbPrima.isChecked()) classi.add("Prima"); if(cbSeconda.isChecked()) classi.add("Seconda"); if(cbTerza.isChecked()) classi.add("Terza"); if(cbQuarta.isChecked()) classi.add("Quarta"); if(cbQuinta.isChecked()) classi.add("Quinta"); if(classi.isEmpty()){ Toast.makeText(RegistrazioneInsegnante.this, "Bisogna selezionare almeno una classe.", Toast.LENGTH_SHORT).show(); return; } new AsyncTask<Object,Object,Object>(){ @Override protected Object doInBackground(Object... 
params) { try { String ip = LoginActivity.INDIRIZZO; int porta = 80; String nomeFile = "webservice/registrazioneUtente.php"; URL url = new URL("http",ip, porta, nomeFile); HttpURLConnection connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setChunkedStreamingMode(0); connection.setDoInput(true); connection.setDoOutput(true); BufferedWriter bufferedWriter = new BufferedWriter(new OutputStreamWriter(connection.getOutputStream())); String toSend = "nome=" + nome + "&cognome=" + cognome + "&username=" + username + "&password=" + <PASSWORD> + "&tipologia=insegnante&classi="; for(String classe : classi){ toSend += classe; if(classi.indexOf(classe) != classi.size()-1) toSend += "|"; } toSend+="&materie="; for(String materia : materie){ toSend += materia; if(materie.indexOf(materia) != materie.size()-1) toSend+="|"; } bufferedWriter.write(toSend); bufferedWriter.flush(); Scanner scanner = new Scanner(connection.getInputStream()); String response = scanner.nextLine(); switch(response){ case " ": case "": break; case "Error:0": publishProgress("Errore nella creazione dell'utente. Già esistente?"); break; case "Error:2": publishProgress("Errore nella creazione dell'insegnante. Contattare l'admin"); // si dovrebbe eliminare l'utente break; case "All done": publishProgress("Registrazione avvenuta con successo. Prego, effettuare il login."); startActivity(new Intent(RegistrazioneInsegnante.this, LoginActivity.class)); finish(); break; } } catch (MalformedURLException e) { e.printStackTrace(); } catch (IOException e) { e.printStackTrace(); } return null; } @Override protected void onProgressUpdate(Object... 
values) { Toast.makeText(RegistrazioneInsegnante.this, values[0].toString(), Toast.LENGTH_SHORT).show(); } }.execute(); SharedPreferences prefs = RegistrazioneInsegnante.this.getSharedPreferences("Shared", 0); SharedPreferences.Editor prefsEditor = prefs.edit(); prefsEditor.putString("username", RegistrazioneInsegnante.this.tietUsername.getText().toString()); prefsEditor.putString("nome", RegistrazioneInsegnante.this.tietNome.getText().toString()); prefsEditor.putString("cognome", RegistrazioneInsegnante.this.tietCognome.getText().toString()); prefsEditor.commit(); } }); } @Override public void onBackPressed() { btAnnulla.performClick(); } } <file_sep>/app/src/main/java/com/example/adi/playforlearnpfl/RegistrazioneAlunno.java package com.example.adi.playforlearnpfl; import android.content.Intent; import android.content.SharedPreferences; import android.os.AsyncTask; import android.support.design.widget.TextInputEditText; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.util.Log; import android.view.View; import android.widget.Button; import android.widget.CheckBox; import android.widget.CompoundButton; import android.widget.RadioButton; import android.widget.RadioGroup; import android.widget.Toast; import org.json.JSONObject; import java.io.BufferedWriter; import java.io.IOException; import java.io.OutputStreamWriter; import java.net.HttpURLConnection; import java.net.MalformedURLException; import java.net.URL; import java.util.Scanner; public class RegistrazioneAlunno extends AppCompatActivity { private TextInputEditText tietNome, tietCognome, tietUsername, tietPassword; private RadioButton rbPrima, rbSeconda, rbTerza, rbQuarta, rbQuinta; private RadioGroup radioGroup; private Button btRegistrati, btAnnulla; private String nome, cognome, username, password, classe, tipologia = "alunno"; public static final String SharedPrefName = "Shared"; @Override protected void onCreate(Bundle savedInstanceState) { 
super.onCreate(savedInstanceState); setContentView(R.layout.activity_registrazione_alunno); radioGroup = (RadioGroup)findViewById(R.id.radioGroup); tietNome = (TextInputEditText)findViewById(R.id.tietNome); tietCognome = (TextInputEditText)findViewById(R.id.tietCognome); tietUsername = (TextInputEditText)findViewById(R.id.tietUsername); tietPassword = (TextInputEditText)findViewById(R.id.tietPassword); // tietClasse =(TextInputEditText)findViewById(R.id.tietClasse); btRegistrati = (Button)findViewById(R.id.btRegistrati); btRegistrati.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { tietNome.setError(null); tietCognome.setError(null); tietUsername.setError(null); tietPassword.setError(null); if(tietNome.length() != 0){ nome = tietNome.getText().toString(); if(nome.trim().length() == 0){ tietNome.setError("Non può essere vuoto"); return; } }else{ tietNome.setError("Non può essere vuoto"); return; } if(tietCognome.length() != 0){ cognome = tietCognome.getText().toString(); if(cognome.trim().length() == 0){ tietCognome.setError("Non può essere vuoto"); return; } }else{ tietCognome.setError("Non può essere vuoto"); return; } if(tietUsername.length() != 0){ username = tietUsername.getText().toString(); if(username.trim().length() == 0){ tietUsername.setError("Non può essere vuoto"); return; } }else{ tietUsername.setError("Non può essere vuoto"); return; } if(tietPassword.length() != 0){ password = tietPassword.getText().toString(); if(password.trim().length() == 0){ tietPassword.setError("Non può essere vuoto"); return; } }else{ tietPassword.setError("Non può essere vuoto"); return; } try{ classe = ((RadioButton)findViewById(radioGroup.getCheckedRadioButtonId())).getText().toString(); }catch (NullPointerException e){ Toast.makeText(RegistrazioneAlunno.this, "Bisogna selezionare almeno una classe", Toast.LENGTH_SHORT).show(); return; } new AsyncTask<Object,Object,Object>(){ @Override protected Object doInBackground(Object... 
params) { try { String ip = LoginActivity.INDIRIZZO; int porta = 80; String nomeFile = "webservice/registrazioneUtente.php"; URL url = new URL("http",ip, porta, nomeFile); HttpURLConnection connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setChunkedStreamingMode(0); connection.setDoInput(true); connection.setDoOutput(true); BufferedWriter bufferedWriter = new BufferedWriter(new OutputStreamWriter(connection.getOutputStream())); String toSend = "nome=" + nome + "&cognome=" + cognome + "&username=" + username + "&password=" + <PASSWORD> + "&tipologia=<PASSWORD>&classe=" + classe; bufferedWriter.write(toSend); bufferedWriter.flush(); Scanner scanner = new Scanner(connection.getInputStream()); String response = scanner.nextLine(); switch(response){ case " ": case "": break; case "Error:0": publishProgress("Errore nella creazione dell'utente. Già esistente?"); break; case "Error:1": publishProgress("Errore nella creazione dell'alunno. Contattare l'admin"); // si dovrebbe eliminare l'utente break; case "All done": publishProgress("Registrazione avvenuta con successo. Prego, effettuare il login."); startActivity(new Intent(RegistrazioneAlunno.this, LoginActivity.class)); finish(); break; } } catch (MalformedURLException e) { e.printStackTrace(); } catch (IOException e) { e.printStackTrace(); } return null; } @Override protected void onProgressUpdate(Object... 
values) { Toast.makeText(RegistrazioneAlunno.this, values[0].toString(), Toast.LENGTH_SHORT).show(); } }.execute(); SharedPreferences prefs = getApplicationContext().getSharedPreferences("Shared", 0); SharedPreferences.Editor prefsEditor = prefs.edit(); prefsEditor.putString("username", RegistrazioneAlunno.this.tietUsername.getText().toString()); prefsEditor.putString("nome", RegistrazioneAlunno.this.tietNome.getText().toString()); prefsEditor.putString("cognome", RegistrazioneAlunno.this.tietCognome.getText().toString()); Log.d("DEBUG_registrazione",(radioGroup.getCheckedRadioButtonId()+"")); prefsEditor.putString("classe", radioGroup.getCheckedRadioButtonId()+""); prefsEditor.commit(); } }); btAnnulla = (Button)findViewById(R.id.btAnnulla); btAnnulla.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { startActivity(new Intent(RegistrazioneAlunno.this, LoginActivity.class)); finish(); } }); } @Override public void onBackPressed() { btAnnulla.performClick(); } }
066fe31932d9fa4d71f5cf970867fcd288de0221
[ "Java" ]
5
Java
Adian13/PlayForLearnPFL
517f18becd69a301b91bf71f566fb19cdddc2bff
608eeb5a1908475223805e62f854c7660e90de7b
refs/heads/master
<file_sep>dbms.name = postgresql db.fill = 0 db.runtime_config = 1 system.db.url = JDBC_DATABASE_URL system.db.user = JDBC_DATABASE_USERNAME system.db.password = <PASSWORD>_DATABASE_PASSWORD <file_sep>package yevtukh.anton.database; import yevtukh.anton.model.dao.implementations.JpaDishesDao; import yevtukh.anton.model.dao.interfaces.DishesDao; import javax.persistence.EntityManagerFactory; import javax.persistence.Persistence; import java.io.InputStream; import java.sql.*; import java.util.Properties; /** * Created by Anton on 14.10.2017. */ public class DbWorker { private final EntityManagerFactory ENTITY_MANAGER_FACTORY; private static DbWorker instance; public static DbWorker getInstance() { if (instance == null) { try { instance = new DbWorker(); } catch (Exception e) { System.out.println("Can't get instance of DbWorker"); e.printStackTrace(); } } return instance; } private DbWorker() throws Exception { InputStream inputStream = getClass().getClassLoader().getResourceAsStream("db.properties"); Properties properties = new Properties(); properties.load(inputStream); boolean runtimeConfig = "1".equals(properties.getProperty("db.runtime_config")) ? true : false; boolean fill = "1".equals(properties.getProperty("db.fill")) ? 
true : false; if (runtimeConfig) { properties.put("javax.persistence.jdbc.url", System.getenv(properties.getProperty("system.db.url"))); properties.put("javax.persistence.jdbc.user", System.getenv(properties.getProperty("system.db.user"))); properties.put("javax.persistence.jdbc.password", System.getenv(properties.getProperty("system.db.password"))); } ENTITY_MANAGER_FACTORY = Persistence.createEntityManagerFactory( properties.getProperty("dbms.name"), properties); if (fill) { fillDb(); } } public DishesDao createDishesDao() throws SQLException { return new JpaDishesDao(ENTITY_MANAGER_FACTORY.createEntityManager()); } private void fillDb() throws SQLException, ClassNotFoundException { if (InitData.INITIAL_DISHES != null) { DishesDao dishesDao = createDishesDao(); dishesDao.insertDishes(InitData.INITIAL_DISHES); } } public void stop() { ENTITY_MANAGER_FACTORY.close(); } } <file_sep>package yevtukh.anton.model; /** * Created by Anton on 14.10.2017. */ public class SearchParameters { private double priceFrom; private double priceTo; private boolean hasDiscount; public SearchParameters(double priceFrom, double priceTo, boolean hasDiscount) { this.priceFrom = priceFrom; this.priceTo = priceTo; this.hasDiscount = hasDiscount; } public double getPriceFrom() { return priceFrom; } public double getPriceTo() { return priceTo; } public boolean isHasDiscount() { return hasDiscount; } }
11d916909f570adcf70c24ffea9d0d637e88ff71
[ "Java", "INI" ]
3
INI
AntonYevtukh/MenyDB
2a779c3bc3f1d1ee22f6c9203c2110344afeba98
1fe94d4b7802c7a6513768752a5076c598668871
refs/heads/master
<repo_name>WasserEsser/PEExplorer<file_sep>/PEExplorer/ViewModels/Tabs/DebugTabViewModel.cs using System; using System.Collections.Generic; using System.ComponentModel.Composition; using System.Linq; using System.Text; using System.Threading.Tasks; namespace PEExplorer.ViewModels.Tabs { [Export, PartCreationPolicy(CreationPolicy.NonShared)] class DebugTabViewModel : TabViewModelBase { [ImportingConstructor] public DebugTabViewModel(MainViewModel vm) : base(vm) { } public override string Icon => "/icons/debug.ico"; public override string Text => "Debug"; } } <file_sep>/PEExplorer/ViewModels/DisassemblyViewModel.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Windows; using SharpDisasm; using Zodiacon.WPF; using SharpDisasm.Udis86; namespace PEExplorer.ViewModels { class DisassemblyViewModel : DialogViewModelBase { public string SymbolName { get; } public DisassemblyViewModel(Window dialog, string symbolName) : base(dialog) { SymbolName = symbolName; } public void Disassemble(byte[] code, int address, bool x64) { Disassembler.Translator.IncludeAddress = true; Disassembler.Translator.IncludeBinary = true; using(var disassem = new Disassembler(code, x64 ? ArchitectureMode.x86_64 : ArchitectureMode.x86_32, (ulong)address, true)) { Instructions = disassem.Disassemble().TakeWhileIncluding((i, c) => c < 1000 && i.Mnemonic != ud_mnemonic_code.UD_Iret && i.Mnemonic != ud_mnemonic_code.UD_Iint3). 
Select(i => new InstructionViewModel(i)); } } private IEnumerable<InstructionViewModel> _instructions; public IEnumerable<InstructionViewModel> Instructions { get { return _instructions; } set { SetProperty(ref _instructions, value); } } public string Title => $"Disassembly: {SymbolName}"; } } <file_sep>/PEExplorer.Core/ImportedLibrary.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace PEExplorer.Core { public class ImportedLibrary { public string LibraryName { get; internal set; } public ICollection<ImportedSymbol> Symbols { get; } = new List<ImportedSymbol>(16); } } <file_sep>/PEExplorer.Core/ImportedSymbol.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace PEExplorer.Core { public class ImportedSymbol { public string Name { get; set; } public int Hint { get; set; } public string UndecoratedName { get; set; } } } <file_sep>/PEExplorer/ViewModels/Tabs/ImportsTabViewModel.cs using System; using System.Collections.Generic; using System.ComponentModel.Composition; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Windows.Data; using PEExplorer.Core; namespace PEExplorer.ViewModels.Tabs { [Export, PartCreationPolicy(CreationPolicy.NonShared)] class ImportsTabViewModel : TabViewModelBase { [ImportingConstructor] public ImportsTabViewModel(MainViewModel vm) : base(vm) { } public override string Icon => "/icons/import2.ico"; public override string Text => "Imports"; IEnumerable<ImportedLibrary> _imports; public IEnumerable<ImportedLibrary> Imports => _imports ?? 
(_imports = MainViewModel.PEParser.GetImports()); private string _searchLibrariesText; public string SearchLibrariesText { get { return _searchLibrariesText; } set { if(SetProperty(ref _searchLibrariesText, value)) { var view = CollectionViewSource.GetDefaultView(Imports); if(string.IsNullOrWhiteSpace(value)) view.Filter = null; else { var text = value.ToLower(); view.Filter = o => ((ImportedLibrary)o).LibraryName.ToLower().Contains(text); } } } } private ImportedLibrary _selectedLibrary; public ImportedLibrary SelectedLibrary { get { return _selectedLibrary; } set { if(SetProperty(ref _selectedLibrary, value)) { SearchImportsText = string.Empty; } } } private string _searchImportsText; public string SearchImportsText { get { return _searchImportsText; } set { if(SetProperty(ref _searchImportsText, value)) { if(SelectedLibrary != null) { var view = CollectionViewSource.GetDefaultView(SelectedLibrary.Symbols); if(string.IsNullOrWhiteSpace(value)) view.Filter = null; else { var text = value.ToLower(); view.Filter = o => ((ImportedSymbol)o).Name.ToLower().Contains(text); } } } } } } } <file_sep>/PEExplorer/Helpers/DateTimeHelper.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace PEExplorer.Helpers { public static class DateTimeHelper { public static DateTime FromSeconds(uint seconds) => new DateTime(1970, 1, 1) + TimeSpan.FromSeconds((double)seconds); } } <file_sep>/PEExplorer/ViewModels/Resources/ImageResourceViewModel.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Windows.Media; using PEExplorer.Core; namespace PEExplorer.ViewModels.Resources { class ImageResourceViewModel : ResourceViewModel { public ImageResourceViewModel(ResourceID id, ResourceTypeViewModel type) : base(id, type) { } ImageSource _image; public ImageSource Icon => _image ?? 
(_image = Type.ResourceManager.GetIconImage(ResourceId, true)); public ImageSource Cursor => _image ?? (_image = Type.ResourceManager.GetIconImage(ResourceId, false)); public ImageSource Bitmap => _image ?? (_image = Type.ResourceManager.GetBitmapImage(ResourceId)); public override bool CustomViewPossible => true; } } <file_sep>/PEExplorer/ViewModels/Resources/TextResourceViewModel.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; using PEExplorer.Core; namespace PEExplorer.ViewModels.Resources { class TextResourceViewModel : ResourceViewModel { public TextResourceViewModel(ResourceID id, ResourceTypeViewModel type) : base(id, type) { } public string ManifestText => Type.MainViewModel.PEFile.GetSxSManfest(); public override bool CustomViewPossible => true; } } <file_sep>/PEExplorer/ViewModels/Tabs/ExceptionsTabViewModel.cs using System; using System.Collections.Generic; using System.ComponentModel.Composition; using System.Linq; using System.Text; using System.Threading.Tasks; namespace PEExplorer.ViewModels.Tabs { [Export, PartCreationPolicy(CreationPolicy.NonShared)] class ExceptionsTabViewModel : TabViewModelBase { [ImportingConstructor] public ExceptionsTabViewModel(MainViewModel vm) : base(vm) { } public override string Icon => "/icons/exceptions.ico"; public override string Text => "Exceptions"; } } <file_sep>/PEExplorer/ViewModels/Resources/ResourceTypeViewModel.cs using System; using System.Collections.Generic; using System.ComponentModel.Composition; using System.Linq; using System.Text; using System.Threading.Tasks; using PEExplorer.Core; namespace PEExplorer.ViewModels.Resources { class ResourceTypeViewModel { public ResourceID ResourceType { get; internal set; } public ICollection<ResourceViewModel> Resources { get; } = new List<ResourceViewModel>(); public ResourceManager ResourceManager { get; } [Import] public MainViewModel MainViewModel; public ResourceTypeViewModel(ResourceManager 
mgr) { ResourceManager = mgr; } static readonly Dictionary<ResourceID, Type> _viewModels = new Dictionary<ResourceID, Type> { { ResourceID.Icon, typeof(ImageResourceViewModel) }, { ResourceID.Cursor, typeof(ImageResourceViewModel) }, { ResourceID.Bitmap, typeof(ImageResourceViewModel) }, { ResourceID.GroupCursor, typeof(ImageResourceViewModel) }, { ResourceID.GroupIcon, typeof(ImageResourceViewModel) }, { ResourceID.StringTable, typeof(StringResourceViewModel) }, { ResourceID.Manifest, typeof(TextResourceViewModel) }, }; internal ResourceViewModel CreateResourceViewModel(ResourceID resource) { if(!ResourceType.IsStandard) return new ResourceViewModel(resource, this); Type viewModelType; if(!_viewModels.TryGetValue(ResourceType, out viewModelType)) return new ResourceViewModel(resource, this); return (ResourceViewModel)Activator.CreateInstance(viewModelType, resource, this); } public bool CustomViewPossible => false; } } <file_sep>/PEExplorer/Constants.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace PEExplorer { class Constants { public const string AppName = "PE Explorer v0.6"; public const string Copyright = "by <NAME> (C)2016-2017"; public static string EmptyTitle = $"{AppName} {Copyright}"; } } <file_sep>/PEExplorer/ViewModels/Tabs/ImportAddressTableTabViewModel.cs using System; using System.Collections.Generic; using System.ComponentModel.Composition; using System.Linq; using System.Text; using System.Threading.Tasks; using PEExplorer.Core; namespace PEExplorer.ViewModels.Tabs { [Export, PartCreationPolicy(CreationPolicy.NonShared)] class ImportAddressTableTabViewModel : TabViewModelBase { [ImportingConstructor] public ImportAddressTableTabViewModel(MainViewModel vm) : base(vm) { } public override string Icon => "/icons/iat.ico"; public override string Text => "IAT"; ICollection<ImportedSymbol> _imports; public ICollection<ImportedSymbol> Imports => _imports ?? 
(_imports = MainViewModel.PEParser.GetImportAddressTable()); } } <file_sep>/PEExplorer/ViewModels/TabViewModelBase.cs using System; using System.Collections.Generic; using System.ComponentModel.Composition; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Windows.Input; using Prism.Commands; using Prism.Mvvm; namespace PEExplorer.ViewModels { abstract class TabViewModelBase : BindableBase { public abstract string Icon { get; } public abstract string Text { get; } public virtual bool CanClose => true; protected MainViewModel MainViewModel { get; } protected TabViewModelBase(MainViewModel vm) { MainViewModel = vm; } public string ToDecHex(ulong n) => $"{n} (0x{n:X})"; } } <file_sep>/PEExplorer/Extensions.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace PEExplorer { static class Extensions { public static IEnumerable<T> TakeWhileIncluding<T>(this IEnumerable<T> collection, Func<T, bool> predicate) { var enumerator = collection.GetEnumerator(); while(enumerator.MoveNext()) { yield return enumerator.Current; if(!predicate(enumerator.Current)) break; } } public static IEnumerable<T> TakeWhileIncluding<T>(this IEnumerable<T> collection, Func<T, int, bool> predicate) { var enumerator = collection.GetEnumerator(); int i = 0; while (enumerator.MoveNext()) { yield return enumerator.Current; if (!predicate(enumerator.Current, i)) break; ++i; } } } } <file_sep>/PEExplorer/ViewModels/Tabs/ExportsTabViewModel.cs using System; using System.Collections.Generic; using System.ComponentModel.Composition; using System.IO; using System.IO.MemoryMappedFiles; using System.Linq; using System.Runtime.InteropServices; using System.Text; using System.Threading.Tasks; using System.Windows.Data; using System.Windows.Input; using PEExplorer.Core; using PEExplorer.Views; using Prism.Commands; using Zodiacon.WPF; namespace PEExplorer.ViewModels.Tabs { [Export, 
PartCreationPolicy(CreationPolicy.NonShared)] class ExportsTabViewModel : TabViewModelBase { [ImportingConstructor] public ExportsTabViewModel(MainViewModel mainViewModel) : base(mainViewModel) { DisassembleCommand = new DelegateCommand(() => { var symbol = SelectedItem; var vm = DialogService.CreateDialog<DisassemblyViewModel, DisassemblyView>(symbol.Name); var address = (int)symbol.Address; MainViewModel.Accessor.ReadArray(MainViewModel.PEHeader.RvaToFileOffset(address), _bytes, 0, _bytes.Length); vm.Disassemble(_bytes, address, MainViewModel.PEHeader.IsPE64); vm.Show(); }, () => SelectedItem != null && string.IsNullOrEmpty(SelectedItem.ForwardName)).ObservesProperty(() => SelectedItem); } public override string Icon => "/icons/export1.ico"; public override string Text => "Exports"; IEnumerable<ExportedSymbol> _exports; public unsafe IEnumerable<ExportedSymbol> Exports { get { if(_exports == null) { _exports = MainViewModel.PEParser.GetExports(); } return _exports; } } private string _searchText; public string SearchText { get { return _searchText; } set { if(SetProperty(ref _searchText, value)) { var view = CollectionViewSource.GetDefaultView(Exports); if(string.IsNullOrWhiteSpace(value)) view.Filter = null; else { var lower = value.ToLower(); view.Filter = o => { var symbol = (ExportedSymbol)o; return symbol.Name.ToLower().Contains(lower) || (symbol.ForwardName != null && symbol.ForwardName.ToLower().Contains(lower)); }; } } } } [Import] IDialogService DialogService; static byte[] _bytes = new byte[1 << 12]; public ICommand DisassembleCommand { get; } private ExportedSymbol _selectedItem; public ExportedSymbol SelectedItem { get { return _selectedItem; } set { SetProperty(ref _selectedItem, value); } } } } <file_sep>/PEExplorer.Core/DirectoryTables.cs using System; using System.Collections.Generic; using System.Linq; using System.Runtime.InteropServices; using System.Text; using System.Threading.Tasks; namespace PEExplorer.Core { 
[StructLayout(LayoutKind.Sequential)] public struct IMAGE_EXPORT_DIRECTORY { public uint Characteristics; public uint TimeDateStamp; public ushort MajorVersion; public ushort MinorVersion; public int Name; public int Base; public int NumberOfFunctions; public int NumberOfNames; public int AddressOfFunctions; // RVA from base of image public int AddressOfNames; // RVA from base of image public int AddressOfOrdinals; // RVA from base of image } [StructLayout(LayoutKind.Sequential)] public struct IMAGE_RESOURCE_DIRECTORY { public int Characteristics; public int TimeDateStamp; public short MajorVersion; public short MinorVersion; public ushort NumberOfNamedEntries; public ushort NumberOfIdEntries; // IMAGE_RESOURCE_DIRECTORY_ENTRY DirectoryEntries[]; }; [StructLayout(LayoutKind.Sequential)] public struct IMAGE_IMPORT_DIRECTORY { public int ImportLookupTable; public int TimeDateStamp; public int ForwarderChain; public int NameRva; public int ImportAddressTable; } [StructLayout(LayoutKind.Sequential)] public struct _IMAGE_DEBUG_DIRECTORY { public uint Characteristics; public uint TimeDateStamp; public ushort MajorVersion; public ushort MinorVersion; public ImageDebugType Type; public uint SizeOfData; public uint AddressOfRawData; public uint PointerToRawData; } [StructLayout(LayoutKind.Sequential)] public struct IMAGE_LOAD_CONFIG_DIRECTORY64 { public uint Size; public uint TimeDateStamp; public ushort MajorVersion; public ushort MinorVersion; public uint GlobalFlagsClear; public uint GlobalFlagsSet; public uint CriticalSectionDefaultTimeout; public ulong DeCommitFreeBlockThreshold; public ulong DeCommitTotalFreeThreshold; public ulong LockPrefixTable; public ulong MaximumAllocationSize; public ulong VirtualMemoryThreshold; public ulong ProcessAffinityMask; public uint ProcessHeapFlags; public ushort CSDVersion; public ushort Reserved1; public ulong EditList; public ulong SecurityCookie; public ulong SEHandlerTable; public ulong SEHandlerCount; public ulong 
GuardCFCheckFunctionPointer; // VA public ulong GuardCFDispatchFunctionPointer; // VA public ulong GuardCFFunctionTable; // VA public ulong GuardCFFunctionCount; public ControlFlowGuardFlags GuardFlags; public IMAGE_LOAD_CONFIG_CODE_INTEGRITY CodeIntegrity; uint GuardAddressTakenIatEntryTable; // VA uint GuardAddressTakenIatEntryCount; uint GuardLongJumpTargetTable; // VA uint GuardLongJumpTargetCount; uint DynamicValueRelocTable; // VA uint HybridMetadataPointer; // VA } [StructLayout(LayoutKind.Sequential)] public struct IMAGE_LOAD_CONFIG_DIRECTORY32 { public uint Size; public uint TimeDateStamp; public ushort MajorVersion; public ushort MinorVersion; public uint GlobalFlagsClear; public uint GlobalFlagsSet; public uint CriticalSectionDefaultTimeout; public uint DeCommitFreeBlockThreshold; public uint DeCommitTotalFreeThreshold; public uint LockPrefixTable; public uint MaximumAllocationSize; public uint VirtualMemoryThreshold; public uint ProcessAffinityMask; public uint ProcessHeapFlags; public ushort CSDVersion; public ushort Reserved1; public uint EditList; public uint SecurityCookie; public uint SEHandlerTable; public uint SEHandlerCount; public uint GuardCFCheckFunctionPointer; // VA public uint GuardCFDispatchFunctionPointer; // VA public uint GuardCFFunctionTable; // VA public uint GuardCFFunctionCount; public ControlFlowGuardFlags GuardFlags; public IMAGE_LOAD_CONFIG_CODE_INTEGRITY CodeIntegrity; ulong GuardAddressTakenIatEntryTable; // VA ulong GuardAddressTakenIatEntryCount; ulong GuardLongJumpTargetTable; // VA ulong GuardLongJumpTargetCount; ulong DynamicValueRelocTable; // VA ulong HybridMetadataPointer; // VA } [StructLayout(LayoutKind.Sequential)] public struct IMAGE_LOAD_CONFIG_CODE_INTEGRITY { public ushort Flags; public ushort Catalog; public uint CatalogOffset; public uint Reserved; } }<file_sep>/PEExplorer/Settings.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace 
PEExplorer { class Settings { public bool IsTopmost { get; set; } public string AccentColor { get; set; } } } <file_sep>/PEExplorer/ViewModels/TreeViewItemViewModel.cs using System; using System.Collections.Generic; using System.Collections.ObjectModel; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Windows.Input; using Prism.Commands; using Prism.Mvvm; namespace PEExplorer.ViewModels { class TreeViewItemViewModel : BindableBase { protected MainViewModel MainViewModel { get; } public TreeViewItemViewModel(MainViewModel vm) { MainViewModel = vm; } private string _text; public string Text { get { return _text ?? Tab.Text; } set { SetProperty(ref _text, value); } } private string _icon; public string Icon { get { return _icon ?? Tab.Icon; } set { SetProperty(ref _icon, value); } } public TabViewModelBase Tab { get; set; } ObservableCollection<TreeViewItemViewModel> _items; public IList<TreeViewItemViewModel> Items => _items ?? (_items = new ObservableCollection<TreeViewItemViewModel>()); private bool _isExpanded; public bool IsExpanded { get { return _isExpanded; } set { SetProperty(ref _isExpanded, value); } } } } <file_sep>/PEExplorer/ViewModels/MainViewModel.cs using System; using System.Collections.Generic; using System.Collections.ObjectModel; using System.ComponentModel.Composition; using System.ComponentModel.Composition.Hosting; using System.IO; using System.IO.MemoryMappedFiles; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Windows; using System.Windows.Input; using Microsoft.Diagnostics.Runtime.Utilities; using PEExplorer.Core; using PEExplorer.Helpers; using PEExplorer.ViewModels.Tabs; using Prism.Commands; using Prism.Mvvm; using Zodiacon.WPF; using System.Diagnostics; namespace PEExplorer.ViewModels { [Export] class MainViewModel : BindableBase { public string Title => PathName == null ? 
null : $"{Constants.AppName} {Constants.Copyright} ({PathName}) "; readonly ObservableCollection<TabViewModelBase> _tabs = new ObservableCollection<TabViewModelBase>(); readonly ObservableCollection<string> _recentFiles = new ObservableCollection<string>(); public IList<TabViewModelBase> Tabs => _tabs; public IList<string> RecentFiles => _recentFiles; static MainViewModel _firstViewModel; public MainViewModel() { if (_firstViewModel == null) _firstViewModel = this; var recentFiles = Serializer.Load<ObservableCollection<string>>("RecentFiles"); if (recentFiles != null) _recentFiles = recentFiles; } internal void Close() { Serializer.Save(_recentFiles, "RecentFiles"); } public void SelectTab(TabViewModelBase tab) { if (!Tabs.Contains(tab)) Tabs.Add(tab); SelectedTab = tab; } private TabViewModelBase _selectedTab; public TabViewModelBase SelectedTab { get { return _selectedTab; } set { SetProperty(ref _selectedTab, value); } } private string _fileName; private PEHeader _peHeader; public PEFileParser PEParser { get; private set; } public string PathName { get; set; } public PEHeader PEHeader { get { return _peHeader; } set { SetProperty(ref _peHeader, value); } } public string FileName { get { return _fileName; } set { SetProperty(ref _fileName, value); } } [Import] IFileDialogService FileDialogService; [Import] IMessageBoxService MessageBoxService; [Import] public CompositionContainer Container { get; private set; } ObservableCollection<TreeViewItemViewModel> _treeRoot = new ObservableCollection<TreeViewItemViewModel>(); public IList<TreeViewItemViewModel> TreeRoot => _treeRoot; public ICommand OpenCommand => new DelegateCommand<string>(param => { try { var filename = FileDialogService.GetFileForOpen("PE Files (*.exe;*.dll;*.ocx;*.obj;*.lib;*.sys)|*.exe;*.sys;*.dll;*.ocx;*.obj;*.lib", "Select File"); if (filename == null) return; OpenInternal(filename, param == "new"); } catch (Exception ex) { MessageBoxService.ShowMessage(ex.Message, "PE Explorer"); } }, param => 
param == null || PEHeader != null).ObservesProperty(() => PEHeader); private void BuildTree() { TreeRoot.Clear(); var root = new TreeViewItemViewModel(this) { Text = FileName, Icon = "/icons/data.ico", IsExpanded = true }; TreeRoot.Add(root); var generalTab = Container.GetExportedValue<GeneralTabViewModel>(); root.Items.Add(new TreeViewItemViewModel(this) { Text = "(General)", Tab = generalTab }); Tabs.Add(generalTab); var sectionsTab = Container.GetExportedValue<SectionsTabViewModel>(); root.Items.Add(new TreeViewItemViewModel(this) { Tab = sectionsTab }); if (PEHeader.ExportDirectory.VirtualAddress > 0) { var exportTab = Container.GetExportedValue<ExportsTabViewModel>(); root.Items.Add(new TreeViewItemViewModel(this) { Text = "Exports (.edata)", Tab = exportTab }); } if (PEHeader.ImportDirectory.VirtualAddress > 0) { var importsTab = Container.GetExportedValue<ImportsTabViewModel>(); root.Items.Add(new TreeViewItemViewModel(this) { Text = "Imports (.idata)", Tab = importsTab }); } //if(PEHeader.ImportAddressTableDirectory.VirtualAddress > 0) { // var iatTab = Container.GetExportedValue<ImportAddressTableTabViewModel>(); // root.Items.Add(new TreeViewItemViewModel(this) { Text = "Import Address Table", Icon = "/icons/iat.ico", Tab = iatTab }); //} if (PEHeader.ResourceDirectory.VirtualAddress > 0) root.Items.Add(new TreeViewItemViewModel(this) { Text = "Resources (.rsrc)", Icon = "/icons/resources.ico", Tab = Container.GetExportedValue<ResourcesTabViewModel>() }); if (PEHeader.DebugDirectory.VirtualAddress > 0) { var debugTab = Container.GetExportedValue<DebugTabViewModel>(); root.Items.Add(new TreeViewItemViewModel(this) { Text = "Debug (.debug)", Tab = debugTab }); } //if(PEHeader.ComDescriptorDirectory.VirtualAddress > 0) { // root.Items.Add(new TreeViewItemViewModel(this) { // Text = "CLR", // Icon = "/icons/cpu.ico", // Tab = Container.GetExportedValue<CLRTabViewModel>() // }); //} if (PEHeader.LoadConfigurationDirectory.VirtualAddress > 0) { var configTab = 
Container.GetExportedValue<LoadConfigTabViewModel>(); root.Items.Add(new TreeViewItemViewModel(this) { Text = "Load Config", Icon = "/icons/config.ico", Tab = configTab }); } if((_peHeader.Characteristics & (ushort)ImageCharacteristics.DllFile) > 0) { root.Items.Add(new TreeViewItemViewModel(this) { Tab = Container.GetExportedValue<DependenciesTabViewModel>() }); } SelectedTab = generalTab; } public DelegateCommandBase OpenDroppedFiles => new DelegateCommand<string[]>(files => { for(int i = 0; i < files.Length; i++) OpenInternal(files[i], i > 0); }); public ICommand SelectTabCommand => new DelegateCommand<TreeViewItemViewModel>(item => { if (item != null) SelectTab(item.Tab); }); MemoryMappedFile _mmf; FileStream _stm; public MemoryMappedViewAccessor Accessor { get; private set; } private void MapFile() { _mmf = MemoryMappedFile.CreateFromFile(_stm, null, 0, MemoryMappedFileAccess.Read, null, HandleInheritability.None, false); Accessor = _mmf.CreateViewAccessor(0, 0, MemoryMappedFileAccess.Read); PEParser = new PEFileParser(PEFile, PathName, Accessor); } public PEFile PEFile { get; private set; } public void OpenInternal(string filename, bool newWindow) { MessageBoxService.SetOwner(Application.Current.MainWindow); if (newWindow) { Process.Start(Process.GetCurrentProcess().MainModule.FileName, filename); return; } CloseCommand.Execute(null); try { PEFile = new PEFile(_stm = File.Open(filename, FileMode.Open, FileAccess.Read, FileShare.ReadWrite), false); PEHeader = PEFile.Header; if (PEHeader == null) throw new InvalidOperationException("No PE header detected."); FileName = Path.GetFileName(filename); PathName = filename; RaisePropertyChanged(nameof(Title)); MapFile(); BuildTree(); RecentFiles.Remove(PathName); RecentFiles.Insert(0, PathName); if (RecentFiles.Count > 10) RecentFiles.RemoveAt(RecentFiles.Count - 1); } catch (Exception ex) { MessageBoxService.ShowMessage($"Error: {ex.Message}", Constants.AppName); } } public ICommand ExitCommand => new 
DelegateCommand(() => Application.Current.Shutdown()); public ICommand CloseCommand => new DelegateCommand(() => { if (PEFile != null && !PEFile.Disposed) PEFile.Dispose(); FileName = null; PEHeader = null; if (Accessor != null) Accessor.Dispose(); if (_mmf != null) _mmf.Dispose(); _tabs.Clear(); _treeRoot.Clear(); RaisePropertyChanged(nameof(Title)); }); public ICommand CloseTabCommand => new DelegateCommand<TabViewModelBase>(tab => Tabs.Remove(tab)); public ICommand OpenRecentFileCommand => new DelegateCommand<string>(filename => OpenInternal(filename, false)); private bool _isTopmost; public bool IsTopmost { get { return _isTopmost; } set { if (SetProperty(ref _isTopmost, value)) { var win = Application.Current.MainWindow; if (win != null) win.Topmost = value; } } } public ICommand ViewGeneralCommand => new DelegateCommand(() => SelectTabCommand.Execute(TreeRoot[0].Items.SingleOrDefault(item => item.Tab is GeneralTabViewModel)), () => PEHeader != null).ObservesProperty(() => PEHeader); public ICommand ViewSectionsCommand => new DelegateCommand(() => SelectTabCommand.Execute(TreeRoot[0].Items.SingleOrDefault(item => item.Tab is SectionsTabViewModel)), () => PEHeader != null).ObservesProperty(() => PEHeader); public ICommand ViewExportsCommand => new DelegateCommand(() => SelectTabCommand.Execute(TreeRoot[0].Items.SingleOrDefault(item => item.Tab is ExportsTabViewModel)), () => PEHeader?.ExportDirectory.VirtualAddress > 0).ObservesProperty(() => PEHeader); public ICommand ViewImportsCommand => new DelegateCommand(() => SelectTabCommand.Execute(TreeRoot[0].Items.SingleOrDefault(item => item.Tab is ImportsTabViewModel)), () => PEHeader?.ImportDirectory.VirtualAddress > 0).ObservesProperty(() => PEHeader); public ICommand ViewResourcesCommand => new DelegateCommand(() => SelectTabCommand.Execute(TreeRoot[0].Items.SingleOrDefault(item => item.Tab is ResourcesTabViewModel)), () => PEHeader?.ResourceDirectory.VirtualAddress > 0).ObservesProperty(() => PEHeader); public 
ICommand ViewDebugCommand => new DelegateCommand(() => SelectTabCommand.Execute(TreeRoot[0].Items.SingleOrDefault(item => item.Tab is DebugTabViewModel)), () => PEHeader?.DebugDirectory.VirtualAddress > 0).ObservesProperty(() => PEHeader); public ICommand ViewLoadConfigCommand => new DelegateCommand(() => SelectTabCommand.Execute(TreeRoot[0].Items.SingleOrDefault(item => item.Tab is LoadConfigTabViewModel)), () => PEHeader?.LoadConfigurationDirectory.VirtualAddress > 0).ObservesProperty(() => PEHeader); } } <file_sep>/PEExplorer/ViewModels/Resources/ResourceViewModel.cs using System; using System.Collections.Generic; using System.ComponentModel.Composition; using System.Globalization; using System.IO; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Windows; using System.Windows.Input; using System.Windows.Interop; using System.Windows.Media; using System.Windows.Media.Imaging; using PEExplorer.Core; using Prism.Commands; using Prism.Mvvm; using Zodiacon.WPF; namespace PEExplorer.ViewModels.Resources { class ResourceViewModel : BindableBase { public ResourceID ResourceId { get; } public ResourceTypeViewModel Type { get; } public ResourceViewModel(ResourceID id, ResourceTypeViewModel type) { ResourceId = id; Type = type; } byte[] _bytes; private int _chunk = 1; public int Chunk { get { return _chunk; } set { if(SetProperty(ref _chunk, value)) { RaisePropertyChanged(nameof(HexText)); } } } private int _lineWidth = 16; public int LineWidth { get { return _lineWidth; } set { if(SetProperty(ref _lineWidth, value)) { RaisePropertyChanged(nameof(HexText)); } } } static Dictionary<int, Func<byte[], int, string>> _converters = new Dictionary<int, Func<byte[], int, string>> { { 1, (arr, index) => arr[index].ToString("X2") }, { 2, (arr, index) => BitConverter.ToUInt16(arr, index).ToString("X4") }, { 4, (arr, index) => BitConverter.ToUInt32(arr, index).ToString("X8") }, { 8, (arr, index) => BitConverter.ToUInt64(arr, index).ToString("X16") }, }; 
public int ResourceSize => (_bytes ?? (_bytes = GetContents())).Length; public byte[] GetContents() => _bytes ?? (_bytes = Type.ResourceManager.GetResourceContent(ResourceId, Type.ResourceType)); public string HexText { get { var bytes = GetContents(); if(bytes == null) return string.Empty; var encoding = IsASCII ? Encoding.ASCII : Encoding.Unicode; var count = ResourceSize; var sb = new StringBuilder(1024); for(int i = 0; i < count; i += Chunk) { if(i % LineWidth == 0) sb.Append($"{i:X4}: "); if(i + Chunk > count) continue; sb.Append(_converters[Chunk](bytes, i)).Append(" "); var lastLine = i == count - Chunk; if(i % LineWidth == LineWidth - Chunk || lastLine) { // add ASCII/Unicode characters var str = new string(encoding.GetString(_bytes, lastLine ? i - (count % LineWidth) + 1 : i - LineWidth + Chunk, lastLine ? count % LineWidth : LineWidth). Select(ch => char.GetUnicodeCategory(ch) == UnicodeCategory.Control || char.GetUnicodeCategory(ch) == UnicodeCategory.Format ? '.' : ch) .ToArray()); if(lastLine) sb.Append(new string(' ', (LineWidth - str.Length * (IsASCII ? 
1 : 2)) / Chunk * (Chunk * 2 + 1))); sb.Append(" ").Append(str).AppendLine(); } } return sb.ToString(); } } private bool _rawView; public bool RawView { get { return _rawView; } set { SetProperty(ref _rawView, value); } } private bool _is8Bytes; public bool Is8Bytes { get { return _is8Bytes; } set { if(SetProperty(ref _is8Bytes, value) && value) { Is16Bytes = Is32Bytes = false; LineWidth = 8; } } } public virtual bool CustomViewPossible => false; private bool _is16Bytes = true; public bool Is16Bytes { get { return _is16Bytes; } set { if(SetProperty(ref _is16Bytes, value) && value) { Is8Bytes = Is32Bytes = false; LineWidth = 16; } } } private bool _is32Bytes; public bool Is32Bytes { get { return _is32Bytes; } set { if(SetProperty(ref _is32Bytes, value) && value) { Is8Bytes = Is16Bytes = false; LineWidth = 32; } } } private bool _isASCII = true; public bool IsASCII { get { return _isASCII; } set { if(SetProperty(ref _isASCII, value) && value) { IsUTF16 = false; RaisePropertyChanged(nameof(HexText)); } } } private bool _isUTF16; public bool IsUTF16 { get { return _isUTF16; } set { if(SetProperty(ref _isUTF16, value) && value) { IsASCII = false; RaisePropertyChanged(nameof(HexText)); }; } } private bool _is1Chunk = true; public bool Is1Chunk { get { return _is1Chunk; } set { if(SetProperty(ref _is1Chunk, value) && value) { Is2Chunk = Is4Chunk = Is8Chunk = false; Chunk = 1; } } } private bool _is2Chunk; public bool Is2Chunk { get { return _is2Chunk; } set { if(SetProperty(ref _is2Chunk, value) && value) { Is1Chunk = Is4Chunk = Is8Chunk = false; Chunk = 2; } } } private bool _is4Chunk; public bool Is4Chunk { get { return _is4Chunk; } set { if(SetProperty(ref _is4Chunk, value) && value) { Is2Chunk = Is1Chunk = Is8Chunk = false; Chunk = 4; } } } private bool _is8Chunk; public bool Is8Chunk { get { return _is8Chunk; } set { if(SetProperty(ref _is8Chunk, value) && value) { Is2Chunk = Is4Chunk = Is1Chunk = false; Chunk = 8; } } } } } 
<file_sep>/PEExplorer/Helpers/Serializer.cs
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.Serialization;
using System.Text;
using System.Threading.Tasks;

namespace PEExplorer.Helpers {
    // Thin wrapper over DataContractSerializer that persists objects under
    // %APPDATA%\PEExplorer. The filename-based Save/Load deliberately swallow
    // I/O and serialization errors: settings persistence is best-effort.
    static class Serializer {
        // Serialize obj as data-contract XML to an already-open stream.
        public static void Save<T>(T obj, Stream stm) where T : class {
            var writer = new DataContractSerializer(typeof(T));
            writer.WriteObject(stm, obj);
        }

        // Serialize obj to %APPDATA%\PEExplorer\<filename>; failures are ignored.
        public static void Save<T>(T obj, string filename) where T : class {
            var path = GetPath(filename);
            try {
                using(var stm = File.Open(path, FileMode.Create))
                    Save(obj, stm);
            } catch { } // best-effort: a failed settings write is not fatal
        }

        // Deserialize a T from the stream; the "as" cast returns null when the
        // payload deserializes to something that is not a T.
        public static T Load<T>(Stream stm) where T : class {
            var reader = new DataContractSerializer(typeof(T));
            return reader.ReadObject(stm) as T;
        }

        // Deserialize from %APPDATA%\PEExplorer\<filename>; returns null on any
        // error (missing file, malformed XML, ...).
        public static T Load<T>(string filename) where T : class {
            var path = GetPath(filename);
            try {
                using(var stm = File.Open(path, FileMode.Open))
                    return Load<T>(stm);
            } catch { return null; }
        }

        // Build (creating on demand) the per-user storage directory and return
        // the full path of filename inside it.
        private static string GetPath(string filename) {
            var path = Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData,
                Environment.SpecialFolderOption.Create) + @"\PEExplorer";
            if(!Directory.Exists(path)) Directory.CreateDirectory(path);
            path += "\\" + filename;
            return path;
        }
    }
}
<file_sep>/PEExplorer/Converters/ResourceTemplateSelector.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using PEExplorer.Core;
using PEExplorer.ViewModels;
using PEExplorer.ViewModels.Resources;

namespace PEExplorer.Converters {
    // Chooses the DataTemplate for a resource-tree item: type nodes render with
    // an empty template, standard resource types look up a "ResourceType<id>"
    // template, and everything else falls back to DefaultTemplate.
    class ResourceTemplateSelector : DataTemplateSelector {
        public DataTemplate DefaultTemplate { get; set; }
        // Shared empty template used for ResourceTypeViewModel (group) nodes.
        static readonly DataTemplate _empty = new DataTemplate();
        public override DataTemplate SelectTemplate(object item, DependencyObject container) {
            if(item != null) {
                if(item is ResourceTypeViewModel) return _empty;
                var resourceId = item as ResourceViewModel;
                if(resourceId != null) {
if(resourceId.Type.ResourceType.IsStandard) {
                    // Standard resource types may have a dedicated template named
                    // "ResourceType<id>" in the visual-tree resources.
                    DataTemplate template;
                    template = ((FrameworkElement)container).TryFindResource($"ResourceType{resourceId.Type.ResourceType.Id}") as DataTemplate;
                    if(template != null) return template;
                    if(DefaultTemplate != null) return DefaultTemplate;
                }
                else return DefaultTemplate; // non-standard types always use the default
            }
        }
        return base.SelectTemplate(item, container);
    }
}
}
<file_sep>/PEExplorer/ViewModels/Resources/StringResourceViewModel.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using PEExplorer.Core;

namespace PEExplorer.ViewModels.Resources {
    // Resource view-model for string tables: adds a lazily-loaded list of the
    // strings in the table and opts into the custom (non-hex) view.
    class StringResourceViewModel : ResourceViewModel {
        public StringResourceViewModel(ResourceID id, ResourceTypeViewModel type) : base(id, type) { }
        ICollection<StringResource> _strings;
        // Parsed string-table entries, fetched once on first access.
        public ICollection<StringResource> Strings => _strings ?? (_strings = Type.ResourceManager.GetStringTableContent(ResourceId));
        public override bool CustomViewPossible => true;
    }
}
<file_sep>/PEExplorer/ViewModels/Tabs/LoadConfigTabViewModel.cs
using PEExplorer.Core;
using PEExplorer.Helpers;
using System;
using System.Collections.Generic;
using System.ComponentModel.Composition;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace PEExplorer.ViewModels.Tabs {
    // Tab showing the image's load-configuration directory and its CFG data.
    [Export, PartCreationPolicy(CreationPolicy.NonShared)]
    class LoadConfigTabViewModel : TabViewModelBase {
        LoadConfiguration _loadConfig;
        [ImportingConstructor]
        public LoadConfigTabViewModel(MainViewModel vm) : base(vm) { }
        public override string Icon => "/icons/config.ico";
        public override string Text => "Load Config";
        GenericProperty[] _properties;
        ExportedSymbol[] _cfgFunctions;

        // Control-Flow-Guard function list. Loaded lazily on a background
        // continuation; returns null until the load completes, then raises a
        // change notification so bindings re-read the property.
        public ExportedSymbol[] CFGFunctions {
            get {
                if (_cfgFunctions == null) {
                    // FIX: _loadConfig used to be initialized only by the
                    // Properties getter, so reading CFGFunctions first threw a
                    // NullReferenceException. Initialize it here as well.
                    if (_loadConfig == null)
                        _loadConfig = MainViewModel.PEParser.GetLoadConfiguration();
                    // FIX: removed the trailing .ConfigureAwait(true) — it only
                    // affects an await, and this ContinueWith task is never
                    // awaited, so it was a no-op.
                    _loadConfig.GetCFGFunctions().ContinueWith(t => {
                        _cfgFunctions = t.Result.ToArray();
                        RaisePropertyChanged(nameof(CFGFunctions));
                    });
                }
                return _cfgFunctions;
            }
        }

        public GenericProperty[] Properties { get { if (_properties ==
null) { _loadConfig = MainViewModel.PEParser.GetLoadConfiguration(); _properties = new GenericProperty[] { new GenericProperty { Name = "Time Stamp", Value = _loadConfig.TimeDateStamp.ToString(), Info = DateTimeHelper.FromSeconds(_loadConfig.TimeDateStamp).ToString() }, new GenericProperty { Name = "Major Version", Value = _loadConfig.MajorVersion.ToString() }, new GenericProperty { Name = "Minor Version", Value = _loadConfig.MinorVersion.ToString() }, new GenericProperty { Name = "CFG Check Function Pointer", Value = ToDecHex(_loadConfig.CFGCheckFunctionPointer) }, new GenericProperty { Name = "CFG Dispatch Function Pointer", Value = ToDecHex(_loadConfig.CFGDispatchFunctionPointer) }, new GenericProperty { Name = "CFG Flags", Value = ToDecHex((ulong)_loadConfig.GuardFlags), Info = _loadConfig.GuardFlags.ToString() }, new GenericProperty { Name = "CFG Function Table", Value = ToDecHex(_loadConfig.CFGFunctionTable) }, new GenericProperty { Name = "CFG Function Count", Value = _loadConfig.CFGFunctionCount.ToString() }, }; } return _properties; } } } } <file_sep>/PEExplorer/ViewModels/Tabs/SectionsTabViewModel.cs using PEExplorer.Core; using System; using System.Collections.Generic; using System.ComponentModel.Composition; using System.Linq; using System.Text; using System.Threading.Tasks; namespace PEExplorer.ViewModels.Tabs { [Export, PartCreationPolicy(CreationPolicy.NonShared)] class SectionsTabViewModel : TabViewModelBase { [ImportingConstructor] public SectionsTabViewModel(MainViewModel vm) : base(vm) { } public override string Icon => "/icons/sections.ico"; public override string Text => "Sections"; ICollection<SectionData> _sections; public ICollection<SectionData> Sections => _sections ?? 
(_sections = MainViewModel.PEParser.GetSectionHeaders()); } }
<file_sep>/PEExplorer.Core/SectionHeader.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;

namespace PEExplorer.Core {
    // Section characteristic flags (IMAGE_SCN_* values from the PE/COFF spec).
    [Flags]
    public enum SectionFlags : uint {
        NoPad = 8,
        Code = 0x20,
        InitializedData = 0x40,
        UninitializedData = 0x80,
        Other = 0x100,
        Info = 0x200,
        Remove = 0x800,
        Comdat = 0x1000,
        GPRel = 0x80000,
        Align1Byte = 0x100000,
        Align2Bytes = 0x200000,
        ExtendedReloc = 0x1000000,
        Discardable = 0x2000000,
        NotCached = 0x4000000,
        NotPaged = 0x8000000,
        Shared = 0x10000000,
        Execute = 0x20000000,
        Read = 0x40000000,
        Write = 0x80000000,
    }

    // Blittable mirror of IMAGE_SECTION_HEADER. Instances are read directly out
    // of the memory-mapped file, so field order and sizes must match the
    // on-disk layout exactly.
    [StructLayout(LayoutKind.Sequential)]
    public unsafe struct SectionHeader {
        // Section name: up to 8 ANSI bytes, NUL-terminated only when shorter
        // than 8 — hence the two decoding paths below.
        public string Name {
            get {
                fixed (byte* ptr = NameBytes) {
                    if(ptr[7] == 0)
                        return Marshal.PtrToStringAnsi((IntPtr)ptr);     // NUL-terminated
                    else
                        return Marshal.PtrToStringAnsi((IntPtr)ptr, 8);  // all 8 bytes used
                }
            }
        }
        public fixed byte NameBytes[8];
        public uint VirtualSize;
        public uint VirtualAddress;
        public uint SizeOfRawData;
        public uint PointerToRawData;
        public uint PointerToRelocations;
        public uint PointerToLinenumbers;
        public ushort NumberOfRelocations;
        public ushort NumberOfLinenumbers;
        public SectionFlags Characteristics;
    };
}
<file_sep>/PEExplorer/ViewModels/Tabs/CLRTabViewModel.cs
using System;
using System.Collections.Generic;
using System.ComponentModel.Composition;
using System.IO;
using System.Linq;
using System.Reflection;
using System.Text;
using System.Threading.Tasks;

namespace PEExplorer.ViewModels.Tabs {
    // Placeholder tab for managed (CLR) images: currently exposes only caption
    // and icon (the tree node that would show it is commented out in
    // MainViewModel.BuildTree).
    [Export, PartCreationPolicy(CreationPolicy.NonShared)]
    class CLRTabViewModel : TabViewModelBase {
        [ImportingConstructor]
        public CLRTabViewModel(MainViewModel vm) : base(vm) { }
        public override string Icon => "/icons/cpu.ico";
        public override string Text => "CLR";
    }
}
<file_sep>/PEExplorer.Core/SectionData.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using
System.Threading.Tasks;

namespace PEExplorer.Core {
    // Read-only, binding-friendly wrapper over a raw SectionHeader.
    public class SectionData {
        SectionHeader _header;
        internal SectionData(SectionHeader header) { _header = header; }
        public string Name => _header.Name;
        public uint VirtualSize => _header.VirtualSize;
        public uint VirtualAddress => _header.VirtualAddress;
        public uint SizeOfRawData => _header.SizeOfRawData;
        public uint PointerToRawData => _header.PointerToRawData;
        public uint PointerToRelocations => _header.PointerToRelocations;
        public uint PointerToLineNumbers => _header.PointerToLinenumbers;
        public ushort NumberOfRelocations => _header.NumberOfRelocations;
        public ushort NumberOfLineNumbers => _header.NumberOfLinenumbers;
        public SectionFlags Characteristics => _header.Characteristics;
    }
}
<file_sep>/PEExplorer.Core/PEFileParser.cs
using System;
using System.Collections.Generic;
using System.IO.MemoryMappedFiles;
using System.Linq;
using System.Runtime.InteropServices;
using System.Security;
using System.Text;
using System.Threading.Tasks;
using Microsoft.Diagnostics.Runtime.Utilities;
using System.Diagnostics;
using System.IO;

namespace PEExplorer.Core {
    // Parses PE data directories (imports, exports, load config, sections)
    // straight out of a read-only memory-mapped view of the image file.
    public class PEFileParser : IDisposable {
        public readonly PEHeader Header;
        public readonly PEFile PEFile;
        internal readonly MemoryMappedViewAccessor Accessor;
        readonly bool _isOwner;   // true when this instance created the mapping (see ctor)
        readonly MemoryMappedFile _memFile;
        readonly FileStream _stm;
        public string Filename { get; }

        // When accessor is null, the parser opens and maps the file itself and
        // takes ownership (released in Dispose); otherwise the caller's
        // accessor is used and the caller keeps ownership.
        public PEFileParser(PEFile file, string filename, MemoryMappedViewAccessor accessor = null) {
            PEFile = file;
            Header = file.Header;
            if(accessor == null) {
                _stm = File.OpenRead(filename);
                _memFile = MemoryMappedFile.CreateFromFile(_stm, null, 0, MemoryMappedFileAccess.Read, null, HandleInheritability.None, false);
                accessor = _memFile.CreateViewAccessor(0, 0, MemoryMappedFileAccess.Read);
                _isOwner = true;
            }
            Accessor = accessor;
            Filename = filename;
        }

        // Decode one import-lookup-table entry at file offset `pointer` into an
        // ImportedSymbol. Returns null for the table terminator (zero entry)
        // and for ordinal-only imports (no name to resolve). Entry width and
        // ordinal-flag bit depend on whether the image is PE32+.
        ImportedSymbol GetSymbolFromImport(int pointer) {
            bool pe64 = Header.IsPE64;
            var ordinal = -1;
            var nameRva = 0;
            if(pe64) {
                var lvalue =
Accessor.ReadUInt64(pointer); if(lvalue == 0) return null; var isOrdinal = (lvalue & (1UL << 63)) > 0; if(isOrdinal) ordinal = (ushort)(lvalue & 0xffff); else nameRva = (int)(lvalue & ((1L << 31) - 1)); } else { var ivalue = Accessor.ReadUInt32(pointer); if(ivalue == 0) return null; if((ivalue & 0x80000000) > 0) ordinal = (ushort)(ivalue & 0xffff); else nameRva = (int)(ivalue & ((1L << 31) - 1)); } if(nameRva > 0) { var offset2 = Header.RvaToFileOffset(nameRva); var hint = Accessor.ReadUInt16(offset2); var chars = new List<byte>(); for(;;) { var ch = Accessor.ReadByte(offset2 + 2 + chars.Count); if(ch == 0) { var symbol = new ImportedSymbol { Name = Encoding.ASCII.GetString(chars.ToArray()), Hint = hint, }; if(symbol.Name.Contains("@@")) symbol.UndecoratedName = GetUndecoratedName(symbol.Name); return symbol; } chars.Add(ch); }; } return null; } public ICollection<ImportedSymbol> GetImportAddressTable() { var dir = Header.ImportAddressTableDirectory; var offset = Header.RvaToFileOffset(dir.VirtualAddress); var pe64 = Header.IsPE64; var size = pe64 ? 8 : 4; var symbols = new List<ImportedSymbol>(16); var pointer = offset; for(;;) { var symbol = GetSymbolFromImport(pointer); if(symbol == null) break; symbols.Add(symbol); pointer += size; } return symbols; } public unsafe ICollection<ImportedLibrary> GetImports() { var dir = Header.ImportDirectory; if(dir.Size == 0) return null; var offset = Header.RvaToFileOffset(dir.VirtualAddress); var pe64 = Header.IsPE64; var size = pe64 ? 
8 : 4; var imports = new List<ImportedLibrary>(8); for(;;) { IMAGE_IMPORT_DIRECTORY importDirectory; Accessor.Read(offset, out importDirectory); if(importDirectory.ImportLookupTable == 0) importDirectory.ImportLookupTable = importDirectory.ImportAddressTable; if(importDirectory.ImportLookupTable == 0) break; ImportedLibrary library = null; var importLookupTable = Header.RvaToFileOffset(importDirectory.ImportLookupTable); var hintNameTable = Header.RvaToFileOffset(importDirectory.ImportAddressTable); var nameOffset = Header.RvaToFileOffset(importDirectory.NameRva); var pointer = importLookupTable; for(;;) { var ordinal = -1; var nameRva = 0; if(pe64) { var lvalue = Accessor.ReadUInt64(pointer); if(lvalue == 0) break; var isOrdinal = (lvalue & (1UL << 63)) > 0; if(isOrdinal) ordinal = (ushort)(lvalue & 0xffff); else nameRva = (int)(lvalue & ((1L << 31) - 1)); } else { var ivalue = Accessor.ReadUInt32(pointer); if(ivalue == 0) break; if((ivalue & 0x80000000) > 0) ordinal = (ushort)(ivalue & 0xffff); else nameRva = (int)(ivalue & ((1L << 31) - 1)); } if(library == null) { var bytes = new sbyte[128]; fixed (sbyte* p = bytes) { Accessor.ReadArray(nameOffset, bytes, 0, bytes.Length); library = new ImportedLibrary { LibraryName = new string(p) }; } } if(nameRva > 0) { var offset2 = Header.RvaToFileOffset(nameRva); var hint = Accessor.ReadUInt16(offset2); var chars = new List<byte>(); for(;;) { var ch = Accessor.ReadByte(offset2 + 2 + chars.Count); if(ch == 0) { var symbol = new ImportedSymbol { Name = Encoding.ASCII.GetString(chars.ToArray()), Hint = hint, }; if(symbol.Name.Contains("@@")) symbol.UndecoratedName = GetUndecoratedName(symbol.Name); library.Symbols.Add(symbol); break; } chars.Add(ch); }; } pointer += size; } imports.Add(library); library = null; offset += 20; } return imports; } [DllImport("dbghelp.dll", CharSet = CharSet.Unicode), SuppressUnmanagedCodeSecurity] internal static extern uint UnDecorateSymbolName(string name, StringBuilder undecoratedName, int 
length, uint flags); public static string GetUndecoratedName(string name, uint flags = 0) { var sb = new StringBuilder(128); if(UnDecorateSymbolName(name, sb, sb.Capacity, flags) == 0) return null; return sb.ToString(); } public unsafe ICollection<ExportedSymbol> GetExports() { var dir = Header.ExportDirectory; var offset = Header.RvaToFileOffset(dir.VirtualAddress); IMAGE_EXPORT_DIRECTORY exportDirectory; Accessor.Read(offset, out exportDirectory); var count = exportDirectory.NumberOfNames; var exports = new List<ExportedSymbol>(count); var namesOffset = exportDirectory.AddressOfNames != 0 ? Header.RvaToFileOffset(exportDirectory.AddressOfNames) : 0; var ordinalOffset = exportDirectory.AddressOfOrdinals != 0 ? Header.RvaToFileOffset(exportDirectory.AddressOfOrdinals) : 0; var functionsOffset = Header.RvaToFileOffset(exportDirectory.AddressOfFunctions); var ordinalBase = exportDirectory.Base; var name = new sbyte[64]; fixed (sbyte* p = name) { for(uint i = 0; i < count; i++) { //read name var offset2 = Accessor.ReadUInt32(namesOffset + i * 4); var offset3 = Header.RvaToFileOffset((int)offset2); Accessor.ReadArray(offset3, name, 0, name.Length); var functionName = new string(p); // read ordinal var ordinal = Accessor.ReadUInt16(ordinalOffset + i * 2) + ordinalBase; // read function address string forwarder = null; var address = Accessor.ReadUInt32(functionsOffset + i * 4); var fileAddress = Header.RvaToFileOffset((int)address); if (fileAddress > dir.VirtualAddress && fileAddress < dir.VirtualAddress + dir.Size) { // forwarder Accessor.ReadArray(Header.RvaToFileOffset((int)address), name, 0, name.Length); forwarder = new string(p); } exports.Add(new ExportedSymbol { Name = functionName, Ordinal = ordinal, Address = address, ForwardName = forwarder, UndecoratedName = forwarder == null ? 
GetUndecoratedName(functionName) : string.Empty }); } } return exports; }

        // Raw 32-bit load-config directory, read at the directory's file offset.
        internal IMAGE_LOAD_CONFIG_DIRECTORY32 GetLoadConfigDirectory32() {
            var dir = Header.LoadConfigurationDirectory;
            var offset = Header.RvaToFileOffset(dir.VirtualAddress);
            IMAGE_LOAD_CONFIG_DIRECTORY32 configDirectory;
            Accessor.Read(offset, out configDirectory);
            return configDirectory;
        }

        // Raw 64-bit load-config directory, read at the directory's file offset.
        internal IMAGE_LOAD_CONFIG_DIRECTORY64 GetLoadConfigDirectory64() {
            var dir = Header.LoadConfigurationDirectory;
            var offset = Header.RvaToFileOffset(dir.VirtualAddress);
            IMAGE_LOAD_CONFIG_DIRECTORY64 configDirectory;
            Accessor.Read(offset, out configDirectory);
            return configDirectory;
        }

        // Returns the load configuration of the image being parsed.
        // FIX: the 32- vs 64-bit directory layout must be chosen by the target
        // image's own format (Header.IsPE64 — the same test the import/export
        // parsers in this class use), not by Environment.Is64BitProcess. The
        // old code picked the layout from PEExplorer's process bitness, so a
        // 32-bit PE opened in a 64-bit process (or a 64-bit PE in a 32-bit
        // process) was read with the wrong structure.
        public LoadConfiguration GetLoadConfiguration() {
            LoadConfiguration loadConfig;
            if (Header.IsPE64) {
                var config64 = GetLoadConfigDirectory64();
                loadConfig = new LoadConfiguration(this, ref config64);
            } else {
                var config32 = GetLoadConfigDirectory32();
                loadConfig = new LoadConfiguration(this, ref config32);
            }
            return loadConfig;
        }

        // Section headers sit at the end of the PE header block, immediately
        // before PEHeaderSize.
        public ICollection<SectionData> GetSectionHeaders() {
            var sections = new List<SectionData>();
            var sectionHeaderSize = Marshal.SizeOf<SectionHeader>();
            Debug.Assert(sectionHeaderSize == 40); // on-disk IMAGE_SECTION_HEADER is 40 bytes
            var offset = Header.PEHeaderSize - Header.NumberOfSections * sectionHeaderSize;
            for (int i = 0; i < Header.NumberOfSections; i++) {
                SectionHeader header;
                Accessor.Read(offset, out header);
                sections.Add(new SectionData(header));
                offset += sectionHeaderSize;
            }
            return sections;
        }

        // Releases the stream/mapping only if this instance created them (see ctor).
        public void Dispose() {
            if(_isOwner) {
                Accessor.Dispose();
                _memFile.Dispose();
                _stm.Dispose();
            }
        }
    }
}
<file_sep>/PEExplorer/ViewModels/Tabs/GeneralTabViewModel.cs
using System;
using System.Collections.Generic;
using System.ComponentModel.Composition;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Data;
using Microsoft.Diagnostics.Runtime.Utilities;
using PEExplorer.Core;

namespace PEExplorer.ViewModels.Tabs {
    [Export, PartCreationPolicy(CreationPolicy.NonShared)]
    class GeneralTabViewModel :
TabViewModelBase { [ImportingConstructor] public GeneralTabViewModel(MainViewModel vm) : base(vm) { } public override bool CanClose => false; public override string Icon => "/icons/General.ico"; public override string Text => "General"; public string FullPathName => MainViewModel.PathName; IEnumerable<GenericProperty> _headerProperties; public IEnumerable<GenericProperty> HeaderProperties { get { if(_headerProperties == null) { var header = MainViewModel.PEHeader; _headerProperties = new List<GenericProperty> { new GenericProperty { Name = "Base of Code", Value = ToDecHex(header.BaseOfCode) }, new GenericProperty { Name = "Address of Entry Point", Value = ToDecHex(header.AddressOfEntryPoint) }, new GenericProperty { Name = "Image Base", Value = ToDecHex(header.ImageBase) }, new GenericProperty { Name = "Is Managed?", Value = header.IsManaged.ToString() }, new GenericProperty { Name = "Machine", Value = ((Core.MachineType)header.Machine).ToString() }, new GenericProperty { Name = "Magic", Value = ToDecHex(header.Magic), Info = MagicToPEFormat(header.Magic) }, new GenericProperty { Name = "Major Image Version", Value = header.MajorImageVersion.ToString() }, new GenericProperty { Name = "Minor Image Version", Value = header.MinorImageVersion.ToString() }, new GenericProperty { Name = "Major Linker Version", Value = header.MajorLinkerVersion.ToString() }, new GenericProperty { Name = "Minor Linker Version", Value = header.MinorLinkerVersion.ToString() }, new GenericProperty { Name = "Loader Flags", Value = ToDecHex(header.LoaderFlags) }, new GenericProperty { Name = "Subsystem", Value = header.Subsystem.ToString(), Info = ((SubsystemType)header.Subsystem).ToString() }, new GenericProperty { Name = "Characteristics", Value = ToDecHex(header.Characteristics), Info = ((ImageCharacteristics)header.Characteristics).ToString() }, new GenericProperty { Name = "Dll Characteristics", Value = ToDecHex(header.DllCharacteristics), Info = 
((DllCharacteristics)header.DllCharacteristics).ToString() }, new GenericProperty { Name = "File Alignment", Value = ToDecHex(header.FileAlignment) }, new GenericProperty { Name = "Size of Code", Value = ToDecHex(header.SizeOfCode) }, new GenericProperty { Name = "Size of Image", Value = ToDecHex(header.SizeOfImage) }, new GenericProperty { Name = "Major OS Version", Value = header.MajorOperatingSystemVersion.ToString() }, new GenericProperty { Name = "Minor OS Version", Value = header.MinorOperatingSystemVersion.ToString() }, new GenericProperty { Name = "Major Subsystem Version", Value = header.MajorSubsystemVersion.ToString() }, new GenericProperty { Name = "Minor Subsystem Version", Value = header.MinorSubsystemVersion.ToString() }, new GenericProperty { Name = "Size of Headers", Value = ToDecHex(header.SizeOfHeaders) }, new GenericProperty { Name = "Size of Stack Commit", Value = ToDecHex(header.SizeOfStackCommit) }, new GenericProperty { Name = "Size of Stack Reserve", Value = ToDecHex(header.SizeOfStackReserve) }, new GenericProperty { Name = "Size of Heap Commit", Value = ToDecHex(header.SizeOfHeapCommit) }, new GenericProperty { Name = "Size of Heap Reserve", Value = ToDecHex(header.SizeOfHeapReserve) }, new GenericProperty { Name = "Size of Uninitialized Data", Value = ToDecHex(header.SizeOfUninitializedData) }, new GenericProperty { Name = "Size of Initialized Data", Value = ToDecHex(header.SizeOfInitializedData) }, new GenericProperty { Name = "Size of Optional Header", Value = ToDecHex(header.SizeOfOptionalHeader) }, new GenericProperty { Name = "Date Time Stamp", Value = ToDecHex((ulong)header.TimeDateStampSec), Info = header.TimeDateStamp.ToString() }, new GenericProperty { Name = "Section Alignment", Value = ToDecHex(header.SectionAlignment) }, new GenericProperty { Name = "Pointer to Symbol Table", Value = ToDecHex(header.PointerToSymbolTable) }, new GenericProperty { Name = "Number of Sections", Value = header.NumberOfSections.ToString() }, new 
GenericProperty { Name = "Number of Symbols", Value = header.NumberOfSymbols.ToString() }, new GenericProperty { Name = "Number of RVA and Sizes", Value = header.NumberOfRvaAndSizes.ToString() }, new GenericProperty { Name = "Signature", Value = ToDecHex(header.Signature) }, new GenericProperty { Name = "Checksum", Value = ToDecHex(header.CheckSum) }, header.ImportAddressTableDirectory.Size == 0 ? null : new GenericProperty { Name = "Import Address Table Directory", Value = FromDirectory(header.ImportAddressTableDirectory) }, header.ImportDirectory.Size == 0 ? null : new GenericProperty { Name = "Import Directory", Value = FromDirectory(header.ImportDirectory) }, header.ResourceDirectory.Size == 0 ? null : new GenericProperty { Name = "Resource Directory", Value = FromDirectory(header.ResourceDirectory) }, header.BaseRelocationDirectory.Size == 0 ? null : new GenericProperty { Name = "Base Relocation Directory", Value = FromDirectory(header.BaseRelocationDirectory) }, header.BoundImportDirectory.Size == 0 ? null : new GenericProperty { Name = "Bound Import Directory", Value = FromDirectory(header.BoundImportDirectory) }, header.ExceptionDirectory.Size == 0 ? null : new GenericProperty { Name = "Exception Directory", Value = FromDirectory(header.ExceptionDirectory) }, header.ExportDirectory.Size == 0 ? null : new GenericProperty { Name = "Export Directory", Value = FromDirectory(header.ExportDirectory) }, header.LoadConfigurationDirectory.Size == 0 ? null : new GenericProperty { Name = "Load Configuration Directory", Value = FromDirectory(header.LoadConfigurationDirectory) }, new GenericProperty { Name = "Global Pointer", Value = ToDecHex((uint)header.GlobalPointerDirectory.VirtualAddress) }, header.ComDescriptorDirectory.Size == 0 ? null : new GenericProperty { Name = "CLR Descriptor Directory", Value = FromDirectory(header.ComDescriptorDirectory) }, header.DebugDirectory.Size == 0 ? 
null : new GenericProperty { Name = "Debug Directory", Value = FromDirectory(header.DebugDirectory) }, header.DelayImportDirectory.Size == 0 ? null : new GenericProperty { Name = "Delay Import Directory", Value = FromDirectory(header.DelayImportDirectory) }, header.ArchitectureDirectory.Size == 0 ? null : new GenericProperty { Name = "Architecture Directory", Value = FromDirectory(header.ArchitectureDirectory) }, header.ThreadStorageDirectory.Size == 0 ? null : new GenericProperty { Name = "Thread Storage Directory", Value = FromDirectory(header.ThreadStorageDirectory) }, header.CertificatesDirectory.Size == 0 ? null : new GenericProperty { Name = "Certificate Directory", Value = FromDirectory(header.CertificatesDirectory) }, }.Where(p => p != null).OrderBy(p => p.Name); } return _headerProperties; } } private string FromDirectory(IMAGE_DATA_DIRECTORY dir) { return $"Offset: {ToDecHex((uint)dir.VirtualAddress)}, Size:{ToDecHex((uint)dir.Size)}"; } private string MagicToPEFormat(ushort magic) { switch(magic) { case 0x10b: return "PE32"; case 0x20b: return "PE32+"; case 0x107: return "ROM"; } return "Unknown"; } private string _searchText; public string SearchText { get { return _searchText; } set { if(SetProperty(ref _searchText, value)) { var view = CollectionViewSource.GetDefaultView(HeaderProperties); if(string.IsNullOrWhiteSpace(value)) view.Filter = null; else { var name = value.ToLower(); view.Filter = o => ((GenericProperty)o).Name.ToLower().Contains(name); } } } } } } <file_sep>/PEExplorer/ViewModels/InstructionViewModel.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; using SharpDisasm; namespace PEExplorer.ViewModels { class InstructionViewModel { public Instruction Instruction { get; } public InstructionViewModel(Instruction instruction) { Instruction = instruction; } public string Bytes => string.Join(" ", Instruction.Bytes); public string Address => Instruction.Offset.ToString("X8"); 
public string Text => Instruction.ToString().Substring(8); } } <file_sep>/PEExplorer/ViewModels/Tabs/DependenciesTabViewModel.cs using Microsoft.Diagnostics.Runtime.Utilities; using PEExplorer.Core; using Prism.Mvvm; using System; using System.Collections.Generic; using System.Collections.ObjectModel; using System.ComponentModel.Composition; using System.Diagnostics; using System.IO; using System.IO.MemoryMappedFiles; using System.Linq; using System.Text; using System.Threading.Tasks; namespace PEExplorer.ViewModels.Tabs { sealed class DependencyTreeItem : BindableBase { MemoryMappedViewAccessor _accessor; DependenciesTabViewModel _tab; public DependencyTreeItem(DependenciesTabViewModel tab, string filename, bool apiSet, MemoryMappedViewAccessor accessor = null, IEnumerable<object> exports = null) { FilePath = filename; _accessor = accessor; IsApiSet = apiSet; _tab = tab; _exports = exports; } public string Text { get; set; } public string Icon { get; set; } public string FilePath { get; set; } public bool IsApiSet { get; } private bool _isExpanded; public bool IsExpanded { get { return _isExpanded; } set { SetProperty(ref _isExpanded, value); } } private bool _isSelected; public bool IsSelected { get { return _isSelected; } set { SetProperty(ref _isSelected, value); } } IEnumerable<object> _exports; public IEnumerable<object> Exports { get { if(_exports == null) { if(IsApiSet) { ImportedLibrary library; if(_tab.Imports.TryGetValue(FilePath, out library)) _exports = library.Symbols; } else { try { using(var pe = new PEFile(FilePath)) { using(var parser = new PEFileParser(pe, FilePath)) { _exports = parser.GetExports(); } } } catch { } } } return _exports; } } List<DependencyTreeItem> _items; public IEnumerable<DependencyTreeItem> Items { get { if(IsApiSet) return null; if(_items == null) { _items = new List<DependencyTreeItem>(8); PEFile pefile; try { pefile = new PEFile(FilePath); } catch(FileNotFoundException) { return null; } using(var parser = new 
PEFileParser(pefile, FilePath, _accessor)) { var imports = parser.GetImports(); if(imports == null) return _items; foreach(var library in imports) { var path = Environment.SystemDirectory + "\\" + library.LibraryName; bool apiSet = library.LibraryName.StartsWith("api-ms-"); _items.Add(new DependencyTreeItem(_tab, apiSet ? library.LibraryName : path, apiSet) { Text = library.LibraryName, Icon = apiSet ? "/icons/apiset.ico" : "/icons/library.ico", }); } pefile.Dispose(); } } return _items; } } } [Export, PartCreationPolicy(CreationPolicy.NonShared)] sealed class DependenciesTabViewModel : TabViewModelBase, IPartImportsSatisfiedNotification { DependencyTreeItem[] _items; public DependencyTreeItem[] Dependencies => _items ?? (_items = _root.Items.ToArray()); [ImportingConstructor] public DependenciesTabViewModel(MainViewModel vm) : base(vm) { } public override string Icon => "/icons/dependencies.ico"; public override string Text => "Dependencies"; DependencyTreeItem _root; public DependencyTreeItem PEImage => _root ?? 
(_root = new DependencyTreeItem(this, MainViewModel.PathName, false, MainViewModel.Accessor, MainViewModel.PEParser.GetExports()) { Text = MainViewModel.FileName, Icon = "/icons/data.ico", }); private DependencyTreeItem _selectedItem; public DependencyTreeItem SelectedItem { get { return _selectedItem; } set { SetProperty(ref _selectedItem, value); } } public Dictionary<string, ImportedLibrary> Imports { get; private set; } public void OnImportsSatisfied() { var imports = MainViewModel.PEParser.GetImports(); if (imports != null) { Imports = imports.ToDictionary(library => library.LibraryName); } } } } <file_sep>/PEExplorer.Core/Enums.cs using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace PEExplorer.Core { public enum SubsystemType : ushort { Unknown, Native, WindowsGUI, WindowsCUI, PosixCUI = 7, WindowsCEGUI = 9, EfiApplication = 10, EfiBootServiceDriver = 11, EfiRuntimeDriver = 12, EfiROM = 13, XBOX = 14 } [Flags] public enum DllCharacteristics : ushort { None = 0, HighEntropyVA = 0x20, DynamicBase = 0x40, ForceIntegrity = 0x80, NxCompat = 0x100, NoIsolation = 0x200, NoSEH = 0x400, NoBind = 0x800, AppContainer = 0x1000, WDMDriver = 0x2000, ControlFlowGuard = 0x4000, TerminalServerAware = 0x8000 } [Flags] public enum ImageCharacteristics : ushort { RelocsStripped = 1, ExecutableImage = 2, LineNumsStripped = 4, LocalSymbolsStripped = 8, AggressiveTrimWorkingSet = 0x10, LargeAddressAware = 0x20, LittleEndian = 0x80, Machine32Bit = 0x100, DebugInfoStripped = 0x200, RemovableRunFromSwap = 0x400, NetRunFromSwap = 0x800, SystemFile = 0x1000, DllFile = 0x2000, SingleCpuOnly = 0x4000, BigEndian = 0x8000 } public enum ImageDebugType { Unknown, Coff, CodeView, Fpo, Misc, Exception, Fixup, Borland = 9 } [Flags] public enum ControlFlowGuardFlags { Instrumented = 0x100, WriteInstrumented = 0x200, FunctionTablePresent = 0x400, SecurityCookieUnused = 0x800, ProtectDelayLoadIAT = 0x1000, 
DelayLoadIATOwnSection = 0x2000, ExportSuppressInfo = 0x4000, EnableExportSuppression = 0x8000, LongJumpTablePresent = 0x10000 } } <file_sep>/README.md # PEExplorer Portable Executable Explorer <file_sep>/PEExplorer/ViewModels/Tabs/ResourcesTabViewModel.cs using System; using System.Collections.Generic; using System.Collections.ObjectModel; using System.ComponentModel.Composition; using System.ComponentModel.Composition.Hosting; using System.Diagnostics; using System.IO; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Windows.Input; using PEExplorer.Core; using PEExplorer.ViewModels.Resources; using Prism.Commands; using Zodiacon.WPF; namespace PEExplorer.ViewModels.Tabs { [Export, PartCreationPolicy(CreationPolicy.NonShared)] class ResourcesTabViewModel : TabViewModelBase, IDisposable { ResourceManager _resourceManager; [ImportingConstructor] public ResourcesTabViewModel(MainViewModel vm) : base(vm) { } public override string Icon => "/icons/resources.ico"; public override string Text => "Resources"; ICollection<ResourceTypeViewModel> _resources; public ICollection<ResourceTypeViewModel> Resources => _resources ?? (_resources = GetResources()); private ICollection<ResourceTypeViewModel> GetResources() { _resourceManager = new ResourceManager(MainViewModel.PathName); var resources = new List<ResourceTypeViewModel>(); foreach(var type in _resourceManager.GetResourceTypes()) { var resourceType = new ResourceTypeViewModel(_resourceManager) { ResourceType = type }; MainViewModel.Container.SatisfyImportsOnce(resourceType); foreach(var resource in _resourceManager.GetResourceNames(type)) { var vm = resourceType.CreateResourceViewModel(resource); resourceType.Resources.Add(vm); } resources.Add(resourceType); } return resources; } public void Dispose() { _resourceManager.Dispose(); } [Import] IFileDialogService FileDialogService; ICommand _exportCommand; public ICommand ExportCommand => _exportCommand ?? 
(_exportCommand = new DelegateCommand<object>(res => { var file = FileDialogService.GetFileForSave(); if(file == null) return; File.WriteAllBytes(file, ((ResourceViewModel)res).GetContents()); }, res => res is ResourceViewModel).ObservesProperty(() => SelectedTreeItem)); private object _selectedTreeItem; public object SelectedTreeItem { get { return _selectedTreeItem; } set { SetProperty(ref _selectedTreeItem, value); } } } } <file_sep>/PEExplorer/Converters/BytesToStringConverter.cs using System; using System.Collections.Generic; using System.Globalization; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Windows.Data; namespace PEExplorer.Converters { class BytesToStringConverter : IValueConverter { public object Convert(object value, Type targetType, object parameter, CultureInfo culture) { return string.Join(" ", ((byte[])value).Select(v => v.ToString("X2"))); } public object ConvertBack(object value, Type targetType, object parameter, CultureInfo culture) { throw new NotImplementedException(); } } }
89c84dff86ec7614db1c3afd531a0d3adfab5195
[ "Markdown", "C#" ]
36
C#
WasserEsser/PEExplorer
b0db3d9356023241626afc587f60ad7570aafad4
2b47df9ee3437c2c6cb98b128e10e5be26a6623a
refs/heads/main
<file_sep>const firstName = 'Justin'; const lastName = 'TimberLake'; const fullName = firstName+' '+ lastName + ' '+ "is a good boy"; const fullName2 = `${firstName} ${'OOOO'} is a good boy.`; const multiLine = `I love you I miss you no, I donot need you Baily road e dorkar nai.` console.log(multiLine); <file_sep>// function doubleIt(num){ // return num * 2; // } const double = num => num * 2; const add = (x, y) => x + y; const result = double(50); console.log(result); const give5 = () => 5; const res = add(5,8); console.log(res); console.log(give5);<file_sep>const person = {name : '<NAME>' , age : 17, job : "facebook" ,gfName : '<NAME>', address : 'Kochu Khet' , phone : '0178011396', friends : ['<NAME>', '<NAME>', '<NAME>']}; const{gfName, address, job}= person; console.log(gfName,address,job);
2c46c0b9bdded8758a08ca37508982b4041cd410
[ "JavaScript" ]
3
JavaScript
dipraj-howlader/es6-practice
91dfb27d2e55ceb124cd054cc575a6c7ef49bffb
5c3f6893460599e19023b9c512085184864bd0da