text
stringlengths
37
1.41M
'''
-Medium- *Backtracking* *Bit Manipulation*
Return every generalized abbreviation of `word`: any set of non-overlapping
substrings may be replaced by their lengths, e.g. "word" -> "4", "w1r1",
"wo2", ..., "word".  1 <= len(word) <= 15.
'''


class Solution:
    """
    @param word: the given word
    @return: the generalized abbreviations of a word
    """

    def generateAbbreviations(self, word):
        # Depth-first enumeration: at each position either keep the letter
        # (flushing any pending abbreviation count first) or fold it into
        # the running count.
        results = []

        def dfs(pos, built, pending):
            if pos == len(word):
                results.append(built + str(pending) if pending else built)
                return
            # keep word[pos] literally
            kept = built + (str(pending) if pending else '') + word[pos]
            dfs(pos + 1, kept, 0)
            # abbreviate word[pos]
            dfs(pos + 1, built, pending + 1)

        dfs(0, '', 0)
        return results

    def generateAbbreviationsBit(self, word):
        # Each bitmask over len(word) bits selects which letters are
        # abbreviated (bit i set == letter i folded into a run count).
        def render(mask):
            pieces, run = [], 0
            for ch in word:
                if mask & 1:
                    run += 1
                else:
                    if run:
                        pieces.append(str(run))
                        run = 0
                    pieces.append(ch)
                mask >>= 1
            if run:
                pieces.append(str(run))
            return ''.join(pieces)

        return [render(m) for m in range(1 << len(word))]


if __name__ == "__main__":
    print(Solution().generateAbbreviations("word"))
    print(Solution().generateAbbreviationsBit("word"))
'''
-Medium-
Greedy matching: player i can be matched with trainer j iff
players[i] <= trainers[j]; each player and each trainer is used at most
once.  Return the maximum number of matchings.
'''

from typing import List


class Solution:
    def matchPlayersAndTrainers(self, players: List[int], trainers: List[int]) -> int:
        """Sort both sides ascending and greedily match the weakest unmatched
        player with the weakest trainer able to train them.

        Fix: uses sorted() copies instead of sorting the caller's lists in
        place (the previous version mutated both input arguments).
        O(n log n + m log m) time.
        """
        P = sorted(players)
        T = sorted(trainers)
        ans = 0
        i = j = 0
        while i < len(P) and j < len(T):
            if P[i] <= T[j]:
                ans += 1   # match found: consume this player and trainer
                i += 1
            j += 1         # trainer consumed or too weak — advance
        return ans


if __name__ == "__main__":
    print(Solution().matchPlayersAndTrainers(players=[4, 7, 9], trainers=[8, 2, 5, 8]))
    print(Solution().matchPlayersAndTrainers(players=[1, 1, 1], trainers=[10]))
'''
-Medium-
Return the shortest distance between occurrences of word1 and word2 in
`words`.  word1 and word2 may be equal, in which case they stand for two
distinct occurrences.  Both words are assumed to be present in the list.
'''


class Solution:
    def shortestDistanceEqual(self, words, word1, word2):
        """Single pass tracking the index of the most recent occurrence of
        either word; each new occurrence can tighten the answer when the
        pair is valid (same word twice, or two different words).

        :type words: list[str]
        :type word1: str
        :type word2: str
        :rtype: int
        """
        best = len(words)
        last = -1
        same = word1 == word2
        for pos, token in enumerate(words):
            if token != word1 and token != word2:
                continue
            if last >= 0 and (same or token != words[last]):
                best = min(best, pos - last)
            last = pos
        return best


if __name__ == "__main__":
    words = ["practice", "makes", "perfect", "coding", "makes", "study", "makes"]
    #print Solution().shortestDistance(words, "practice", "coding")
    #print Solution().shortestDistance(words, "makes", "coding")
    print(Solution().shortestDistanceEqual(words, "makes", "makes"))
'''
-Easy-
Given inclusive intervals `ranges`, return True iff every integer in
[left, right] is covered by at least one interval.
Bounds: 1 <= start <= end <= 50, 1 <= left <= right <= 50.
'''

from typing import List


class Solution:
    def isCovered(self, ranges: List[List[int]], left: int, right: int) -> bool:
        """Direct check: every x in [left, right] must fall inside some
        interval.  Replaces a fragile sweep-line version that also left a
        debug print() in the library path.  O((right-left+1) * n) time,
        trivial for the stated bounds (all <= 50).
        """
        return all(any(s <= x <= e for s, e in ranges)
                   for x in range(left, right + 1))

    def isCovered2(self, ranges: List[List[int]], left: int, right: int) -> bool:
        """Difference-array variant: after the prefix pass, seen[i] is the
        number of intervals covering integer i.  O(n + 52) time.
        """
        seen = [0] * 52
        for l, r in ranges:
            seen[l] += 1
            seen[r + 1] -= 1   # interval ends are at most 50, so index <= 51
        for i in range(1, 52):
            seen[i] += seen[i - 1]
        return all(seen[i] for i in range(left, right + 1))


if __name__ == "__main__":
    print(Solution().isCovered2(ranges=[[1, 2], [3, 4], [5, 6]], left=2, right=5))
'''
-Medium-
expression = "<num1>+<num2>".  Insert one '(' somewhere left of '+' and one
')' somewhere right of '+' so the resulting expression — implicit
multiplication outside the parentheses, missing factors counting as 1 —
evaluates to the smallest possible value.  Return the first optimal
placement found.
'''


class Solution:
    def minimizeResult(self, expression: str) -> str:
        """Try every split of the left operand (prefix factor a) and of the
        right operand (suffix factor c); evaluate a * (b1 + b2) * c and keep
        the earliest strict minimum.
        """
        left, right = expression.split('+')
        best_val = 2 ** 32 - 1
        best_expr = ''
        # i: '(' goes before left[i:]  (empty prefix => factor a = 1)
        # j: ')' goes after right[:j]  (empty suffix => factor c = 1)
        for i in range(len(left)):
            a = int(left[:i]) if left[:i] else 1
            b1 = int(left[i:]) if left[i:] else 1
            for j in range(1, len(right) + 1):
                b2 = int(right[:j]) if right[:j] else 1
                c = int(right[j:]) if right[j:] else 1
                value = a * (b1 + b2) * c
                if value < best_val:     # strict: keep the earliest optimum
                    best_val = value
                    best_expr = left[:i] + '(' + left[i:] + '+' + right[:j] + ')' + right[j:]
        return best_expr


if __name__ == "__main__":
    print(Solution().minimizeResult("247+38"))
'''
-Hard-
Reverse the nodes of a linked list k at a time; a trailing group shorter
than k is left in its original order.  Nodes themselves are relinked —
values are never copied.  O(1) extra space.
'''


# Definition for singly-linked list.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    def reverse(self, head):
        """Reverse the entire list iteratively and return the new head."""
        prev, node = None, head
        while node:
            nxt = node.next
            node.next = prev
            prev = node
            node = nxt
        return prev

    def reverseKGroup(self, head, k):
        """Reverse every run of k nodes via repeated head-insertion.

        First counts the list length, then for each full group splices the
        successor of the group's first node to the group front k-1 times.
        """
        # total number of nodes
        length = 0
        probe = head
        while probe:
            length += 1
            probe = probe.next

        sentinel = ListNode(-1)
        sentinel.next = head
        tail = sentinel  # node just before the current group
        while length >= k:
            first = tail.next  # becomes the group's last node after reversal
            for _ in range(k - 1):
                moved = first.next
                first.next = moved.next
                moved.next = tail.next
                tail.next = moved
            tail = first
            length -= k
        return sentinel.next


if __name__ == "__main__":
    a = ListNode(10)
    b = ListNode(11)
    c = ListNode(12)
    d = ListNode(13)
    e = ListNode(14)
    a.next = b
    b.next = c
    c.next = d
    d.next = e
    x = a
    while x is not None:
        print(str(x.val) + '->', end='')
        x = x.next
    print('\n===========\n')
    #e=Solution().reverse(a)
    e = Solution().reverseKGroup(a, 3)
    x = e
    while x is not None:
        print(str(x.val) + '->', end='')
        x = x.next
    print()
'''
-Medium-
p is a subsequence of s.  After removing the characters at the first k
indices of `removable` from s, find the maximum k for which p remains a
subsequence of s.  Binary search on k; each feasibility check is a single
pass over s.
'''

from typing import List


class Solution:
    def maximumRemovals(self, s: str, p: str, removable: List[int]) -> int:
        """Binary-search the largest feasible k in [0, len(removable)].

        The feasibility check walks s once, skipping removed indices, and
        matches p greedily — O(len(s)) per check instead of the previous
        version's string rebuild via repeated concatenation (quadratic).
        """
        def still_subsequence(k: int) -> bool:
            # True iff p is a subsequence of s after removing removable[:k].
            gone = set(removable[:k])
            j = 0
            for i, ch in enumerate(s):
                if i in gone:
                    continue
                if j < len(p) and ch == p[j]:
                    j += 1
                    if j == len(p):
                        return True
            return j == len(p)

        lo, hi = 0, len(removable) + 1  # search over k; hi is exclusive
        while lo < hi:
            mid = lo + (hi - lo) // 2
            if still_subsequence(mid):
                lo = mid + 1
            else:
                hi = mid
        # lo is the first infeasible k, so lo-1 is the answer
        return lo - 1


if __name__ == "__main__":
    print(Solution().maximumRemovals(s="abcacb", p="ab", removable=[3, 1, 0]))
    print(Solution().maximumRemovals(s="abcbddddd", p="abcd", removable=[3, 2, 1, 4, 5, 6]))
    print(Solution().maximumRemovals(s="abcab", p="abc", removable=[0, 1, 2, 3, 4]))
    print(Solution().maximumRemovals("qlevcvgzfpryiqlwy", "qlecfqlw", [12, 5]))
'''
-Medium- *BFS*
Replace each node's value with the sum of its cousins' values (nodes at the
same depth with a different parent).  The root and its children have no
cousins, so their values become 0.
'''
# Definition for a binary tree node.


class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


from typing import Optional
from collections import deque


class Solution:
    def replaceValueInTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
        """Two BFS passes.

        Pass 1 records the total value of every level.  Pass 2 visits each
        parent, sums its own children, and assigns each child
        levelTotal - siblingSum (exactly the cousin sum).  The root has no
        cousins and is set to 0.

        Bug fixes vs. the previous version:
        - the level queues were plain lists, so popleft() raised
          AttributeError; they are deques now
        - the per-parent child sum `s` was never initialized (NameError) and
          would have accumulated across the whole level instead of per parent
        - the level counter was advanced once per node instead of per level
        - the root's value was never reset to 0
        """
        if root is None:
            return None

        # Pass 1: per-level value totals.
        sums = []
        level = deque([root])
        while level:
            nxt = deque()
            total = 0
            for _ in range(len(level)):
                node = level.popleft()
                total += node.val
                if node.left:
                    nxt.append(node.left)
                if node.right:
                    nxt.append(node.right)
            sums.append(total)
            level = nxt

        # Pass 2: rewrite values level by level.
        root.val = 0
        level = deque([root])
        depth = 1  # depth of the children being rewritten
        while level:
            nxt = deque()
            for _ in range(len(level)):
                node = level.popleft()
                siblings = 0  # sum of this parent's children (pre-rewrite)
                if node.left:
                    siblings += node.left.val
                if node.right:
                    siblings += node.right.val
                if node.left:
                    node.left.val = sums[depth] - siblings
                    nxt.append(node.left)
                if node.right:
                    node.right.val = sums[depth] - siblings
                    nxt.append(node.right)
            depth += 1
            level = nxt
        return root
'''
-Easy-
Given "MM-DD" arrival/departure dates for Alice and Bob within the same
non-leap year, return the number of days (inclusive) on which both are in
Rome.
'''


class Solution:
    def countDaysTogether(self, arriveAlice: str, leaveAlice: str, arriveBob: str, leaveBob: str) -> int:
        """Convert each date to a day-of-year ordinal; the overlap of the
        two inclusive ranges is then min(ends) - max(starts) + 1, clamped
        at zero when the ranges are disjoint.
        """
        month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

        def ordinal(date: str) -> int:
            # 0-based day index within the year
            month, day = (int(part) for part in date.split('-'))
            return sum(month_days[:month - 1]) + day - 1

        alice_start, alice_end = ordinal(arriveAlice), ordinal(leaveAlice)
        bob_start, bob_end = ordinal(arriveBob), ordinal(leaveBob)
        overlap = min(alice_end, bob_end) - max(alice_start, bob_start) + 1
        return max(0, overlap)


if __name__ == "__main__":
    print(Solution().countDaysTogether("10-01", "10-31", "11-01", "12-31"))
'''
-Medium- *Prefix Sum*
Apply at most k doublings to elements of nums to maximize the bitwise OR
of the whole array.  All k operations are best spent on one element.
'''

from typing import List


class Solution:
    def maximumOr(self, nums: List[int], k: int) -> int:
        """For every index i, OR together nums[i] << k with the OR of all
        other elements, and take the maximum.

        prefix = OR of nums[:i] (running), suffix[i] = OR of nums[i:].
        O(n) time, O(n) space.
        """
        n = len(nums)
        if n == 1:
            return nums[0] << k

        suffix = [0] * n
        suffix[-1] = nums[-1]
        for i in range(n - 2, -1, -1):
            suffix[i] = suffix[i + 1] | nums[i]

        best = 0
        prefix = 0  # OR of nums[:i]
        for i, value in enumerate(nums):
            others = prefix | (suffix[i + 1] if i + 1 < n else 0)
            best = max(best, others | (value << k))
            prefix |= value
        return best


if __name__ == "__main__":
    print(Solution().maximumOr(nums=[12, 9], k=1))
'''
-Hard- $$$ *BFS*
Design basic Excel with a live SUM formula over a height x width sheet.
set/get/sum cells addressed by (row, columnLetter); a sum formula keeps
propagating source updates until the cell is overwritten by a plain value
or another formula.  Circular references are guaranteed not to occur.
'''

import collections


class Excel(object):
    # Observer pattern:
    # - target[src][tgt] = weight: formula cell tgt counts src `weight` times
    # - source[tgt][src] = weight: inverse map, used to unregister a formula
    # - values[idx] caches the current numeric value of every cell
    # The observer relation forms a DAG (no circular refs).  When a cell
    # changes by `delta`, a BFS pushes weight * delta through every
    # downstream formula cell.
    # set(): the cell stops being a formula (drop its sources), then the
    # delta is propagated to its observers.
    # sum(): drop old sources, register the new ones, then propagate.

    def __init__(self, H, W):
        """
        :type H: int
        :type W: str  # last column letter, e.g. "C" means columns A..C
        """
        self.col = lambda c: ord(c) - ord('A')
        self.H, self.W = H, self.col(W) + 1
        self.values = collections.defaultdict(int)
        self.target = collections.defaultdict(lambda: collections.defaultdict(int))
        self.source = collections.defaultdict(lambda: collections.defaultdict(int))
        # flat index of cell (row r, column letter c)
        self.getIdx = lambda r, c: (r - 1) * self.W + self.col(c)

    def updateTgt(self, idx, delta):
        """BFS from idx, adding weight * delta to every downstream cell."""
        queue = collections.deque([idx])  # deque: O(1) popleft vs list.pop(0)
        while queue:
            first = queue.popleft()
            for tgt in self.target[first]:
                self.values[tgt] += self.target[first][tgt] * delta
                queue.append(tgt)

    def removeSrc(self, idx):
        """Unregister idx as an observer of all of its current sources."""
        for src in self.source[idx]:
            del self.target[src][idx]
        del self.source[idx]

    def set(self, r, c, v):
        """
        :type r: int
        :type c: str
        :type v: int
        :rtype: void
        """
        idx = self.getIdx(r, c)
        delta = v - self.values[idx]
        self.values[idx] = v
        self.removeSrc(idx)        # a plain value depends on nothing
        self.updateTgt(idx, delta)

    def get(self, r, c):
        """
        :type r: int
        :type c: str
        :rtype: int
        """
        return self.values[self.getIdx(r, c)]

    def sum(self, r, c, strs):
        """
        :type r: int
        :type c: str
        :type strs: List[str]  # items are "ColRow" or "ColRow1:ColRow2"
        :rtype: int
        """
        idx = self.getIdx(r, c)
        self.removeSrc(idx)
        cval = self.values[idx]
        self.values[idx] = 0
        for src in strs:
            if ':' not in src:
                # single cell like "A1"
                sc, sr = src[0], int(src[1:])
                sidx = self.getIdx(sr, sc)
                self.target[sidx][idx] += 1
                self.source[idx][sidx] += 1
                self.values[idx] += self.values[sidx]
            else:
                # rectangle like "A1:B2" (top-left : bottom-right), inclusive
                st, ed = src.split(':')
                for r in range(int(st[1:]), int(ed[1:]) + 1):
                    for c in range(self.col(st[0]), self.col(ed[0]) + 1):
                        sidx = (r - 1) * self.W + c
                        self.target[sidx][idx] += 1
                        self.source[idx][sidx] += 1
                        self.values[idx] += self.values[sidx]
        self.updateTgt(idx, self.values[idx] - cval)
        return self.values[idx]


# Your Excel object will be instantiated and called as such:
# obj = Excel(H, W)
# obj.set(r,c,v)
# param_2 = obj.get(r,c)
# param_3 = obj.sum(r,c,strs)
'''
-Medium-
Count ways to split nums into three non-empty contiguous parts
left/mid/right with sum(left) <= sum(mid) <= sum(right), modulo 10^9 + 7.
'''


class Solution(object):
    def waysToSplit(self, nums):
        """Brute force over both cut points, O(n^2) — reference only.

        Fixes vs. the previous version: dropped a per-iteration debug print
        and applies the required modulo to the result.

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        preSum = [0]
        res = 0
        for x in nums:
            preSum.append(preSum[-1] + x)
        for k in range(n - 1, 1, -1):      # k: first index of the right part
            for j in range(0, k - 1):      # j: last index of the left part
                lSum = preSum[j + 1]
                mSum = preSum[k] - preSum[j + 1]
                rSum = preSum[n] - preSum[k]
                if lSum <= mSum <= rSum:
                    res += 1
        return res % 1_000_000_007

    def waysToSplitO_N(self, nums):
        """Two-pointer sweep: for each first cut i, j is the smallest valid
        second cut and k the smallest invalid one; both only move forward,
        so the whole scan is O(n).

        :type nums: List[int]
        :rtype: int
        """
        prefix = [0]
        for x in nums:
            prefix.append(prefix[-1] + x)
        ans = j = k = 0
        for i in range(1, len(nums)):
            j = max(j, i + 1)
            # smallest j with sum(left) <= sum(mid)
            while j < len(nums) and 2 * prefix[i] > prefix[j]:
                j += 1
            k = max(k, j)
            # first k where sum(mid) would exceed sum(right)
            while k < len(nums) and 2 * prefix[k] <= prefix[i] + prefix[-1]:
                k += 1
            ans += k - j
        return ans % 1_000_000_007

    def waysToSplitBinarySearch(self, nums):
        """For each first cut k, binary-search the leftmost and rightmost
        valid second cuts.  O(n log n).

        Fix vs. the previous version: the early-termination test read a
        stale loop variable `i` (left over from the prefix-sum loop) instead
        of the current cut `k`, which made the break condition meaningless.

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        preSum = [0] * n
        preSum[0] = nums[0]
        res = 0
        MOD = 10 ** 9 + 7
        for i in range(1, n):
            preSum[i] = preSum[i - 1] + nums[i]
        for k in range(1, n - 1):
            # left part is nums[:k]; once sum(left) exceeds half of the
            # remainder, no mid/right split can satisfy lSum <= mSum <= rSum,
            # and since preSum is non-decreasing we can stop entirely.
            if preSum[k - 1] > (preSum[n - 1] - preSum[k - 1]) // 2:
                break
            lSum = preSum[k - 1]

            def helper(i, searchLeft):
                # binary search in [i, n-2] for the leftmost/rightmost index
                # m (last index of mid) with lSum <= mSum <= rSum
                ret = -1
                l, r = i, n - 2
                while l <= r:
                    m = l + (r - l) // 2
                    mSum = preSum[m] - preSum[i - 1]
                    rSum = preSum[n - 1] - preSum[m]
                    if lSum <= mSum <= rSum:
                        ret = m
                        if searchLeft:
                            r = m - 1
                        else:
                            l = m + 1
                    elif lSum > mSum:
                        l = m + 1
                    else:
                        r = m - 1
                return ret

            left = helper(k, True)
            right = helper(k, False)
            if left == -1 and right == -1:
                continue
            res = (res + (right - left + 1) % MOD) % MOD
        return res


if __name__ == "__main__":
    #print(Solution().waysToSplit([1,2,2,2,5,0]))
    print(Solution().waysToSplitBinarySearch([1, 2, 2, 2, 5, 0]))
    print(Solution().waysToSplitBinarySearch([1, 1, 1]))
'''
-Medium-
Decide whether groups[0..n-1] can be found as disjoint contiguous subarrays
of nums, in the same order as in groups.
'''

from typing import List


class Solution:
    def canChoose(self, groups: List[List[int]], nums: List[int]) -> bool:
        """Greedy left-to-right matching: take the earliest occurrence of
        each group after the previous match.  This is always optimal — a
        later occurrence only leaves less room for the remaining groups.

        Iterative rewrite: the previous recursive dfs advanced one position
        per call and could exceed Python's default recursion limit at the
        stated bound (len(nums) up to 10^3).
        O(len(nums) * max group length) time, O(1) extra space.
        """
        pos = 0  # first index of nums still available for matching
        for g in groups:
            glen = len(g)
            start = -1
            while pos + glen <= len(nums):
                if nums[pos:pos + glen] == g:
                    start = pos
                    break
                pos += 1
            if start < 0:
                return False
            pos = start + glen  # next group must begin after this match
        return True


if __name__ == "__main__":
    print(Solution().canChoose(groups=[[1, -1, -1], [3, -2, 0]], nums=[1, -1, 0, 1, -1, -1, 3, -2, 0]))
    print(Solution().canChoose(groups=[[10, -2], [1, 2, 3, 4]], nums=[1, 2, 3, 4, 10, -2]))
    print(Solution().canChoose(groups=[[1, 2, 3], [3, 4]], nums=[7, 7, 1, 2, 3, 4, 7, 7]))
    groups = [[-5, 0]]
    nums = [2, 0, -2, 5, -1, 2, 4, 3, 4, -5, -5]
    print(Solution().canChoose(groups=groups, nums=nums))
'''
-Medium-
You are given a 0-indexed 2D integer array peaks where peaks[i] = [xi, yi]
states that mountain i has a peak at coordinates (xi, yi). A mountain can be
described as a right-angled isosceles triangle, with its base along the x-axis
and a right angle at its peak. More formally, the gradients of ascending and
descending the mountain are 1 and -1 respectively. A mountain is considered
visible if its peak does not lie within another mountain (including the border
of other mountains). Return the number of visible mountains.

Example 1:
Input: peaks = [[2,2],[6,3],[5,4]]
Output: 2
Explanation:
- Mountain 0 is visible since its peak does not lie within another mountain
  or its sides.
- Mountain 1 is not visible since its peak lies within the side of mountain 2.
- Mountain 2 is visible since its peak does not lie within another mountain
  or its sides.
There are 2 mountains that are visible.

Example 2:
Input: peaks = [[1,3],[1,3]]
Output: 0
Explanation: The mountains completely overlap. Both mountains are not visible
since their peaks lie within each other.

Constraints:
1 <= peaks.length <= 105
peaks[i].length == 2
1 <= xi, yi <= 105
'''
from typing import List
from collections import defaultdict, Counter


class Solution:
    # Shared idea: a peak (x, y) spans the base interval [x - y, x + y], and
    # peak 1 lies inside mountain 2 (border included) iff interval 1 is
    # contained in interval 2.  Counting visible mountains reduces to
    # counting intervals not contained in any other interval.
    # NOTE(review): methods 1-3 and 5 drop duplicated peaks entirely before
    # the sweep, so a peak hidden only by a duplicated pair can still be
    # counted (see the [[1,3],[1,3],[1,2]] case exercised in __main__);
    # method 4 keeps duplicates in the sweep and is marked as the correct
    # solution — confirm against the harness output.

    def numberOfMountains(self, peaks: List[List[int]]) -> int:
        # Variant 1: dedupe peaks (duplicates hide each other), sort the
        # remaining intervals, and skip every following interval whose right
        # end does not extend past the current one (it is contained).
        m = defaultdict(int)
        for p in peaks:
            m[tuple(p)] += 1
        # y >= 1, so min is always x-y and max is always x+y
        invs = [[min(x+y, x-y), max(x+y, x-y)] for x,y in m if m[(x,y)] == 1]
        invs.sort()
        # print(invs)
        i, ans = 0, 0
        while i < len(invs):
            ans += 1
            j = i+1
            while j < len(invs) and invs[j][1] <= invs[i][1]:
                j += 1
            i = j
        return ans

    def numberOfMountains2(self, peaks: List[List[int]]) -> int:
        # Variant 2: sort unique intervals by (left asc, right desc) so any
        # container precedes its contents; a monotonic stack keeps only
        # intervals extending past the current stack top.
        m = defaultdict(int)
        for x,y in peaks:
            tup = (x-y, x+y)
            m[tup] += 1
        invs = [item for item in m if m[item] == 1]
        invs.sort(key=lambda x: (x[0], -x[1]))
        # print(invs)
        stack = []
        for iv in invs:
            if not stack or iv[1] > stack[-1][-1]:
                stack.append(iv)
        return len(stack)

    def numberOfMountains3(self, peaks: List[List[int]]) -> int:
        # Variant 3: same approach as variant 2, written with a set for the
        # dedup step and tuple unpacking in the sweep.
        aux = defaultdict(int)
        for x,y in peaks:
            tup = (x-y, x+y)
            aux[tup] += 1
        aux = {item for item in aux if aux[item] == 1}
        invs = sorted(aux, key=lambda x: [x[0], -x[1]])
        # print(invs)
        stack = []
        for a,b in invs:
            if not stack or b> stack[-1][-1]:
                stack.append((a,b))
        return len(stack)

    def numberOfMountains4(self, peaks: List[List[int]]) -> int:
        # correct solution
        # Variant 4: no dedup — duplicates are handled inside the sweep.
        # (s, e) is the widest interval seen for the current left end; a
        # repeat of the exact current interval un-counts it (both peaks of
        # a duplicated pair are hidden by each other).
        invs = [(x-y, x+y) for x,y in peaks]
        invs.sort()
        s,e = -float('inf'), -float('inf')
        cnt = 0
        for a, b in invs:
            if a == s:
                if b > e:
                    # wider interval with the same left end: it absorbs the
                    # previous one; the +1 here is immediately cancelled by
                    # the b == e branch below, netting an even swap
                    s, e = a, b
                    cnt += 1
                if b == e:
                    cnt -= 1
                    cnt = max(cnt, 0)
            else:
                if b > e:
                    cnt += 1
                    s, e = a, b
        return cnt

    def numberOfMountains5(self, peaks: List[List[int]]) -> int:
        # Variant 5: dedupe via Counter, then a stack sweep using an
        # explicit containment predicate in both directions.
        count = Counter((x, y) for x, y in peaks)
        peaks = sorted([k for k, v in count.items() if v == 1])
        stack = []
        # returns True if `peak1` is hidden by `peak2`
        def isHidden(peak1: List[int], peak2: List[int]) -> bool:
            x1, y1 = peak1
            x2, y2 = peak2
            return x1 - y1 >= x2 - y2 and x1 + y1 <= x2 + y2
        for i, peak in enumerate(peaks):
            # pop stack entries hidden by the new peak, then skip the new
            # peak if the remaining top hides it
            while stack and isHidden(peaks[stack[-1]], peak):
                stack.pop()
            if stack and isHidden(peak, peaks[stack[-1]]):
                continue
            stack.append(i)
        return len(stack)
if __name__ == "__main__":
    # Ad-hoc harness comparing the implementation variants on the same
    # inputs; runs for earlier (superseded) variants are left commented out.
    # print(Solution().numberOfMountains(peaks = [[2,2],[6,3],[5,4]]))
    # print(Solution().numberOfMountains(peaks = [[2,2],[6,3],[4,6]]))
    # print(Solution().numberOfMountains(peaks = [[1,3],[1,3]]))
    # print(Solution().numberOfMountains(peaks = [[1,3],[1,3], [1,2]]))
    # print(Solution().numberOfMountains(peaks = [[2,2],[6,3],[9,2],[10,3]]))
    # print(Solution().numberOfMountains(peaks = [[2,2],[6,3],[1,3],[1,3],[9,2],[10,3]]))
    # print(Solution().numberOfMountains2(peaks = [[2,2],[6,3],[5,4]]))
    # print(Solution().numberOfMountains2(peaks = [[2,2],[6,3],[4,6]]))
    # print(Solution().numberOfMountains2(peaks = [[1,3],[1,3]]))
    # print(Solution().numberOfMountains2(peaks = [[1,3],[1,3], [1,2]]))
    # print(Solution().numberOfMountains2(peaks = [[2,2],[6,3],[9,2],[10,3]]))
    # print(Solution().numberOfMountains2(peaks = [[2,2],[6,3],[1,3],[1,3],[9,2],[10,3]]))
    # print(Solution().numberOfMountains3(peaks = [[1,3],[1,3], [1,2]]))
    # print(Solution().numberOfMountains4(peaks = [[1,3],[1,3], [1,2]]))
    print(Solution().numberOfMountains3(peaks = [[2,2],[6,3],[5,4]]))
    print(Solution().numberOfMountains3(peaks = [[2,2],[6,3],[4,6]]))
    print(Solution().numberOfMountains3(peaks = [[1,3],[1,3]]))
    print(Solution().numberOfMountains3(peaks = [[1,3],[1,3], [1,2]]))
    print(Solution().numberOfMountains3(peaks = [[2,2],[6,3],[9,2],[10,3]]))
    print(Solution().numberOfMountains3(peaks = [[2,2],[6,3],[1,3],[1,3],[9,2],[10,3]]))
    print("*************************************")
    print(Solution().numberOfMountains4(peaks = [[2,2],[6,3],[5,4]]))
    print(Solution().numberOfMountains4(peaks = [[2,2],[6,3],[4,6]]))
    print(Solution().numberOfMountains4(peaks = [[1,3],[1,3]]))
    print(Solution().numberOfMountains4(peaks = [[1,3],[1,3], [1,2]]))
    print(Solution().numberOfMountains4(peaks = [[2,2],[6,3],[9,2],[10,3]]))
    print(Solution().numberOfMountains4(peaks = [[2,2],[6,3],[1,3],[1,3],[9,2],[10,3]]))
    print("*************************************")
    print(Solution().numberOfMountains5(peaks = [[2,2],[6,3],[5,4]]))
    print(Solution().numberOfMountains5(peaks = [[2,2],[6,3],[4,6]]))
    print(Solution().numberOfMountains5(peaks = [[1,3],[1,3]]))
    print(Solution().numberOfMountains5(peaks = [[1,3],[1,3], [1,2]]))
    print(Solution().numberOfMountains5(peaks = [[2,2],[6,3],[9,2],[10,3]]))
    print(Solution().numberOfMountains5(peaks = [[2,2],[6,3],[1,3],[1,3],[9,2],[10,3]]))
'''
-Medium-  *DP* *Prefix Sum*

Given an array of integers arr, return the number of subarrays with an odd
sum, modulo 10^9 + 7.

Example 1: arr = [1,3,5]        -> 4
Example 2: arr = [2,4,6]        -> 0
Example 3: arr = [1,2,3,4,5,6,7] -> 16

Constraints: 1 <= arr.length <= 10^5, 1 <= arr[i] <= 100
'''
from typing import List


class Solution:
    def numOfSubarrays(self, arr: List[int]) -> int:
        """Count subarrays ending at each position with odd / even sum.

        An odd element flips the parity of every running sum, so the odd
        and even counters swap (with the singleton subarray added).
        """
        MOD = 10 ** 9 + 7
        odd_cnt = even_cnt = total = 0
        for value in arr:
            if value & 1:
                # every even-sum subarray plus the new singleton becomes odd
                total = (total + even_cnt + 1) % MOD
                odd_cnt, even_cnt = even_cnt + 1, odd_cnt
            else:
                # parities are preserved; odd-sum subarrays stay odd
                total = (total + odd_cnt) % MOD
                even_cnt += 1
        return total


if __name__ == "__main__":
    print(Solution().numOfSubarrays(arr=[1, 3, 5]))
    print(Solution().numOfSubarrays(arr=[2, 4, 6]))
    print(Solution().numOfSubarrays(arr=[1, 2, 3, 4, 5, 6, 7]))
'''
Insert a new interval into a list of non-overlapping intervals sorted by
start time, merging where necessary.

Example 1: [1,3],[6,9] + [2,5]  ->  [1,5],[6,9]
Example 2: [1,2],[3,5],[6,7],[8,10],[12,16] + [4,9]
           ->  [1,2],[3,10],[12,16]   ([4,9] overlaps [3,5],[6,7],[8,10])
'''


# Definition for an interval.
class Interval(object):
    def __init__(self, s=0, e=0):
        self.start = s
        self.end = e

    # human-readable form for printing results
    def __str__(self):
        return "[" + str(self.start) + "," + str(self.end) + "]"


import sys


class Solution(object):
    def insertSP(self, intervals, newInterval):
        """Partition into strictly-left / strictly-right intervals and fold
        everything else into the new interval."""
        lo, hi = newInterval.start, newInterval.end
        before, after = [], []
        for iv in intervals:
            if iv.end < lo:
                before.append(iv)
            elif iv.start > hi:
                after.append(iv)
            else:
                # overlapping: widen the new interval to absorb it
                lo = min(lo, iv.start)
                hi = max(hi, iv.end)
        return before + [Interval(lo, hi)] + after

    def insert(self, intervals, newInterval):
        """
        :type intervals: List[Interval]
        :type newInterval: Interval
        :rtype: List[Interval]

        Single sweep: copy intervals left of the new one, merge overlaps
        into it (mutating newInterval), then append the remainder.
        """
        merged = []
        pos = 0
        while pos < len(intervals):
            current = intervals[pos]
            if current.start > newInterval.end:
                break
            if current.end < newInterval.start:
                merged.append(current)
            else:
                newInterval.start = min(newInterval.start, current.start)
                newInterval.end = max(newInterval.end, current.end)
            pos += 1
        if pos <= len(intervals):
            merged.append(newInterval)
        while pos < len(intervals):
            merged.append(intervals[pos])
            pos += 1
        return merged

    def insertNew(self, intervals, newInterval):
        """
        :type intervals: List[List[int]]
        :type newInterval: List[int]
        :rtype: List[List[int]]

        Same sweep as insert(), for [start, end] lists; input intervals are
        copied so the result does not alias the caller's lists.
        """
        merged = []
        pos = 0
        while pos < len(intervals):
            current = intervals[pos]
            if current[0] > newInterval[1]:
                break
            if current[1] < newInterval[0]:
                merged.append(current[:])
            else:
                newInterval[0] = min(newInterval[0], current[0])
                newInterval[1] = max(newInterval[1], current[1])
            pos += 1
        if pos <= len(intervals):
            merged.append(newInterval[:])
        while pos < len(intervals):
            merged.append(intervals[pos][:])
            pos += 1
        return merged


if __name__ == "__main__":
    print(Solution().insertNew([[1, 2], [3, 5], [6, 7], [8, 10], [12, 16]], [4, 8]))
    print(Solution().insertNew([[1, 2], [3, 5], [6, 7], [8, 10], [12, 16]], [20, 24]))
# -*- coding: utf-8 -*-
"""
Given a binary search tree, write a function kthSmallest to find the kth
smallest element in it.

Note: You may assume k is always valid, 1 <= k <= BST's total elements.

Follow up: If the BST is modified (insert/delete) often and kthSmallest is
called frequently, cache subtree sizes (see leftCnt / TreeNode.leftNodes).
"""
__author__ = 'Daniel'


class TreeNode:
    def __init__(self, x):
        self.val = x
        self.leftNodes = 0  # cached size of the left subtree (filled by leftCnt)
        self.left = None
        self.right = None


class Solution:
    def kthSmallest(self, root, k):
        """Iterative in-order traversal; stop after popping k nodes."""
        stack = []
        node = root
        while node:
            stack.append(node)
            node = node.left
        x = 1
        while stack and x <= k:
            node = stack.pop()
            x += 1
            # the popped node's right subtree comes next in in-order
            right = node.right
            while right:
                stack.append(right)
                right = right.left
        return node.val

    def kthSmallestRecursive(self, root, k):
        """
        :type root: TreeNode
        :type k: int
        :rtype: int

        Order-statistic descent: compare k against the left subtree size.
        Fixed to recurse into itself (previously it fell through to the
        iterative kthSmallest) and the stray debug print was removed.
        """
        l = self.cnt(root.left)
        if l + 1 == k:
            return root.val
        elif l + 1 < k:
            return self.kthSmallestRecursive(root.right, k - (l + 1))
        else:
            return self.kthSmallestRecursive(root.left, k)

    def cnt(self, root):
        """Number of nodes in the subtree rooted at root."""
        if not root:
            return 0
        return 1 + self.cnt(root.left) + self.cnt(root.right)

    def leftCnt(self, root):
        """Populate node.leftNodes for every node along the right spine and
        return the total node count of the traversed portion."""
        if root:
            root.leftNodes = self.cnt(root.left)
            return root.leftNodes + 1 + self.leftCnt(root.right)
        else:
            return 0

    def kthSmallestRecurClean(self, root, k):
        """In-order traversal that decrements k and records the kth value."""
        self.k = k
        self.res = None
        self.helper(root)
        return self.res

    def helper(self, node):
        if not node:
            return
        self.helper(node.left)      # inorder, first left subtree
        self.k -= 1                 # inorder, current node
        if self.k == 0:             # k hit zero: this is the kth smallest
            self.res = node.val
            return
        self.helper(node.right)     # inorder, lastly right subtree


if __name__ == "__main__":
    root = TreeNode(70)
    root.left = TreeNode(31)
    root.left.right = TreeNode(40)
    root.left.right.right = TreeNode(45)
    root.right = TreeNode(93)
    root.right.left = TreeNode(73)
    root.right.right = TreeNode(95)
    root.left.left = TreeNode(14)
    root.left.left.right = TreeNode(23)
    print(Solution().kthSmallest(root, 4))
    print(Solution().kthSmallestRecursive(root, 8))
    print(Solution().cnt(root))
    Solution().leftCnt(root)
    print(root.leftNodes)
''' -Medium- Given the root of a binary tree, return the maximum width of the given tree. The maximum width of a tree is the maximum width among all levels. The width of one level is defined as the length between the end-nodes (the leftmost and rightmost non-null nodes), where the null nodes between the end-nodes are also counted into the length calculation. It is guaranteed that the answer will in the range of 32-bit signed integer. Example 1: Input: root = [1,3,2,5,3,null,9] Output: 4 Explanation: The maximum width existing in the third level with the length 4 (5,3,null,9). Example 2: Input: root = [1,3,null,5,3] Output: 2 Explanation: The maximum width existing in the third level with the length 2 (5,3). Example 3: Input: root = [1,3,2,5] Output: 2 Explanation: The maximum width existing in the second level with the length 2 (3,2). Example 4: Input: root = [1,3,2,5,null,null,9,6,null,null,7] Output: 8 Explanation: The maximum width existing in the fourth level with the length 8 (6,null,null,null,null,null,null,7). Constraints: The number of nodes in the tree is in the range [1, 3000]. -100 <= Node.val <= 100 ''' # Definition for a binary tree node. 
# class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right from typing import Optional from BinaryTree import null, TreeNode, constructBinaryTree from collections import deque class Solution: def widthOfBinaryTree(self, root: Optional[TreeNode]) -> int: que = deque([(root,0)]) res = 1 while que: nxt = deque() i, j = -1, -1 for _ in range(len(que)): node, lvl = que.popleft() if node.left: idx = 2*lvl if i == -1: i = idx else: j = idx nxt.append((node.left,idx)) if node.right: idx = 2*lvl+1 if i == -1: i = idx else: j = idx nxt.append((node.right, idx)) if i >= 0 and j >= 0: res = max(res, j-i+1) que = nxt return res if __name__ == "__main__": root = constructBinaryTree([1,3,2,5,3,null,9]) print(Solution().widthOfBinaryTree(root)) root = constructBinaryTree([1,3,null,5,3]) root.prettyPrint() print(Solution().widthOfBinaryTree(root)) root = constructBinaryTree([1,3,2,5]) root.prettyPrint() print(Solution().widthOfBinaryTree(root)) root = constructBinaryTree([1,3,2,5,null,null,9,6,null,null,null,null,null,null,7]) root.prettyPrint() print(Solution().widthOfBinaryTree(root))
'''
-Medium-  *Greedy* *Set*

Choose as many distinct integers from [1, n] as possible such that none is
in `banned` and their sum does not exceed maxSum; return the count.

Example 1: banned = [1,6,5], n = 5, maxSum = 6      -> 2   (2 and 4)
Example 2: banned = [1..7], n = 8, maxSum = 1       -> 0
Example 3: banned = [11], n = 7, maxSum = 50        -> 7

Constraints:
1 <= banned.length <= 10^4
1 <= banned[i], n <= 10^4
1 <= maxSum <= 10^9
'''
from typing import List


class Solution:
    def maxCount(self, banned: List[int], n: int, maxSum: int) -> int:
        """Greedy: taking the smallest allowed integers first maximises the
        count, so scan 1..n and stop as soon as the next candidate no longer
        fits (every later candidate is larger, so none can fit either).

        The previous version kept scanning to n after the budget was
        exhausted and carried a dead `if t > maxSum` check that could never
        fire (t only grew when t + i <= maxSum).
        """
        banned_set = set(banned)
        total = count = 0
        for i in range(1, n + 1):
            if i in banned_set:
                continue
            if total + i > maxSum:
                break  # candidates only get bigger from here
            total += i
            count += 1
        return count


if __name__ == '__main__':
    print(Solution().maxCount(banned=[1, 2, 3, 4, 5, 6, 7], n=8, maxSum=1))
'''
-Medium-  *Reservoir sampling*

Given an array of integers with possible duplicates, randomly output the
index of a given target number (assumed to exist). The array can be very
large, so the solution must use O(1) extra space.

Example:
    solution = Solution([1,2,3,3,3])
    solution.pick(3)  # 2, 3 or 4, each with equal probability
    solution.pick(1)  # 0, the only index holding 1
'''
from random import randint


class Solution(object):

    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        self.nums = nums

    def pick(self, target):
        """
        :type target: int
        :rtype: int

        Reservoir sampling of size 1: the k-th matching index replaces the
        current choice with probability 1/k, which leaves every matching
        index equally likely after a single pass.
        """
        chosen = 0
        matches = 0
        for idx, value in enumerate(self.nums):
            if value != target:
                continue
            matches += 1
            if randint(1, matches) == 1:
                chosen = idx
        return chosen


if __name__ == "__main__":
    # Your Solution object will be instantiated and called as such:
    nums = [1, 2, 3, 3, 3]
    obj = Solution(nums)
    print(obj.pick(3))
    print(obj.pick(3))
    print(obj.pick(3))
'''
-Medium-  *DFS* *Backtracking*

Given distinct candidate numbers and a target, find all unique combinations
summing to target; each candidate may be reused any number of times.

Example 1: candidates = [2,3,6,7], target = 7  ->  [[2,2,3],[7]]
Example 2: candidates = [2,3,5],   target = 8  ->  [[2,2,2,2],[2,3,3],[3,5]]
'''


class Solution(object):
    def combinationSum(self, candidates, target):
        """
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        results = []
        # sorting lets the search prune as soon as a candidate exceeds
        # the remaining target
        candidates.sort()
        self.dfs(candidates, 0, [], results, target)
        return results

    def dfs(self, nums, start, path, result, target):
        """Depth-first search over sorted nums; `start` prevents permuted
        duplicates while still allowing the same element to be reused."""
        if target == 0:
            result.append(path)
            return
        for i in range(start, len(nums)):
            # nums is sorted, so once a candidate overshoots the remaining
            # target every later one does too
            if nums[i] > target:
                break
            # pass `i` (not i+1) so the element may be chosen again
            self.dfs(nums, i, path + [nums[i]], result, target - nums[i])


print(Solution().combinationSum([2, 3, 6, 7], 7))
print(Solution().combinationSum([2, 3, 5], 8))
'''
-Medium-  *Topological Sort*

prerequisites[i] = [a, b] means course a must be taken before course b
(directly or transitively). For each query [u, v], answer whether u is a
prerequisite of v.

Example 1: n=2, pre=[[1,0]], q=[[0,1],[1,0]]          -> [false, true]
Example 2: n=2, pre=[],      q=[[1,0],[0,1]]          -> [false, false]
Example 3: n=3, pre=[[1,2],[1,0],[2,0]], q=[[1,0],[1,2]] -> [true, true]

Constraints:
2 <= numCourses <= 100; the prerequisites graph has no cycles;
all pairs [ai, bi] are unique; 1 <= queries.length <= 10^4.
'''
from typing import List
from collections import defaultdict, deque


class Solution:
    def checkIfPrerequisite(self, numCourses: int, prerequisites: List[List[int]], queries: List[List[int]]) -> List[bool]:
        """Kahn's topological order while propagating each course's full
        ancestor (prerequisite) set to its successors."""
        adjacency = defaultdict(list)
        indegree = [0] * numCourses
        for before, after in prerequisites:
            adjacency[before].append(after)
            indegree[after] += 1

        # ancestors[c] = every course that must precede c
        ancestors = [set() for _ in range(numCourses)]
        queue = deque(c for c in range(numCourses) if indegree[c] == 0)
        while queue:
            course = queue.popleft()
            for nxt in adjacency[course]:
                ancestors[nxt].add(course)
                ancestors[nxt] |= ancestors[course]
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    queue.append(nxt)

        return [u in ancestors[v] for u, v in queries]


if __name__ == "__main__":
    print(Solution().checkIfPrerequisite(numCourses=2, prerequisites=[[1, 0]], queries=[[0, 1], [1, 0]]))
    print(Solution().checkIfPrerequisite(numCourses=2, prerequisites=[], queries=[[1, 0], [0, 1]]))
    pre = [[6,3],[6,8],[6,5],[6,10],[6,0],[6,7],[6,4],[6,9],[6,1],[3,8],[3,10],[3,0],[3,7],[3,4],[3,2],[3,9],[3,1],[8,5],[8,10],[8,4],[8,2],[8,9],[5,10],[5,7],[5,4],[5,9],[5,1],[10,0],[10,7],[10,4],[10,2],[10,9],[0,7],[0,4],[0,2],[7,2],[7,9],[7,1],[4,2],[4,9],[4,1],[2,9],[2,1]]
    query = [[2,1],[8,9],[6,7],[3,8],[4,10],[9,6],[4,2],[5,10],[3,5],[5,9],[10,7],[7,6],[7,10],[0,5],[2,8],[6,2],[9,7],[9,4],[5,0],[9,5],[0,9],[6,10],[8,9],[5,8],[8,9],[4,5],[1,10],[6,5],[5,9],[0,9],[2,6],[4,5],[9,1],[8,1],[9,10],[4,6],[6,4],[5,9],[7,1],[10,1],[9,6],[1,3],[2,0],[9,10],[5,9],[7,5],[9,6],[1,4],[3,1],[10,4],[5,6],[1,4],[4,3],[9,5],[4,5],[5,8],[5,6],[9,10],[9,10],[7,8],[5,6],[4,6],[3,5],[7,10],[8,10],[7,8],[0,4],[7,0],[8,3],[8,10],[2,4],[6,10],[0,1],[10,6],[7,2],[4,3],[2,3],[3,1],[1,4],[5,7],[4,10],[7,2],[6,8],[0,8],[4,3],[8,7],[0,3],[10,9],[5,7],[6,8],[8,5],[3,5],[9,5],[7,9],[7,9],[3,4],[7,6],[3,9],[2,0],[10,6],[7,6],[10,6],[4,3],[9,10],[3,7],[7,10],[6,1]]
    print(Solution().checkIfPrerequisite(numCourses=11, prerequisites=pre, queries=query))
'''
-Hard-  *DP*

A die simulator rolls 1..6; face i may not appear more than rollMax[i]
(1-indexed) times consecutively. Return the number of distinct sequences of
exactly n rolls, modulo 10^9 + 7.

Example 1: n = 2, rollMax = [1,1,2,2,2,3] -> 34
Example 2: n = 2, rollMax = [1,1,1,1,1,1] -> 30
Example 3: n = 3, rollMax = [1,1,1,2,2,3] -> 181

Constraints: 1 <= n <= 5000, rollMax.length == 6, 1 <= rollMax[i] <= 15
'''
from typing import List


class Solution:
    def dieSimulator(self, n: int, rollMax: List[int]) -> int:
        """Bottom-up DP.

        dp[i][j]      = number of valid sequences of i rolls ending with face j
        dp[i][FACES]  = total number of valid sequences of i rolls

        A sequence ending in a run of `run` copies of face j is counted by
        taking any valid (i - run)-roll sequence that does NOT end with j:
        dp[i-run][FACES] - dp[i-run][j].
        """
        FACES = len(rollMax)
        dp = [[0] * (FACES + 1) for _ in range(n + 1)]

        # base cases: the empty sequence, then each single roll
        dp[0][FACES] = 1
        for j in range(FACES):
            dp[1][j] = 1
        dp[1][FACES] = FACES

        for i in range(2, n + 1):
            for j in range(FACES):
                for run in range(1, rollMax[j] + 1):
                    if run > i:
                        break
                    dp[i][j] += dp[i - run][FACES] - dp[i - run][j]
            # row total (dp[i][FACES] is still 0 here, so sum(dp[i]) is safe)
            dp[i][FACES] = sum(dp[i])
        return dp[n][FACES] % 1000000007


if __name__ == "__main__":
    print(Solution().dieSimulator(n=2, rollMax=[1, 1, 2, 2, 2, 3]))
'''
-Medium-

Given n points on a 2D plane, return the widest vertical area between two
points such that no points are inside the area (points on the edge are not
considered inside).

Example 1: points = [[8,7],[9,9],[7,4],[9,7]]             -> 1
Example 2: points = [[3,1],[9,0],[1,0],[1,4],[5,3],[8,8]] -> 3

Constraints: 2 <= n <= 10^5, 0 <= xi, yi <= 10^9
'''
from typing import List


class Solution:
    def maxWidthOfVerticalArea(self, points: List[List[int]]) -> int:
        """Only x-coordinates matter: sort (in place, as before) and take
        the largest gap between consecutive x values."""
        points.sort()
        return max(
            (nxt[0] - cur[0] for cur, nxt in zip(points, points[1:])),
            default=0,
        )


if __name__ == "__main__":
    print(Solution().maxWidthOfVerticalArea(points=[[8, 7], [9, 9], [7, 4], [9, 7]]))
    print(Solution().maxWidthOfVerticalArea(points=[[3, 1], [9, 0], [1, 0], [1, 4], [5, 3], [8, 8]]))
def median(lst):
    """Return the median of lst; prints the sorted copy as a side effect.

    For an even count, the mean of the two middle values is returned as a
    float; the input list itself is left unmodified.
    """
    ordered = sorted(lst)
    mid = len(ordered) // 2
    if len(lst) % 2:
        value = ordered[mid]
    else:
        value = (ordered[mid] + ordered[mid - 1]) / 2.0
    print(ordered)
    return value


print(median([7, 12, 3, 1, 6, 8, 9, 10]))
def make_word_dict():
    """Map each sorted-letter tuple found in words.txt to an empty list.

    Uses a context manager so the file handle is closed (the old version
    leaked it) and setdefault instead of the check-then-pass pattern.
    """
    t = {}
    with open('words.txt') as fin:
        for line in fin:
            word = line.strip().lower()
            t.setdefault(tuple(sorted(word)), [])
    return t


def make_word_list():
    """Read words.txt into a list of lowercased words."""
    with open('words.txt') as fin:
        return [line.strip().lower() for line in fin]


def assign_words_to_letters(word_list, letters):
    """Append each word to its sorted-letter bucket in `letters`.

    `letters` is mutated in place (and also returned), matching the original
    aliasing behaviour; a missing key raises KeyError as before.
    """
    for word in word_list:
        letters[tuple(sorted(word))].append(word)
    return letters


def anagram_sets(d):
    """Keep only the buckets that hold more than one word (real anagram sets)."""
    return {key: words for key, words in d.items() if len(words) > 1}


def largest_sets(anagrams):
    """Return the anagram word-lists ordered from largest set to smallest."""
    ranked = sorted(((len(words), words) for words in anagrams.values()), reverse=True)
    return [words for _, words in ranked]


def maxLen(t):
    """Length of the biggest word-list in t; raises ValueError if t is empty."""
    return max(len(words) for words in t)


def largest_bingo(anagrams):
    """Sorted letters of the first word in the biggest anagram set.

    largest_sets() already sorts descending by size, so its first element
    has maximal length — no second filtering pass is needed.
    """
    biggest = largest_sets(anagrams)[0]
    return sorted(biggest[0])


def bingo(anagrams):
    """Keep only 8-letter keys (a 'bingo' uses all 7 tiles + 1 board letter)."""
    return {key: words for key, words in anagrams.items() if len(key) == 8}


if __name__ == '__main__':
    letters = make_word_dict()
    word_list = make_word_list()
    anagrams = assign_words_to_letters(word_list, letters)
    grams = anagram_sets(anagrams)
    bing = bingo(grams)
    # fixed: Python 2 `print largest_bingo(bing)` was a SyntaxError on Python 3
    print(largest_bingo(bing))
from inlist import *


def reverse_pair(word_list, word):
    """Return True if the reverse of `word` is also in `word_list`.

    Relies on in_bisect (from inlist), which expects a sorted list.
    """
    return in_bisect(word_list, word[::-1])


def make_word_list():
    """Read words.txt into a list of stripped words.

    Fixed: the old version returned a lazy map object (Python 3), which
    breaks the bisection search; it also contained orphaned, unreachable
    statements referencing undefined names after the return.
    """
    with open('words.txt') as fin:
        return [line.strip() for line in fin]


if __name__ == '__main__':
    word_list = make_word_list()
    for word in word_list:
        if reverse_pair(word_list, word):
            # Python 3 print call (was a Python 2 print statement)
            print(word, word[::-1])
# import our required packages from flask_sqlalchemy import SQLAlchemy from flask_migrate import Migrate from flask_login import UserMixin, LoginManager from werkzeug.security import generate_password_hash # import python modules for our database models # we'll talk about security in a minute login = LoginManager() @login.user_loader def load_user(user_id): return User.query.get(user_id) # instantiate a database instance db = SQLAlchemy() # define a model - results in a database table # whats happening in the parenthesis in the class line? We're inheriting behavior from a sqlalchemy class -> aka we're telling the computer "hey, this class is a database model" class Animal(db.Model): # kind of similar to our CREATE TABLE queries -> we're telling the database what columns/attributes go into this table/model id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(50), nullable=False, unique=True) weight = db.Column(db.Integer, nullable=True, default='Unknown') height = db.Column(db.Integer, nullable=True, default='Unknown') climate = db.Column(db.String(50), nullable=True, default='all climates') region = db.Column(db.String(50)) def __repr__(self): return f"<Animal: {self.name}>" class User(db.Model, UserMixin): id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(15), nullable=False, unique=True) email = db.Column(db.String(150), nullable=False, unique=True) password = db.Column(db.String(254), nullable=False) def __init__(self, username, password, email): self.username = username self.password = generate_password_hash(password) self.email = email def __repr__(self): return f"<User: {self.username}>"
#!/usr/bin/python3 Rectangle = __import__('3-rectangle').Rectangle def main(): my_rectangle = Rectangle(2, 4) print("Area: {} - Perimeter: {}".format(my_rectangle.area(), my_rectangle.perimeter())) print(str(my_rectangle)) print(repr(my_rectangle)) print("--") my_rectangle.width = 10 my_rectangle.height = 3 print(my_rectangle) print(repr(my_rectangle)) print("usr tst") r = Rectangle() print(r.width) print(r.height) print(str(r)) print(r) test(1,1) test([],1) test(-1,1) test(1,[]) test(1,-1) def test(a,b): try: Rectangle(a,b) except Exception as e: print("{}: {}".format(type(e).__name__, e)) main()
#!/usr/bin/python3 safe_print_list_integers = \ __import__('2-safe_print_list_integers').safe_print_list_integers my_list = [1, 2, 3, 4, 5] nb_print = safe_print_list_integers(my_list, 2) print("nb_print: {:d}".format(nb_print)) my_list = [1, 2, 3, "Holberton", 4, 5, [1, 2, 3]] nb_print = safe_print_list_integers(my_list, len(my_list)) print("nb_print: {:d}".format(nb_print)) nb_print = safe_print_list_integers(my_list, len(my_list) + 2) print("nb_print: {:d}".format(nb_print)) nb_print = safe_print_list_integers([[], 10], len(my_list) + 2) print("nb_print: {:d}".format(nb_print)) nb_print = safe_print_list_integers([10, "hello", 10, 10, [], 10], 20) print("nb_print: {:d}".format(nb_print))
class GameState(object):
    """Top-level game container: the board plus the eight players."""

    def __init__(self):
        # Gameboard and Player are presumably defined/imported elsewhere in
        # the project -- verify; neither is defined in this module.
        self.Board = Gameboard()
        self.Players = self.initialize_players()

    def initialize_players(self):
        """Create one Player at each of the eight starting corner/edge
        positions of the 10x10 board.

        Fixed: `Players.append(new Player(...))` used Java's `new` keyword,
        which is a SyntaxError in Python; the constructor is called directly.
        """
        positions = [(0, 0), (0, 3), (0, 6), (0, 9),
                     (9, 0), (9, 3), (9, 6), (9, 9)]
        return [Player(pos, 10, 10, 10, 10, None) for pos in positions]
import heapq


class Node():
    """Node of a Huffman tree; leaves carry a `label`, internal nodes don't."""

    def __init__(self, weight, label=None, left_child=None, right_child=None):
        self.weight = weight
        self.label = label
        self.left_child = left_child
        self.right_child = right_child

    def merge(self, other):
        """Return a new internal node whose weight is the sum of both children."""
        parent_weight = self.weight + other.weight
        return Node(parent_weight, left_child=self, right_child=other)

    # ordering is by weight only, so Node works inside heapq
    def __eq__(self, other):
        return self.weight == other.weight

    def __lt__(self, other):
        return self.weight < other.weight

    def __le__(self, other):
        return self.weight <= other.weight

    def __gt__(self, other):
        return self.weight > other.weight

    def __ge__(self, other):
        return self.weight >= other.weight


class Encoding():
    """Walk a Huffman tree and collect the bit string for every leaf label."""

    def __init__(self, root_node):
        self.encoding = {}
        self.get_encoding(root_node, "")

    def get_encoding(self, node, code):
        # Compare against None rather than truthiness so falsy labels
        # (0, "") are still recognised as leaves; the old `if node.label:`
        # recursed into a leaf's None children and crashed for label 0.
        if node.label is not None:
            self.encoding[node.label] = code
            return
        self.get_encoding(node.left_child, "0" + code)
        self.get_encoding(node.right_child, "1" + code)


def huffman_coding(weights):
    """Return {label: code} for the given {label: weight} mapping.

    Repeatedly merges the two lightest subtrees. A single-symbol input now
    yields the empty code (previously it crashed on an unbound variable).
    """
    heap = [Node(weight, label=lbl) for lbl, weight in weights.items()]
    heapq.heapify(heap)
    while len(heap) > 1:
        first = heapq.heappop(heap)
        second = heapq.heappop(heap)
        heapq.heappush(heap, first.merge(second))
    return Encoding(heap[0]).encoding


if __name__ == "__main__":
    weights = {}
    with open("huffman.txt") as f:
        for i, line in enumerate(f):
            if i > 0:  # first line of the data file is a header/count
                weights[i] = int(line.strip())
    encoding = huffman_coding(weights)
    lengths = [len(encoding[i]) for i in encoding.keys()]
    print(max(lengths))
    print(min(lengths))
import os
from collections import defaultdict
import heapq


def create_list(file_name):
    """Read one integer per line from file_name located next to this script."""
    this_folder = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(this_folder, file_name)
    numbers = []
    with open(path) as f:
        for line in f:
            numbers.append(int(line.strip()))
    return numbers


def median_maintenance(item, low_heap, high_heap):
    """Insert `item` and return the current (lower) median.

    low_heap is a max-heap of the lower half (stored negated),
    high_heap is a min-heap of the upper half; after rebalancing,
    len(high_heap) - len(low_heap) is always 0 or 1.
    """
    # first element overall goes to the upper half and is the median
    if not low_heap and not high_heap:
        heapq.heappush(high_heap, item)
        return item

    # route to the half the value belongs in
    if item > high_heap[0]:
        heapq.heappush(high_heap, item)
    else:
        heapq.heappush(low_heap, -item)

    # rebalance (at most one of these loops runs)
    while len(high_heap) > len(low_heap) + 1:
        heapq.heappush(low_heap, -heapq.heappop(high_heap))
    while len(low_heap) > len(high_heap):
        heapq.heappush(high_heap, -heapq.heappop(low_heap))

    # even count -> lower median sits atop the (negated) low heap
    if (len(low_heap) + len(high_heap)) % 2 == 0:
        return -low_heap[0]
    return high_heap[0]


if __name__ == "__main__":
    items = create_list("Median.txt")
    low_heap = []
    high_heap = []
    median_sum = 0
    for item in items:
        median_sum += median_maintenance(item, low_heap, high_heap)
    print(median_sum % 10000)
# Optional Parameteres Tutorial #1 def func1(x=5): return x **2 def func2(word,add=5,freq=2): print(word*(freq+add)) call = func2("indra",0,1) class car(object): def __init__(self,make,model,year,condition="New",kms=0): self.make = make self.model = model self.year = year self.condition = condition self.kms = kms def display(self,showAll): if showAll : print("This car is a %s %s from %s, it is %s and has %s kms." %(self.make,self.model,self.year,self.condition,self.kms)) else: print("this car is a %s %s from %s." %(self.make,self.model,self.year)) whip = car("ford","Fussion",2012) whip.display(False)
# Functions def addTwo(x): return x + 2 def subtractTwo(number): return number - 2 def accel(mass,force): a = mass * force return a newNumber = addTwo(7) print(newNumber) def printString(string): print(string) printString("My name is Indra") y = accel(2,3) print(y)
# .strip(), len(), lower(), .upper(), .split() text = input("Please input something :") print(text.split()) # split the string deafult by space print(text.strip()) # clear the last space print(len(text)) print(text.lower()) print(text.upper()) print(text.split(". "))
""" Distribution Plots - Using Seaborn and Python to Visualize Datasets ----------------------------------------------------------------------------- Gianluca Capraro Created: April 2019 ----------------------------------------------------------------------------- This script will demonstrate the use of the Seaborn, Matplotlib, and Numpy libraries to load, manipulate, and visualize data with Python. The data is loaded from Seaborn's publicly available data. It contains data related to restaurant bills, the associated tip, party size, paying customer's gender, their status as a smoker, the day, and time. The examples contained in this script will demonstrate use of: - Distribution Plots Types of distribution plots available in Seaborn are: ----------------------------------------------------------------------------- - distplot(col of data, num bins, histogram bool, kde bool, etc.) - jointplot(x col, y col, dataset, kind, color, dropna) - kind could be 'scatter','reg','resid', 'kde', 'hex' - pairplot(data, hue, hue_order, palette) - rugplot(col of data, height, axis) - kdeplot(data, second set of data, shade bool, vertical bool) For all method parameters, refer to Seaborn official documentation. 
----------------------------------------------------------------------------- """ #import the necessary libraries import seaborn as sns import matplotlib.pyplot as plt import numpy as np #load the dataset tips_data = sns.load_dataset('tips') #print out the head of the data to terminal to understand columns print('\nTips Dataset Head:') print(tips_data.head()) print('\n') """ ---------------------------------------------------------------------------- Distplots - useful for analyzing feature distributions ---------------------------------------------------------------------------- """ #create a distribution plot of the total bill data with only the data parameter print('Showing Distribution of Total Bill...') sns.distplot(tips_data['total_bill']) plt.show() print('\n') #create a distribution plot without the kde layer, show only the histogram, add more bins #add a title and x label to the plot print('Showing Histogram of Total Bill data, KDE=False...') sns.distplot(tips_data['total_bill'], kde=False, bins=30) plt.title('Distribution of Total Bills') plt.xlabel('Total Bill ($)') plt.show() print('\n') """ ---------------------------------------------------------------------------- Jointplots - useful for comparing two different data features ---------------------------------------------------------------------------- """ #create a jointplot, scatter kind, comparing total bill on x axis to the tip amount on y axis print('Showing Jointplot of Total Bill vs. Tip Amount...') sns.jointplot(x='total_bill',y='tip',data=tips_data,kind='scatter') plt.show() print('\n') #repeat this type of plot, however this time change the kind type to 'hex' print('Showing Jointplot (Hex) of Total Bill vs. Tip Amount...') sns.jointplot(x='total_bill',y='tip',data=tips_data,kind='hex') plt.show() print('\n') #this time change the kind type to 'reg' print('Showing Jointplot (Linear Reg) of Total Bill vs. 
Tip Amount...') sns.jointplot(x='total_bill',y='tip',data=tips_data,kind='reg') plt.show() print('\n') """ ---------------------------------------------------------------------------- Pairplot - useful for analyzing all relationships in data quickly ---------------------------------------------------------------------------- """ #create a pairplot of all relationships in tips data print('Showing Pairplot of Tips Dataset...') sns.pairplot(tips_data) plt.show() print('\n') #create the same pairplot, however segment this data based on gender #use the palette parameter to adjust the color scheme #change diagonal kind to show histograms print('Showing Pairplot of Tips Dataset Segmented by Gender...') sns.pairplot(tips_data, hue = 'sex', palette = 'coolwarm', diag_kind = 'hist') plt.show() print('\n') """ ---------------------------------------------------------------------------- Rugplot - show a dash mark for every point on a univariate distribution - a building block of the KDE plot shown later ---------------------------------------------------------------------------- """ #create a rugplot print('Showing Rugplot of Total Bills...') sns.rugplot(tips_data['total_bill']) plt.title('Rugplot of Total Bills') plt.xlabel('Total Bill ($)') plt.show() print('\n') """ --------------------------------------------------------------------------------------- KDE Plot - kernel density estimation plot - replace every observation of data with normal distribution around that value --------------------------------------------------------------------------------------- """ #create a kde plot with seaborn print('Showing KDE Plot of Total Bills...') sns.kdeplot(tips_data['total_bill']) plt.title('KDE Plot of Total Bills') plt.xlabel('Total Bill ($)') plt.show() print('\n') #show rugplot with the kde plot to visualize their relationship print('Showing KDE and Rug Plots of Total Bills...') sns.kdeplot(tips_data['total_bill']) sns.rugplot(tips_data['total_bill']) plt.title('KDE and Rug 
Plots of Total Bills') plt.xlabel('Total Bill ($)') plt.show() print('\n') #show the same, but for data related to tip amount print('Showing KDE and Rug Plots of Tip Amounts...') sns.kdeplot(tips_data['tip']) sns.rugplot(tips_data['tip']) plt.title('KDE and Rug Plots of Tip Amounts') plt.xlabel('Tip Amount ($)') plt.show() print('\n') """ --------------------------------------------------------------------------------------- CONCLUSION --------------------------------------------------------------------------------------- In this script we have demonstrated how Seaborn can be used with Python to read data and create several different 'kinds' of distribution plots. --------------------------------------------------------------------------------------- """
import string


def print_rangoli(size):
    """Print an alphabet rangoli of the given size.

    Each row is a dash-padded palindrome of letters; the middle row reaches
    back to 'a'.  Width is 4*size - 3, the length of the middle row.
    (Fix: removed a leftover debug `print(alpha)` that polluted the output.)
    """
    width = 4 * size - 3
    alpha = string.ascii_lowercase
    # Row indices run size-1 .. 0 .. size-1: top half, middle, bottom half.
    for i in list(range(size))[::-1] + list(range(1, size)):
        # alpha[size-1:i:-1] descends toward the centre letter,
        # alpha[i:size] ascends back out again.
        print('-'.join(alpha[size - 1:i:-1] + alpha[i:size]).center(width, '-'))


if __name__ == '__main__':
    n = int(input())
    print_rangoli(n)
# HackerRank "Py If-Else": classify n as Weird / Not Weird.
number = int(input().strip())

if number % 2 != 0:
    # Odd numbers are always weird.
    print("Weird")
elif 2 <= number <= 5:
    print("Not Weird")
elif 6 <= number <= 20:
    print("Weird")
else:
    # Even and greater than 20.
    print("Not Weird")
from collections import deque

# Apply a sequence of deque commands read from stdin, then print the contents.
items = deque()
count = int(input())
for _ in range(count):
    parts = input().split()
    action = parts[0]
    if action == 'pop':
        items.pop()
    elif action == 'append':
        items.append(parts[1])
    elif action == 'appendleft':
        items.appendleft(parts[1])
    else:
        # Anything else is treated as popleft (matches the original fallback).
        items.popleft()
print(' '.join(items))
def remove(str, start, num):
    """Print *str* with *num* characters removed, beginning at index *start*.

    Equivalent to the original one-at-a-time deletion loop, but done with a
    single slice (and it no longer raises IndexError when start+num runs
    past the end).  The result is printed, not returned (original contract).
    NOTE(review): the parameter name `str` shadows the builtin; kept so
    keyword callers are unaffected.
    """
    print(str[:start] + str[start + num:])


if __name__ == '__main__':
    # Interactive driver, guarded so importing this module has no side effects.
    str2 = input("Enter the string:")
    num = int(input("Enter the number of characters to be removed:"))
    begin = int(input("enter the start point:"))
    remove(str2, begin, num)
# get and print the string in X format
def print_android_x():
    """Draw the word "android" twice, along the diagonals of a 7x7 grid.

    Fix: the filler branch now emits a space; previously it used
    print(end="") which printed nothing, collapsing every row and
    destroying the intended X layout.
    """
    letters = "android"
    for row in range(7):
        for col in range(7):
            # The X is formed by the two diagonals: col == row and col == 6 - row.
            if col == row or col == 6 - row:
                print(letters[row], end="")
            else:
                print(" ", end="")
        print()


print_android_x()
def solve(s):
    """Capitalize the first letter of every space-separated word in *s*.

    Unlike the original str.capitalize() approach, letters after the first
    one keep their original case (capitalize() forced them to lowercase),
    and runs of multiple spaces are preserved because splitting on a single
    space keeps the empty tokens.
    """
    return ' '.join(word[0].upper() + word[1:] if word else word
                    for word in s.split(' '))


s = "chris alan 12 alo"
print(solve(s))
def split_and_join(line):
    """Replace each single-space separator in *line* with a hyphen."""
    return "-".join(line.split(" "))


print(split_and_join('this is a string'))
if __name__ == '__main__':
    N = int(input())
    result = []
    # Dispatch each list command read from stdin.
    for _ in range(N):
        command, *raw = input().split()
        values = [int(v) for v in raw]
        if command == "print":
            print(result)
        elif command == "insert":
            result.insert(values[0], values[1])
        elif command == "remove":
            result.remove(values[0])
        elif command == "append":
            result.append(values[0])
        elif command == "sort":
            result.sort()
        elif command == "pop":
            result.pop()
        elif command == "reverse":
            result.reverse()
import math

'''
convert given latitude or longitude in decimal to radians
input params: deg
returns coordinates in radians
'''
def deg2rad(deg):
    """Convert *deg* (number or numeric string) to radians."""
    return math.radians(float(deg))

'''
calculate the distance between dublin and the customer
input params: lat1, lon1 (dublin coordinates), lat2, lon2 (customer coordinates)
returns: distance between two coordinates
'''
def distanceCal(lat1, lon1, lat2, lon2):
    """Great-circle distance in km between two coordinates (haversine formula).

    All four inputs are coerced to float for consistency — the original
    coerced only lat2/lon2, so string-valued lat1/lon1 crashed.
    """
    lat1, lon1, lat2, lon2 = (float(v) for v in (lat1, lon1, lat2, lon2))
    R = 6371  # mean radius of the Earth, km
    dlat = deg2rad(lat2 - lat1)
    dlon = deg2rad(lon2 - lon1)
    a = (math.sin(dlat / 2) ** 2
         + math.cos(deg2rad(lat1)) * math.cos(deg2rad(lat2)) * math.sin(dlon / 2) ** 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return R * c

'''
sort the customers to be invited by their user id
input params: list of dictionaries containing customer user id and name
returns: list of dictionaries sorted by user id in ascending
'''
def sort_customers(cust_invite):
    """Return the customer dicts sorted by their "user_id" in ascending order."""
    return sorted(cust_invite, key=lambda c: c["user_id"])
def multiply(a, b):
    """Return a * b with both operands coerced to float."""
    return float(a) * float(b)


def divide(a, b):
    """Return a / b with both operands coerced to float."""
    return float(a) / float(b)


# Maps an expression symbol from the route config to its implementation.
expr_map = {
    '*': multiply,
    '/': divide
}


def evaluate(routes, route, value):
    """Apply the operation configured for *route* to *value*.

    Raises KeyError for an unknown route or expression — the same exception
    type the original ultimately hit, but with a clear message up front
    instead of a Python 2 `print` statement followed by a crash on the
    subsequent lookup.
    """
    if route not in routes:
        raise KeyError('route: ' + str(route) + ' is not configured in routes')
    exp = routes[route]['expr']
    magnitude = routes[route]['magnitude']
    if exp not in expr_map:
        raise KeyError('operation not defined for the expression: ' + str(exp))
    return expr_map[exp](value, magnitude)
arr = [2, 1, 3, 13, 5, 17, 67, 1]


def change(first, sec):
    """Swap the elements of the module-level list ``arr`` at the two indices."""
    arr[first], arr[sec] = arr[sec], arr[first]


if __name__ == '__main__':
    # Insertion sort: sink each new element leftwards into the sorted prefix.
    for i in range(1, len(arr)):
        for j in range(i, 0, -1):
            if arr[j] < arr[j - 1]:
                change(j - 1, j)
    print('отсортированный массив: {}'.format(arr))
def centuryFromYear(year):
    """Return the century a year belongs to (1-100 -> 1, 1901-2000 -> 20).

    The original string-slicing version did not compile (missing colons,
    `=` used where `==` was meant) and applied the end-of-century rule
    backwards (it added 1 exactly when the year ended in '00').  Ceiling
    division is both correct and far simpler.
    """
    return (year + 99) // 100
# Demo of Python for-loops (output text is in Polish).

for i in range(3):
    print(f"to jest iteracja numer {i}")
    # Reassigning the loop variable has no effect on the next iteration:
    # `range` hands out a fresh value each time around.
    i += 1

film = "Matrix"
licznik = 0
for i in film:
    # `licznik` is never incremented, so this branch never fires (stays 0).
    if licznik % 2:
        print(i)
    # Skip the second print for the letter "i".
    if i == "i":
        continue
        #break#
    print(i)

# enumerate pairs each character with its index.
for index, literka in enumerate(film):
    print(index, literka)

# enumerate can start counting from any value; each item is an (n, char) tuple.
for krotka in enumerate(film, start=12):
    print(krotka)
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x        # node value
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)


class Solution:
    # @param {TreeNode} root
    # @return {boolean}
    def isBalanced(self, root):
        """Return True if the tree is height-balanced.

        A tree is balanced when, at every node, the subtree heights differ
        by at most 1.  Single post-order pass that short-circuits with -1
        on the first unbalanced subtree, replacing the original O(n^2)
        recompute-the-depth-at-every-node approach.
        """
        def height(node):
            # Returns the subtree height, or -1 if the subtree is unbalanced.
            if node is None:
                return 0
            left = height(node.left)
            if left == -1:
                return -1
            right = height(node.right)
            if right == -1:
                return -1
            if abs(left - right) > 1:
                return -1
            return 1 + max(left, right)

        return height(root) != -1
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x      # digit stored in this node
        self.next = None  # next node, or None at the tail


class Solution:
    # @param {ListNode} l1
    # @param {ListNode} l2
    # @return {ListNode}
    def addTwoNumbers(self, l1, l2):
        """Add two numbers stored as digit lists, least significant first.

        Recursive scheme: sum the head digits, keep digit % 10 in a new
        node, recursively add the tails, and fold any carry back in by
        adding a one-node list containing 1.
        """
        # When either list is exhausted, the remainder of the other IS the answer.
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        total = l1.val + l2.val
        node = ListNode(total % 10)
        rest = self.addTwoNumbers(l1.next, l2.next)
        if total > 9:
            # Propagate the carry as an extra single-digit addition.
            rest = self.addTwoNumbers(rest, ListNode(1))
        node.next = rest
        return node
def fibonacci():
    """Project Euler #2: sum the even Fibonacci numbers not exceeding 4,000,000."""
    x, y = 1, 1
    total = 0
    while x <= 4000000:
        if x % 2 == 0:
            total += x
        x, y = y, x + y
    return total


# `print` was a Python 2 statement in the original; the function form
# works on both Python 2 and 3.
print(fibonacci())
# csvからデータをとりこんで、コース番号を入力するとそのコースの特定の商品の合計値の10%を計算。 #csvを読み込んで出力する import csv file_path = "コース別商品売上管理表.csv" class itaku_bonus: def __init__(self,course): self.name = course def course_num(self): return self.name def sum(self): with open(file_path, newline='', encoding='utf-8') as csvfile: kekka = 0 # csvの最初の三行を飛ばすための処理 next(csvfile) next(csvfile) next(csvfile) reader = csv.DictReader(csvfile) for row in reader: course_name = row['コース名'] num_course = row['コース'] # nonetypeエラーが出るのでエラーを無視するコード try: num_product = int(row['商品コード']) # 商品コードが55以下の数量をすべて足す if num_course == self.name and num_product > 55: kekka += int(row['総売上金額']) except: pass return kekka / 10 course_num = input('計算したいコース番号を半角数字で入力') course_120 = itaku_bonus(course_num) print(course_120.sum())
#!/usr/bin python2.7
# -*- coding:utf-8 -*-
# Python 2 tutorial on scoping, closures and decorators.  Each numbered
# section is kept inside a triple-quoted string so it stays inert; remove
# the quotes around a section to run it (under Python 2).

# 1. Observing scope (locals vs globals)
'''a = "this is a golbal variable"
def foo():
    print locals()
    print globals()
foo()
'''

# 2. Namespaces
'''a = "this is a golbal variable"
def foo():
    #a = "text" #函数内的变量是局部变量是单独定义的新变量和全局变量不是同一个
    print locals()
    print a
foo()
print a
'''

# 3. Variable lifetime
'''def foo():
    x = 1
    def foo2():
        print x
    return None
foo()
'''

# 4. Function arguments
'''def foo(x):
    print locals()
foo(1)

def foo(a,b,**c):
    print a,b
    print c
    for x in c:
        print x,':',str(c[x])
foo(1,2,d = 'hello',c = 200)
'''

# 5. Nested functions
'''y = 2
def outer(x):
    x = 1
    def inner():
        print x
        print y
    inner()
    #类似于普通调用全局变量,但是所有函数都在全局中
outer(3)
'''

# 6. Functions are first-class objects
'''print issubclass(int,object)
def foo():
    pass
print foo.__class__
print issubclass(foo.__class__,object)
def add(x,y):
    return x+y
def sub(x,y):
    return x-y
def apply(a,x,y):
    return a(x,y)
print apply(add,10,1)
print apply(sub,10,9)
'''

# 7. Closures
'''def outer():
    x = 1
    def inner():
        print x
    return inner
foo = outer()
foo()

def outer(x):
    def inner(y):
        print '1',y**x
        return '2',y**x
    return inner
foo = outer(4)
print '3',foo(2)
'''

# 8. Decorators
'''def outer(some_func):
    def inner():
        print "befor some_func"
        ret = some_func()
        return ret + 1
    return inner
def foo():
    return 1
decorated = outer(foo)
print decorated()
print '//////////////////////'

class Coordinate(object):
    def __init__(self,x,y):
        self.x = x
        self.y = y
    def __repr__(self):
        return "Coord: " + str(self.__dict__)

# 一般方法
def add(a,b):
    return Coordinate(a.x + b.x, a.y + b.y)
def sub(a,b):
    return Coordinate(a.x - b.x, a.y- b.y)
one = Coordinate(100 , 200)
two = Coordinate(300 , 200)
three = Coordinate(-100, -100)
print add (one, two)
c = Coordinate(1,2)

# 装饰器
def wrapper(func):
    def checker(a, b):
        if a.x < 0 or a.y < 0:
            a = Coordinate(a.x if a.x > 0 else 0, a.y if a.y > o else 0)
        if b.x < 0 or b.y < 0:
            b = Coordinate(b.x if b.x > 0 else 0, b.y if b.y > 0 else 0)
        ret = func(a, b)
        if ret.x < 0 or ret.y < 0:
            ret = Coordinate(ret.x if ret.x > 0 else 0, ret.y if ret.y > 0 else 0)
        return ret
    return checker
add = wrapper(add)
sub = wrapper(sub)
print sub (one, two),add (one,three)

# 9 @标识符将装饰器应用到函数
@wrapper
def add(a, b):
    return Coordinate(a.x + b.x, a.y + b.y)
'''

# 10. *args & **kwargs
'''def one(*args):
    print args
one()
one(1,2,3)
def two(x, y, *args):
    print x, y, args
two('a','b','c')

def add(x, y):
    return x + y
lst = [1,2]
print add(lst[0], lst[1])
print add(*lst)

def foo(**kwargs):
    print kwargs
foo()
foo(x = 1, y = 2)
dct = {'x': 1, 'y' : 2}
def bar(x, y):
    print x + y
bar(**dct) #使用时字典噶和参数数必须一致
'''

# 11. The complete decorator
'''
def logger(func):
    def inner(*args, **kwargs):
        print 'Arguments were: %s, %s' %(args, kwargs)
        return func(*args, **kwargs)
    return inner
@logger
def foo1(x, y = 1):
    return x * y
@logger
def foo2():
    return 2
print foo1(5, 4)
print foo1(1)
foo()
'''
game_input = ['Default', '-', '-', '-', '-', '-', '-', '-', '-', '-']  # cells 1-9; index 0 unused
winner = None        # 'X', 'O', or None
game_running = True  # set False on a win or a full board

# ----------------Functions----------------

def gameBoard():
    """Print the current board next to a reference grid of positions."""
    print(game_input[7] + " | " + game_input[8] + " | " + game_input[9] + "\t\t7 | 8 | 9")
    print("-------------")
    print(game_input[4] + " | " + game_input[5] + " | " + game_input[6] + "\t\t4 | 5 | 6")
    print("-------------")
    print(game_input[1] + " | " + game_input[2] + " | " + game_input[3] + "\t\t1 | 2 | 3")

def _claim_cell(num, mark, prompt):
    """Place *mark* at *num*; if the cell is taken, prompt until a free one is chosen.

    Shared by Player1/Player2 (originally two duplicated copies).
    """
    if game_input[num] == '-':
        game_input[num] = mark
    else:
        print("You cannot come here... Try Again")
        while True:
            num = int(input(prompt))
            if game_input[num] == '-':
                game_input[num] = mark
                break

def Player1(num):
    """Player 1 places an 'X' at position *num* (re-prompts if occupied)."""
    _claim_cell(num, 'X', "Enter position of X : ")

def Player2(num):
    """Player 2 places an 'O' at position *num* (re-prompts if occupied)."""
    _claim_cell(num, 'O', "Enter position of O : ")

def checkGameOver():
    """Refresh the global winner and game_running flags from the board."""
    win()
    gameTie()

def win():
    """Set the global *winner* to the winning mark, or None if no line is complete."""
    global winner
    row_winner = check_rows()
    diag_winner = check_diagonals()
    col_winner = check_columns()
    if row_winner:
        winner = row_winner
    elif diag_winner:
        winner = diag_winner
    elif col_winner:
        winner = col_winner
    else:
        winner = None

def check_rows():
    """Return the mark completing a row (or None); stops the game on a win."""
    global game_running
    row1 = game_input[7] == game_input[8] == game_input[9] != '-'
    row2 = game_input[4] == game_input[5] == game_input[6] != '-'
    row3 = game_input[1] == game_input[2] == game_input[3] != '-'
    if row1 or row2 or row3:
        game_running = False
    if row1:
        return game_input[7]
    elif row2:
        return game_input[4]
    elif row3:
        return game_input[1]
    else:
        return None

def check_columns():
    """Return the mark completing a column (or None); stops the game on a win."""
    global game_running
    col1 = game_input[7] == game_input[4] == game_input[1] != '-'
    col2 = game_input[8] == game_input[5] == game_input[2] != '-'
    col3 = game_input[9] == game_input[6] == game_input[3] != '-'
    if col1 or col2 or col3:
        game_running = False
    if col1:
        return game_input[7]
    elif col2:
        return game_input[8]
    elif col3:
        return game_input[9]
    else:
        return None

def check_diagonals():
    """Return the mark completing a diagonal (or None); stops the game on a win."""
    global game_running
    diagonal_1 = game_input[7] == game_input[5] == game_input[3] != '-'
    diagonal_2 = game_input[9] == game_input[5] == game_input[1] != '-'
    if diagonal_1 or diagonal_2:
        game_running = False
    if diagonal_1:
        return game_input[7]
    elif diagonal_2:
        return game_input[9]
    else:
        return None

def gameTie():
    """End the game (and return True) when no free cell remains."""
    global game_running
    if '-' not in game_input:
        game_running = False
        return True
    else:
        return False

def playGame():
    """Alternate the two players' turns until someone wins or the board fills."""
    while game_running:
        gameBoard()
        num = int(input("Enter Position of Player1 : "))
        Player1(num)
        gameBoard()
        checkGameOver()
        if not game_running:
            break
        print("\n")
        num = int(input("Enter Position of Player2 : "))
        Player2(num)
        gameBoard()
        print("\n\n")
        checkGameOver()

# ------------------Start Execution of the program----------------------
if __name__ == '__main__':
    playGame()
    # Announce the result AFTER the game loop finishes.  (The original
    # checked `winner` before playGame() ran, when it was always None,
    # so it printed "Tie" before the first move.)
    if winner == "X" or winner == "O":
        print("\nCongratulations! Winner is " + winner)
    elif winner is None:
        print("Tie")
# CURRENTLY EXCLUSIVELY SEARCHES FOR PROPERTIES IN GLASGOW. NEED TO FIND WAY TO DYNAMICALLY GENERATE URL FOR OTHER
# LOCATIONS
# TODO: Find full post code for property then use that to find price of sold properties for DUV
# TODO: Do analysis and write to excel file using openpyxl to automate

import requests
from requests.exceptions import HTTPError
from bs4 import BeautifulSoup
import csv


# TODO: find a way to dynamically generate the locationIdentifier parameter in the url to be able to search
# for specific cities that can be entered as an argument for the `fetch_html` function.
# Data structure for dynamically generated urls to feed into `fetch_html()` ?
def generate_url(location) -> str:
    """
    Generates urls of city to be searched using the rightmove api and the locationIdentifier parameter to dynamically
    generate the required url by taking in either a city name or post code as an argument.
    :param location: Name of city or post code to be searched.
    :return: url string
    """
    region_endpoint_url = "https://www.rightmove.co.uk/typeAhead/uknostreet/"
    # The typeahead endpoint expects the upper-cased location split into
    # two-character chunks separated by '/' (e.g. "GLASGOW" -> "GL/AS/GO/W").
    count = 0
    for char in location.upper():
        if count == 2:
            region_endpoint_url += '/'
            count = 0
        region_endpoint_url += char
        count += 1
    region_response = requests.get(region_endpoint_url).json()
    searched_locations = region_response['typeAheadLocations']
    # Interactively confirm which suggested location the user meant.
    for entry in searched_locations:
        confirm_location = input(f"Is {entry['displayName']} the search entry you are looking for ? Enter Y/N:").upper()
        if confirm_location == 'Y':
            region_code = entry['locationIdentifier']
            return region_code
        else:
            continue
    # NOTE(review): if the user rejects every suggestion, the function falls
    # through and implicitly returns None — callers should handle that.


def fetch_html(parsed_url):
    """
    Uses the requests module to return a response from a constructed a www.rightmove.co.uk entered as an argument.
    :param parsed_url: url string needed to make https request.
    :return: response object from get request to city listings on rightmove.co.uk.
    """
    try:
        rm_response = requests.get(parsed_url)
        rm_response.raise_for_status()
    except HTTPError as http_err:
        # Report and return None on a 4xx/5xx status.
        print(f"HTTP error occurred: {http_err}")
        return
    else:
        print(f"Success | Status Code: {rm_response.status_code}")
        return rm_response


def site_parser(response) -> list:
    """
    Makes 'soup' out of html to parse and scrapes relevant data and returns a list of dictionaries containing
    the info.
    :param response: response from http request.
    :return: list of dictionaries
    """
    # TODO: Exception handling if request doesn't get html page because it's out of range.
    html = response.content
    content = BeautifulSoup(html, 'lxml')
    # Each comprehension pulls one column of data out of the property cards.
    titles = [titles.text.strip('\n ') for titles in content.find_all('h2', {'class': 'propertyCard-title'})]
    addresses = [addresses['content'] for addresses in content.find_all('meta', {'itemprop': 'streetAddress'})]
    descriptions = [descriptions.text.strip('\n') for descriptions in
                    content.find_all('div', {'class': 'propertyCard-description'})]
    prices = [prices.text.strip(' ') for prices in content.find_all('div', {'class': 'propertyCard-priceValue'})]
    # TODO: Extract date only. If no date available need to find someway to skip.
    date_added = [date_added.text for date_added in
                  content.find_all('span', {'class': 'propertyCard-contactsAddedOrReduced'})]
    sellers = [sellers.text.strip(' by') for sellers in
               content.find_all('span', {'class': 'propertyCard-branchSummary-branchName'})]
    property_urls = [f'www.rightmove.co.uk{property_urls["href"]}' for property_urls in
                     content.find_all('a', {'class': 'propertyCard-link'})]

    # List of dictionaries made to contain all the data for each property in a single dictionary each.
    results = []
    # NOTE(review): the range starts at 1, skipping the first card —
    # presumably a featured/advert listing; confirm this is intentional.
    for index in range(1, len(titles)):
        data_dict = {
            'Title': titles[index],
            'Address': addresses[index],
            'Description': descriptions[index],
            'Price': prices[index],
            'Date added': date_added[index],
            'Seller': sellers[index],
            'Url': property_urls[index]
        }
        results.append(data_dict)
    return results


# TODO: Exception handling if file isn't there.
def write_to_csv(data_to_write: list):
    """
    Takes in list of data and then writes it to csv file called RealEstateData.csv.
    :param data_to_write: list that is to be written to csv
    """
    # NOTE(review): opening without newline='' inserts blank rows on Windows;
    # the csv docs recommend open(..., 'w', newline='') — confirm and fix.
    with open('RealEstateData.csv', 'w') as csv_file:
        # data_to_write is a list of pages, each a list of row dicts;
        # the header comes from the first row of the first page.
        fieldnames = data_to_write[0][0].keys()
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()
        for _ in data_to_write:
            for row in _:
                writer.writerow(row)
    print("Stored results to 'RealEstateData.csv'")


# --------------------CONTROL FLOW OF PROGRAM--------------------:
# GENERATE URL (DYNAMICALLY GENERATED OR HARD CODED)
# PASS URL INTO `fetch_html` AS AN ARGUMENT
# PASS OUTPUT OF `fetch_html` INTO `site_parser'
# EXPORT RESULTS OF SCRAPING TO CSV

# csv_results = []
# # Scraping the results from the search and saving to csv file
# for page in range(1, 42):
#     page_index = 24 * page
#     url = f'https://www.rightmove.co.uk/property-for-sale/find.html?locationIdentifier=REGION%5E550&' \
#           f'index={page_index}&propertyTypes=&mustHave=&dontShow=&furnishTypes=&keywords='
#     response = fetch_html(url)
#     data = site_parser(response)
#     csv_results.append(data)
#
#
# write_to_csv(csv_results)
# Speed-trap fine: R$ 7.00 for every km/h above the 80 km/h limit.
vel = float(input('Qual a velocidade do carro? (km/h): '))
excesso = vel - 80
if excesso > 0:
    print('\033[31mVocê foi multado em {:.2f} reais por exceder a velocidade em {} km.\033[m'.format(excesso * 7, excesso))
else:
    print('\033[36mVocê não foi multado.\033[36m')
# Reads data for four people and reports the group's average age, the
# oldest man, and how many women are under 20.
media = 0            # sum of ages (divided by 4 at the end)
mais_velho = ' '     # name of the oldest man seen so far
idade_pessoa = 0     # his age
total_mulheres = 0   # women younger than 20
idade_mulheres = 0   # kept from the original (never used)

for pessoa in range(1, 5):
    print('----- PESSOA {} -----'.format(pessoa))
    nome = str(input('Nome: ')).strip()
    idade = int(input('Idade: '))
    sexo = str(input('Sexo [M/F]: ')).strip().upper()
    # The first man always becomes the current oldest; after that, only a
    # strictly greater age replaces him.
    if sexo == 'M' and (pessoa == 1 or idade > idade_pessoa):
        mais_velho = nome
        idade_pessoa = idade
    if sexo == 'F' and idade < 20:
        total_mulheres += 1
    media += idade

print('A média de idade do grupo é {:.1f}.'.format(media / 4))
print('O homem mais velho do grupo tem {} anos e se chama {}.'.format(idade_pessoa, mais_velho.capitalize()))
print('O total de mulheres com menos de 20 anos é {}.'.format(total_mulheres))
# Reads five weights and reports the heaviest and the lightest.
maior = menor = 0
for pessoa in range(1, 6):
    peso = float(input(f'Digite o peso da {pessoa} pessoa: '))
    if pessoa == 1:
        # The first reading initialises both extremes.
        maior = menor = peso
    elif peso > maior:
        maior = peso
    elif peso < menor:
        menor = peso
print(f'O maior peso encontrado foi {maior}Kg;')
print(f'O menor peso encontrado foi {menor}Kg.')
# Palindrome check: read a name, drop every space, compare with its reverse.
nome = str(input('Digite seu nome: ')).upper().strip()
nome = ''.join(nome.split())  # removes internal spaces as well
nomerev = nome[::-1]
print(f'O reverso de {nome} é {nomerev}.')
if nome == nomerev:
    print('O nome É um PALÍNDROMO.')
else:
    print('O nome NÃO É um PALÍNDROMO.')
# Checks whether the surname "Silva" appears anywhere in the full name.
nome = str(input('Qual seu nome completo? ')).strip()
partes = nome.upper().split()
print('O seu nome tem Silva? {}'.format('SILVA' in partes))
""" This program acts like a sketchbook. Drag the turtle to draw freely on the window. """ import turtle # This function jumps the turtle to the place clicked on the screen. def jump(x, y): turtle.up() turtle.goto(x, y) turtle.down() turtle.reset() turtle.shape("turtle") turtle.pencolor("blue") # Color is set to blue turtle.pensize(3) # Pensize is set to 3 turtle.onscreenclick(jump) turtle.ondrag(turtle.goto) # The goto function uses the x and y coordinates passed by ondrag event turtle.speed(0) turtle.done()
import tkinter
import webbrowser

app = tkinter.Tk()  # create window
app.title("google finder")  # set window title

search_label = tkinter.Label(app, text="search")  # "search" caption
search_label.grid(row=0, column=0)  # position on the grid

text_field = tkinter.Entry(app, width=25)  # query entry box
text_field.grid(row=0, column=1)


def search():
    """Open a Google search for the entry text; blank input is ignored."""
    if text_field.get().strip() != "":
        webbrowser.open("https://www.google.com/search?q=" + text_field.get())


def enterBtn(event):
    """Enter-key handler; delegates to search() (was a duplicated copy of it)."""
    search()


button_1 = tkinter.Button(app, text="find", width=15, command=search)  # add button
button_1.grid(row=0, column=2)

text_field.bind("<Return>", enterBtn)  # trigger the search on Enter

app.wm_attributes("-topmost", True)  # keep the window on top
app.mainloop()  # loop until the window is closed
def quickSort(arrList):
    """Sort *arrList* in place with quicksort (first element as pivot)."""
    performQuickSort(arrList, 0, len(arrList) - 1)


def performQuickSort(arrList, start, end):
    """Recursively sort arrList[start:end+1]."""
    if start >= end:
        return
    splitPoint = partition(arrList, start, end)
    performQuickSort(arrList, start, splitPoint - 1)
    performQuickSort(arrList, splitPoint + 1, end)


def partition(arrList, start, end):
    """Partition around arrList[start] and return the pivot's final index.

    Fix: the bounds check `l <= r` is now evaluated BEFORE indexing — the
    original tested `arrList[l] < pivot` first, which raised IndexError
    whenever every element of a rightmost subarray was below the pivot
    (e.g. quickSort([3, 1, 2])).
    """
    pivot, l, r = arrList[start], start + 1, end
    while True:
        while l <= r and arrList[l] < pivot:
            l += 1
        while l <= r and arrList[r] > pivot:
            r -= 1
        if l <= r:
            arrList[l], arrList[r] = arrList[r], arrList[l]
        else:
            # Drop the pivot into its final slot.
            arrList[start], arrList[r] = arrList[r], arrList[start]
            return r


if __name__ == "__main__":
    arr = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    quickSort(arr)
    print(arr)
class SortingClass:
    """One implementation of every classic comparison/distribution sort.

    All methods except BucketSort/RadixSorting sort the given list in place
    and return None; those two return a new result instead.
    """

    def BubbleSortComplete(self, arrList):
        """Bubble sort that always runs the full n-1 passes."""
        for x in range(0, len(arrList) - 1):
            for y in range(0, len(arrList) - x - 1):
                if arrList[y] >= arrList[y + 1]:
                    arrList[y], arrList[y + 1] = arrList[y + 1], arrList[y]

    def BubbleSortStopWhenSorted(self, arrList):
        """Bubble sort that stops early once a pass makes no swap."""
        unSorted, limit = True, len(arrList)
        while unSorted:
            unSorted = False
            limit -= 1
            for y in range(limit):
                if arrList[y] >= arrList[y + 1]:
                    unSorted = True
                    arrList[y], arrList[y + 1] = arrList[y + 1], arrList[y]

    def SelectionSort(self, arrList):
        """Repeatedly swap the minimum of the unsorted tail to the front."""
        for pos in range(len(arrList) - 1):
            min = pos  # index of the smallest element seen so far
            for i in range(pos + 1, len(arrList)):
                if arrList[i] < arrList[min]:
                    min = i
            arrList[min], arrList[pos] = arrList[pos], arrList[min]

    def InsertionSort(self, arrList):
        """Sink each new element leftwards into the sorted prefix.

        Fix: bounds check `y >= 0` now runs before the index — the original
        read arrList[-1] (harmless in Python but wrong) before noticing y
        had gone negative.
        """
        for x in range(len(arrList) - 1):
            check, y = arrList[x + 1], x
            while y >= 0 and arrList[y] > check:
                arrList[y + 1] = arrList[y]
                y -= 1
            arrList[y + 1] = check

    def ShellSort(self, arrList):
        """Gapped insertion sort with the gap halving each round."""
        interval = len(arrList) // 2
        while interval > 0:
            for start in range(interval):
                self.__sortAccording(arrList, start, interval)
            interval //= 2

    def __sortAccording(self, arrList, start, interval):
        # Private helper for ShellSort: insertion-sort the slice
        # arrList[start::interval] in place.
        for x in range(start + interval, len(arrList), interval):
            pos, value = x, arrList[x]
            while pos >= interval and arrList[pos - interval] > value:
                arrList[pos] = arrList[pos - interval]
                pos -= interval
            arrList[pos] = value

    def MergeSort(self, arrList):
        """Classic top-down merge sort, merging back into arrList in place."""
        if len(arrList) > 1:
            mid = len(arrList) // 2
            leftArr = arrList[:mid]
            rightArr = arrList[mid:]
            self.MergeSort(leftArr)
            self.MergeSort(rightArr)
            self.__merge(arrList, leftArr, rightArr)

    def __merge(self, arrList, leftArr, rightArr):
        # Private helper for MergeSort: merge the two sorted halves.
        i, j, k = 0, 0, 0
        while i < len(leftArr) and j < len(rightArr):
            if leftArr[i] < rightArr[j]:
                arrList[k] = leftArr[i]
                i += 1
            else:
                arrList[k] = rightArr[j]
                j += 1
            k += 1
        while i < len(leftArr):
            arrList[k] = leftArr[i]
            k, i = k + 1, i + 1
        while j < len(rightArr):
            arrList[k] = rightArr[j]
            k, j = k + 1, j + 1

    def QuickSort(self, arrList):
        """Quicksort with the first element as pivot."""
        self.__performQuickSort(arrList, 0, len(arrList) - 1)

    def __performQuickSort(self, arrList, start, end):
        # Private helper for QuickSort: recursively sort arrList[start:end+1].
        if start < end:
            splitPoint = self.__partition(arrList, start, end)
            self.__performQuickSort(arrList, start, splitPoint - 1)
            self.__performQuickSort(arrList, splitPoint + 1, end)

    def __partition(self, arrList, start, end):
        # Private helper for QuickSort.  Fix: `l <= r` is checked BEFORE
        # indexing; the original tested arrList[l] first and raised
        # IndexError when every element of a rightmost subarray was
        # smaller than the pivot.
        pivot, l, r = arrList[start], start + 1, end
        while True:
            while l <= r and arrList[l] < pivot:
                l += 1
            while l <= r and arrList[r] > pivot:
                r -= 1
            if l <= r:
                arrList[l], arrList[r] = arrList[r], arrList[l]
            else:
                arrList[start], arrList[r] = arrList[r], arrList[start]
                return r

    def BucketSort(self, arrList):
        """Distribute into len(arrList) buckets, insertion-sort each, concatenate.

        Fixes: the bucket width could be zero (ZeroDivisionError whenever
        max(arrList) < len(arrList)), and a value's bucket index could
        exceed the bucket count (IndexError); the width is now at least 1
        and the index is clamped to the last bucket.
        """
        largest, length = max(arrList), len(arrList)
        size = max(1, largest // length)  # bucket width; never zero
        buckets = [[] for _ in range(length)]
        for i in range(length):
            # Clamp so out-of-range values land in the last bucket.
            j = min(arrList[i] // size, length - 1)
            buckets[j].append(arrList[i])
        for i in range(length):
            self.InsertionSort(buckets[i])
        result = []
        for i in range(length):
            result = result + buckets[i]
        return result

    def RadixSorting(self, arrList):
        """Delegate to the external radix-sort helper.

        The import is done lazily so the rest of the class is usable even
        when the QueueApplicationPrograms package is not on the path.
        """
        from QueueApplicationPrograms.RadixSort import RadxiSort
        return RadxiSort(arrList)


if __name__ == "__main__":
    print("\n\n____________ King Of Sorting Class - Implements All forms of Sorting ____________")
    sortObj = SortingClass()
    base = [22, 99, 45, 87, 1, 9, 42, 66, 23]
    # One independent copy of the sample data per algorithm.
    (ListBubble, ListBubble2, ListSelection, ListInsertion, ListShell,
     ListMerge, ListQuick, ListBucket, ListRadix) = (list(base) for _ in range(9))
    print("\nBefore Sorting : \t\t\t", ListBubble)
    sortObj.BubbleSortComplete(ListBubble)
    sortObj.BubbleSortStopWhenSorted(ListBubble2)
    sortObj.SelectionSort(ListSelection)
    sortObj.InsertionSort(ListInsertion)
    sortObj.ShellSort(ListShell)
    sortObj.MergeSort(ListMerge)
    sortObj.QuickSort(ListQuick)
    ListBucket = sortObj.BucketSort(ListBucket)  # Note the return Function Carefully
    ListRadix = sortObj.RadixSorting(ListRadix)  # Note the return Function Carefully
    print("\nBubble Sort Complete : \t\t",ListBubble,"\nBubble Sort StopAfterSort : ",ListBubble2,"\nSelectionSort : \t\t\t",ListSelection,"\nInsertion Sort : \t\t\t",ListInsertion,"\nShell Sort : \t\t\t\t",ListShell,"\nMerge Sort : \t\t\t\t",ListMerge,"\nQuick Sort : \t\t\t\t",ListQuick,"\nBucket Sort : \t\t\t\t",ListBucket,"\nRadix Sort (Reverse) : \t\t\t[",ListRadix,"]")
    print("\nNote: We imported the Radix Sort Function from QueueApplication & made a Call to that function from our Class Method, We have returned a Queue in Radix Sort which is why we don't get the list representation\n\n__________ All forms of Sorting Works Perfectly __________")
#!python3 # Python Database API import sqlite3 con = sqlite3.connect('D:\SQL\Ex_Files_SQL_EssT\Exercise Files\db\world.db') cursor = con.cursor() sql1 = 'DROP TABLE IF EXISTS EMPLOYEE' sql2 = ''' CREATE TABLE EMPLOYEE ( EMPID INT(6) NOT NULL, NAME CHAR(20) NOT NULL, AGE INT, SEX CHAR(1), INCOME FLOAT ) ''' # executing sql statements #cursor.execute(sql1) #cursor.execute(sql2) # Inserting Data into Table execute() : single row, executeMany() : multiple rows # preparing sql statement rec = (456789, 'Frodo', 45, 'M', 100000.00) sql = ''' INSERT INTO EMPLOYEE VALUES ( ?, ?, ?, ?, ?) ''' # executing sql statement using try ... except blocks try: cursor.execute(sql, rec) con.commit() except Exception as e: print("Error Message :", str(e)) con.rollback() # preparing sql statement records = [ (123456, 'John', 25, 'M', 50000.00), (234651, 'Juli', 35, 'F', 75000.00), (345121, 'Fred', 48, 'M', 125000.00), (562412, 'Rosy', 28, 'F', 52000.00) ] sql = ''' INSERT INTO EMPLOYEE VALUES ( ?, ?, ?, ?, ?) ''' try: cursor.executemany(sql,records) con.commit() except Exception as e: print('Error Message :',str(e)) con.rollback() # Fetching data fetchone() : one record at a time, fetchall() : retrive all [ both in the form of tuples] sql = ''' SELECT * FROM EMPLOYEE ''' try: cursor.execute(sql) except Exception as e: print('Unable to fetch data.') records = cursor.fetchall() for record in records: print(record) # closing the connection con.close() # Object Reational Mapper (ORM) :library that automates the transfer of data stored in relational database tables # into objects that are adopted in application code. ''' Normal Query : SELECT * FROM EMPLOYEE WHERE INCOME=10000.00 Django code : emps = Employee.objects.filter(income=10000.00) '''
#!python3 import time import calendar ticks = time.time() print("No of ticks since 12:00 am January 1st, 1970 : ",ticks ) print(time.localtime()) local_time = time.localtime(time.time()) print(local_time) # convert time in readable format print(time.asctime(local_time)) # calender related functions calc = calendar.month(2020,2) print(calc) print(calendar.isleap(2020))
from credentials import Credential


class User:
    """Represents an application user and keeps a registry of all users."""

    # Class-level registry shared by every User instance.
    users_array = []

    def __init__(self, first_name, last_name, phone_number, email):
        """Store the user's personal details on the instance."""
        self.first_name = first_name
        self.last_name = last_name
        self.phone_number = phone_number
        self.email = email

    def save_user_details(self):
        """Append this user to the shared class registry."""
        User.users_array.append(self)

    @classmethod
    def log_in(cls, name, password):
        """Log a user in against the registry.

        Args:
            name: the user's first name
            password: the user's password

        Returns:
            The shared Credential list when a registered user matches,
            otherwise False.

        NOTE(review): the match compares ``password`` against
        ``last_name`` — confirm that is intended.
        """
        matched = any(
            account.first_name == name and account.last_name == password
            for account in cls.users_array
        )
        return Credential.credential_list if matched else False

    @classmethod
    def display_users(cls):
        """Return the shared registry of users."""
        return cls.users_array
########################################################
class Node(object):
    '''
    Node object used to create Linked list
    '''
    def __init__(self, data):
        # Payload plus a single forward pointer (None == end of list).
        self.data = data
        self.next = None

    def __str__(self):
        # Human-readable dump: this node's data and a peek at the successor.
        nstr = 'Data:' + str(self.data)
        nstr += '\n'
        if (self.next != None):
            nstr += 'Next Node data:' + str(self.next.data)
        else:
            nstr += 'No next Node'
        return nstr

    def get_next(self):
        # Accessor for the successor node (None at the tail).
        return self.next
########################################################


########################################################
class LinkedList(object):
    '''
    Linked list object
    '''
    def __init__(self, head):
        # head is the first Node of the chain.
        self.head = head

    def __str__(self):
        # Render as "d1 -> d2 -> ... -> dn".
        lstr = ""
        tmp = self.head
        while tmp.next:
            lstr += str(tmp.data) + " -> "
            tmp = tmp.next
        lstr += str(tmp.data)
        return lstr

    def add(self, data):
        # Append a new Node at the tail (O(n) walk to the end).
        head = self.head
        while (head.next):
            head = head.next
        head.next = Node(data)
        head.next.next = None
        return

    def reverseBetween(self, m, n):
        # Reverse the sublist between 1-based positions m and n, in place,
        # and return the (possibly new) head Node.
        # NOTE(review): order of the pointer rewrites below is load-bearing;
        # do not reorder. `upd_tail_node` is assigned but never read.
        head = self.head
        if (m == n):
            return head
        if head.next == None:
            return head
        temp_head = head
        short_head = head
        short_tail = head.next
        # Locate the node before position m (short_head) and the node at
        # position n (short_tail).
        for x in range(n):
            if (x == m-2):
                short_head = temp_head
            if (x == n-1):
                short_tail = temp_head
            temp_head = temp_head.next
        upd_head_node = short_head
        upd_tail_node = short_tail
        # Splice the segment ends first; the m == 1 case has no node
        # before the segment, so the head itself moves.
        if (m != 1):
            tmp_node = upd_head_node.next
            upd_head_node.next = short_tail
            upd_head_node = tmp_node
            upd_head_node_next = upd_head_node.next
            upd_head_node.next = short_tail.next
        else:
            tmp_node = upd_head_node.next
            upd_head_node.next = short_tail.next
            upd_head_node_next = tmp_node
        i = 0
        # Walk the interior of the segment, flipping each next pointer.
        while upd_head_node_next != short_tail:
            tmp_node = upd_head_node_next.next
            upd_head_node_next.next = upd_head_node
            upd_head_node = upd_head_node_next
            upd_head_node_next = tmp_node
        upd_head_node_next.next = upd_head_node
        if (m == 1):
            head = short_tail
        return head

    @staticmethod
    def FromList(l : list):
        # Build a Node chain from a Python list; returns the head Node
        # (not a LinkedList). Empty list yields None... but note the
        # trailing `node.next = None` would then raise AttributeError.
        if l:
            head = Node(l[0])
        else:
            head = None
        node = head
        for x in range(1,len(l)):
            node.next = Node(l[x])
            node = node.next
        node.next = None
        return head

    @staticmethod
    def ToList(head: Node):
        # Inverse of FromList: collect node data into a Python list.
        l = []
        if head == None:
            return l
        else:
            while (head.next):
                l.append(head.data)
                head = head.next
            l.append(head.data)
        return l
########################################################


def findMerge_N(l1, l2):
    # Return the first node of l2 whose string form matches a node of l1
    # (hash-set based merge-point lookup), or None when lists never meet.
    # NOTE(review): keys on str(node), i.e. data + successor-data text,
    # not node identity — confirm that is the intended equality.
    temp = l1.head
    map1 = {}
    while temp != None:
        map1[str(temp)] = 1
        temp = temp.get_next()
    temp1 = l2.head
    while temp1 != None:
        if str(temp1) in map1:
            return temp1
        temp1 = temp1.get_next()
    return None


if __name__ == '__main__':
    # Smoke tests: build small lists, append, round-trip, reverse 2..4.
    a = Node(10)
    b = Node(20)
    a.next = b
    print(a)
    print(b)
    ll = LinkedList(a)
    ll.add(30)
    l = [1,2,3,4,5,6]
    ll2 = LinkedList(LinkedList.FromList(l))
    print (ll)
    print (ll2)
    l1 = LinkedList.ToList(ll2.head)
    assert (LinkedList.ToList((ll2.reverseBetween(2,4))) == [1,4,3,2,5,6])
    #print (LinkedList(ll2.reverseBetween(2,4)))
    #print (LinkedList(ll2.reverseBetween(2,4)))
# Commented-out experiments with if/else and print():
# if False:
#     pass
# else:
#     print("ok")
# print("hao")
#
# for i in range(0, 10, 2):
#     print(i)            # newline after each value
#     print(i, end="")    # no newline
#     print(i, end="|")   # no newline, values separated by "|"
#
# for j in range(10, 0, -2):
#     print(j, end=",")   # prints 10,8,6,4,2,

# Print every second element, each followed by "|".
values = [1, 2, 0, 4, 5, 6, 7, 8, 9]
for item in values[::2]:
    print(item, end="|")
# Commented-out demonstrations of exception handling:
#
# a = 1
# b = 0
# c = a / b        # raises ZeroDivisionError, so...
# print("haha")    # ...this line is never executed
#
# a = 1
# b = 0
# try:
#     c = a / b
# except ZeroDivisionError:
#     print("divisor must not be 0")
# print("haha")    # handled, so execution continues here
#
# a = 1
# b = 0
# try:
#     c = a / b
#     print(c)
# except Exception as e:
#     print("divisor must not be 0")
#     print(e)             # "division by zero"
# else:
#     print("else branch")     # only when no exception was raised
# finally:
#     print("finally branch")  # always runs
# print("haha")

# Raise an exception explicitly and print its message.
try:
    raise Exception("错误代码")
except Exception as err:  # Python 3 binds the exception with "as", not a comma
    print(err)
#
# map(func, seq1[, seq2...])
# map() applies func to every element of seq and yields the results
# (an iterator in Python 3); with func=None it behaves like zip().
#
def f(x):
    return x*x

res = map(f, range(10))
mres = list(res)
print("map:", mres)

#
# reduce(func, seq[, init])
# reduce() folds the sequence: each step combines the previous result
# (initially init, or the first element when init is omitted) with the
# next element via the binary function func.
#
from functools import reduce

def add(x, y):
    return x+y

res = reduce(add, range(1, 10))
print("reduce:", res)

#
# filter(function or None, iterable)
# Like map and reduce, filter takes a function and a sequence, but it
# keeps only the elements for which the function returns a truthy value.
#
def is_odd(n):
    return n % 2 == 1

res = list(filter(is_odd, [1,2,3,5]))
print("filter:", res)

#
# sorted(iterable, key=None, reverse=False)
# sorted() is a higher-order function too: pass a key function to get a
# custom ordering.
#
res = sorted([1, -3, -2, 4], key = abs)
print(res)

#
# input()/print()
#
print("{0} + {1} = {2}".format(1, 2, 3))

#
# handle = open(file, mode='r', encoding=None)
#   file     - file name
#   mode     - 'r', 'w', 'w+', 'a+', ...
#   encoding - text encoding
# handle.close() closes the file; other members include
# file.closed, file.mode, file.write(), file.read([count]),
# file.seek(offset[, from]).
#
file = open("lx.txt", "w+")
file.write("2017\n")
file.seek(0, 0)  # rewind to the start of the file
print(file.read())
file.close()  # flushes anything still buffered, then closes

#
# The os module:
#   os.rename(current_file_name, new_file_name)
#   os.remove(file)
#   os.mkdir("newdir") / os.chdir("newdir") / os.getcwd() / os.rmdir("dirname")
#

#
# Inter-process communication
#
from multiprocessing import Process, Queue
import os, time, random

# writer process
def write(q):
    # Fix: the original format string used "$s", which made %-formatting
    # raise TypeError ("not all arguments converted") in the child process.
    print("Process to write: %s" % os.getpid())
    for value in ["A", "B", "C"]:
        print("Put %s to queue..." % value)
        q.put(value)
        time.sleep(random.random())

# reader process
def read(q):
    print("Process to read: %s" % os.getpid())
    while True:
        value = q.get(True)
        print("Get %s from queue." % value)

if __name__ == "__main__":
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    # start the child processes
    pw.start()
    pr.start()
    # wait for the writer to finish
    pw.join()
    # the reader loops forever, so terminate it
    pr.terminate()

#
# Creating a thread
#
from threading import Thread

class MyThread(Thread):
    def __init__(self):
        Thread.__init__(self, name = "My Thread")

    def run(self):
        # self.name replaces the deprecated getName() accessor.
        print("Hello, my name is %s" % self.name)

process = MyThread()
process.start()
# Pangram check, ported from Python 2 to Python 3 (raw_input() and the
# print statements were SyntaxErrors under Python 3).
a = int(input())  # declared length of the string (only used via len(g) below)
g = input()       # the string to test

# Distinct characters, case-folded.
distinct = set(g.lower())

# A pangram needs at least 26 characters and all 26 distinct letters.
if len(g) < 26:
    print("NO")
if len(g) >= 26:
    if len(distinct) == 26:
        print("YES")
    else:
        print("NO")
import calendar

# Interactive month-calendar printer.
print('Welcome to the Calendar application!')
chosen_year = int(input('Please enter any year:'))
chosen_month = int(input('Please enter any month number:'))
# calendar.month() returns the month's calendar as a multi-line string.
print(calendar.month(chosen_year, chosen_month))
print('Have a nice day!')
import itertools as it
import math as m


def get_next_squared_number(n):
    """Walk the neighbor-sum spiral and return the first value written
    that is strictly greater than n (Advent of Code 2017 day 3, part 2):
    each new cell's value is the sum of all already-filled neighbors."""
    # Easy case
    if n == 1:
        return 1

    # Toggle directions: the spiral repeatedly turns right/up/left/down.
    directions = it.cycle([
        (+1, 0),  # right
        (0, +1),  # up
        (-1, 0),  # left
        (0, -1)   # down
    ])
    # All 8 surrounding offsets used for the neighbor sum.
    neighbors = [
        (+1, 0),   # right
        (+1, +1),  # up right
        (0, +1),   # up
        (-1, +1),  # up left
        (-1, 0),   # left
        (-1, -1),  # down left
        (0, -1),   # down
        (+1, -1)   # down right
    ]
    cur_direction = next(directions)
    # Sparse grid: (x, y) -> value; the origin holds the seed value 1.
    positions_and_vals = {}
    cur_position = (0, 0)
    positions_and_vals[cur_position] = 1

    # How to build a spiral:
    # 1 right, 1 up, 2 left, 2 down, 3 right, 3 up, 4 left, 4 down...
    # i.e. each leg length is used twice before growing by one. The leg
    # lengths needed stay below sqrt of the largest value in the spiral.
    sequence_done_one_time = True  # because val 1 was already set
    # +2, because better safe than sorry
    steps_per_direction = list(range(1, m.floor(m.sqrt(n)) + 2))
    steps_per_direction_position = 0
    n_steps_to_go_in_direction = 1
    cur_val = 1

    def sum_over_all_existing_neighbors(
            cur_position,
            positions_and_vals=positions_and_vals,
            neighbors=neighbors
    ):
        # Sum the values of every already-filled neighboring cell.
        val = 0
        for neighbor in neighbors:
            val += positions_and_vals.get((
                cur_position[0] + neighbor[0],
                cur_position[1] + neighbor[1]
            ), 0)
        return val

    for i in range(2, n + 100):  # generous upper bound on steps
        if cur_val <= n:
            # Take one step in the current direction and fill the cell.
            cur_position = (
                cur_position[0] + cur_direction[0],
                cur_position[1] + cur_direction[1]
            )
            cur_val = sum_over_all_existing_neighbors(
                cur_position
            )
            positions_and_vals[cur_position] = cur_val
            n_steps_to_go_in_direction -= 1
            if n_steps_to_go_in_direction == 0:
                # Leg finished: reuse the current leg length once more,
                # then advance to the next length; always turn.
                if sequence_done_one_time:
                    n_steps_to_go_in_direction = steps_per_direction[
                        steps_per_direction_position
                    ]
                    steps_per_direction_position += 1
                else:
                    n_steps_to_go_in_direction = steps_per_direction[
                        steps_per_direction_position
                    ]
                sequence_done_one_time = not sequence_done_one_time
                cur_direction = next(directions)
        else:
            # First value that exceeded n.
            return cur_val


def solve(file):
    """Read the target number from *file* and solve for it."""
    with open(file, 'r') as f:
        number = f.read()
    number = number.rstrip('\n')
    number = int(number)
    return get_next_squared_number(number)


# Test examples
### No real tests on this riddle
#

# Solve riddle
print(solve('riddle.txt'))
def solve(file):
    """Return the half-way-around captcha sum for the digit string in *file*.

    Every digit in the first half that matches the digit half the string's
    length ahead contributes twice (its mirror in the second half matches
    the same way).
    """
    with open(file, 'r') as handle:
        digits = handle.read().rstrip('\n')
    half = len(digits) // 2
    matched = sum(
        int(first)
        for first, second in zip(digits[:half], digits[half:])
        if first == second
    )
    # Each first-half match has an identical second-half mirror.
    return matched * 2


# Test examples
files_and_solutions = {
    'test1.txt': 6,
    'test2.txt': 0,
    'test3.txt': 4,
    'test4.txt': 12,
    'test5.txt': 4,
}
for file, solution in files_and_solutions.items():
    assert solve(file) == solution

# Solve riddle
print(solve('riddle.txt'))
import numpy as np  # NOTE(review): unused in this snippet; kept as a file-level import


def factorial(n):
    """Return n! (the factorial of n), computed iteratively.

    The original recursed with no guard, so a negative n caused
    infinite recursion (RecursionError); it now raises ValueError.

    Args:
        n: non-negative integer.

    Returns:
        n! as an int (1 for n == 0).

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("factorial() is not defined for negative values")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result


print(factorial(3))
def sum_n(n):
    """Return 1 + 2 + ... + n by explicit iteration.

    Args:
        n: inclusive upper bound; n <= 0 yields 0.
    """
    s = 0
    for i in range(1, n + 1):
        s += i
    return s


def gauss_sum(n):
    """Return 1 + 2 + ... + n via Gauss's closed form n*(n+1)/2.

    Renamed from ``sum`` so the built-in ``sum()`` is no longer
    shadowed. // keeps the result an int.
    """
    return n * (n + 1) // 2


print(sum_n(10))
print(gauss_sum(10))
def hanoi(n, from_pos, to_pos, aux_pos):
    """Print the moves that solve the Tower of Hanoi for n discs.

    Moves n discs from peg ``from_pos`` to peg ``to_pos`` using
    ``aux_pos`` as the spare, printing one "a -> b" line per move.
    Returns None. Non-positive n prints nothing (the original recursed
    forever for n <= 0).
    """
    if n < 1:
        return
    # Move the n-1 smaller discs aside, move the largest disc,
    # then stack the smaller discs back on top of it.
    hanoi(n - 1, from_pos, aux_pos, to_pos)
    print(from_pos, "->", to_pos)
    hanoi(n - 1, aux_pos, to_pos, from_pos)


# Call hanoi() directly: the original wrapped each call in print(),
# which appended a spurious "None" line after every run.
print("n=1")
hanoi(1, 1, 3, 2)
print("n=2")
hanoi(2, 1, 3, 2)
print("n=3")
hanoi(3, 1, 3, 2)
def max_val(a):
    """Return the largest element of the non-empty list a.

    Fix: the original seeded the running maximum with 0, so it returned
    0 for all-negative input such as [-5, -2].

    Raises:
        IndexError: if a is empty (no maximum exists).
    """
    m = a[0]
    for x in a[1:]:
        if x > m:
            m = x
    return m


def max_index(a):
    """Return the index of the first occurrence of the maximum of a."""
    max_idx = 0
    for i in range(1, len(a)):
        if a[i] > a[max_idx]:
            max_idx = i
    return max_idx


a = [5, 7, 9, 10, 2, 12, 20, 0]
print(max_val(a))
print(max_index(a))
# -*- coding: utf-8 -*- import matplotlib.pyplot as plt import numpy as np ### y = 3x_1 - 2x_2 + 1 데이터 생성 x1 = np.random.rand(100, 1) # 0~1까지 난수를 100개 만든다 x1 = x1 * 4 - 2 # 값의 범위를 -2~2로 변경 x2 = np.random.rand(100, 1) # x2에 대해서도 같게 x2 = x2 * 4 - 2 y = 3 * x1 - 2 * x2 + 1 ### 학습 from sklearn import linear_model x1_x2 = np.c_[x1, x2] # [[x1_1, x2_1], [x1_2, x2_2], ..., [x1_100, x2_100]] # 형태로 변환 model = linear_model.LinearRegression() model.fit(x1_x2, y) ### 계수, 절편, 결정 계수를 표시 print('계수', model.coef_) print('절편', model.intercept_) print('결정계수', model.score(x1_x2, y)) ### 그래프 표시 y_ = model.predict(x1_x2) # 구한 회귀식으로 예측 plt.subplot(1, 2, 1) plt.scatter(x1, y, marker='+') plt.scatter(x1, y_, marker='o') plt.xlabel('x1') plt.ylabel('y') plt.subplot(1, 2, 2) plt.scatter(x2, y, marker='+') plt.scatter(x2, y_, marker='o') plt.xlabel('x2') plt.ylabel('y') plt.tight_layout() plt.show()
def makeMatrix():
    """Read a row x col integer matrix from stdin and return it as a
    list of lists (prompts are in Korean: row count, column count,
    then one space-separated line per row)."""
    row = int(input("행의 수 입력 : "))
    col = int(input("열의 수 입력 : "))
    print()
    mat = list()
    for i in range(0, row):
        tmp = list()
        tmp = input(str(i+1)+'행의 값을 입력하세요(예, 1 2 3) : \n')
        tmp = list(map(int, tmp.split(' ')))
        mat.append(tmp)
    return mat

def printMatrix(matrix):
    """Print the matrix one row per line, values separated by spaces."""
    for i in matrix:
        for item in i:
            print(item,' ', end='')
        print()

def makeUpper(matrix):
    """Eliminate entries below each pivot to move the matrix toward
    upper-triangular form; mutates the input in place and returns it.

    NOTE(review): each row j is first scaled to match the pivot and then
    replaced by (pivot row - scaled row); a zero pivot tmp[i][i] is not
    handled and true division makes entries fractional -- confirm this
    is the intended elimination scheme.
    """
    tmp = matrix  # alias: the caller's matrix is modified in place
    for i in range(0,len(tmp)):
        # operate with row i as the pivot row
        start = tmp[i]
        frontNum = tmp[i][i]
        for j in range(i + 1, len(tmp)):
            # eliminate in the rows after i
            front = tmp[j][i]
            if front == 0 :
                continue
            for k in range(0, len(tmp[j])):
                tmp[j][k] = tmp[j][k] / front * frontNum
                tmp[j][k] = start[k] - tmp[j][k]
    matrix = tmp
    return matrix

if __name__ == '__main__':
    matrix = makeMatrix()
    print()
    printMatrix(matrix)
    print('\nupper 변경 후\n')
    matrix = makeUpper(matrix)
    printMatrix(matrix)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 22 10:46:14 2019

@author: markashworth
"""

"""
Program to answer Q's 2 - 6 of Homework 6.
"""

import matplotlib.pyplot as plt
import numpy as np
import urllib.request
import pandas as pd


class NL_linearRegression(object):
    """
    Linear regression classifier with a non-linear feature transformation
    applied before the fit.
    """
    def __init__(self, data):
        """
        data is the training data: a data frame with columns x1, x2, y.
        """
        self.data = data
        # N x 2 matrix of raw inputs.
        self.X = np.vstack((self.data['x1'], self.data['x2'])).transpose()
        self.Y = np.array(self.data['y'])
        # Non-linear features and fitted weights, computed eagerly.
        self.Z = self.mapZ()
        self.w = self.calcW()

    def mapZ(self):
        """
        Map the raw inputs to the non-linear z coordinates
        (1, x1, x2, x1^2, x2^2, x1*x2, |x1-x2|, |x1+x2|).
        """
        x1 = self.X[:,0]
        x2 = self.X[:,1]
        return np.vstack((np.ones(np.size(x1)), x1, x2, x1**2, x2**2, x1*x2, \
                          np.abs(x1 - x2), np.abs(x1 + x2))).transpose()

    def calcW(self):
        """
        Calculate the weights via the linear regression algorithm
        (pseudo-inverse of Z applied to Y).
        """
        pinvZ = np.linalg.pinv(self.Z)
        return np.dot(pinvZ, self.Y)

    @staticmethod
    def classificationError(w, Z, Y):
        """
        Calculate the fraction of points misclassified by sign(w . z).
        """
        sign = lambda x: x and (-1 if x < 0 else 1)
        Error = 0
        N = np.size(Y)
        for i in range(N):
            if sign(np.dot(w, Z[i])) != Y[i]:
                Error += 1
        return (1/N)*Error


class RNL_linearRegression(NL_linearRegression):
    """
    Regularised linear regression with a non-linear transformation applied.
    We use a weight-decay regularisation method.
    """
    def __init__(self, data, k):
        """
        k is the regularisation exponent: lambda = 10**k.
        """
        self.lam = 10**k
        super().__init__(data)

    def calcW(self):
        """
        Calculate the weights with weight decay:
        w = (Z^T Z + lambda I)^-1 Z^T Y.
        """
        N = np.shape(self.Z)[1]
        return np.dot( np.linalg.inv(np.dot(self.Z.transpose(), self.Z) \
                      + self.lam*np.identity(N)), \
                      np.dot(self.Z.transpose(), self.Y))


def data_processing(name, url):
    """
    Download a .dat file and convert it to a pandas data frame
    with columns x1, x2, y.
    NOTE(review): the download path is hard-coded to the author's machine.
    """
    urllib.request.urlretrieve(url, \
        '/Users/markashworth/PythonTings/Learning from' \
        + ' Data/Homework 6/' + name + '.dat')
    return pd.read_csv(name + '.dat', sep = ' ', header=None, skipinitialspace=True,\
                       names=['x1', 'x2', 'y'])

# Q(5)
def findSmallestEout(train_dat, test_dat, K):
    """
    Train with each regularisation exponent k in K and return a dict
    k -> out-of-sample classification error on test_dat.
    """
    EoutDict = {}
    for k in K:
        Train = RNL_linearRegression(train_dat, k)
        w = Train.w
        Test = RNL_linearRegression(test_dat, k)
        Eout = Train.classificationError(w, Test.Z, Test.Y)
        EoutDict.update({k:Eout})
    return EoutDict

def visualiseSmallestEout(EoutDict):
    """
    Scatter-plot E_out against the exponent k (companion to
    findSmallestEout).
    """
    plt.figure(0)
    for i in EoutDict.keys():
        plt.plot(i, EoutDict[i], 'bo')
    plt.xlabel('Exponent k')
    plt.ylabel('E_out')
#2-2) Byte, Kilobyte, Megabyte, Gigabyte converter
#Insert bytes and then your program shows many bytes in a kilobyte, megabyte and gigabyte.
#You must real mega, giga and tera definations (so for example 2^20 or 2^30)
#background information http://www.whatsabyte.com/
#Example output:
#Give your input in bytes: 1563200
#bytes 1563200 B
#Kilobytes 1526.5625 KB
#Megabytes 1.49078369140625 MB
#Gigabytes 0.001455843448638916 GB

# Fix: the original prints used "print("bytes %d B", %luku)", which is a
# SyntaxError, and labelled every line "B"; the units and float values now
# match the documented example output.
luku = int(input("anna bittimr: "))
Kluku = luku / 1024
Mluku = Kluku / 1024
Gluku = Mluku / 1024
print("bytes %d B" % luku)
print("Kilobytes %s KB" % Kluku)
print("Megabytes %s MB" % Mluku)
print("Gigabytes %s GB" % Gluku)
#4-3) list calculation with type detection
#calculate sum and average of elements in the list
#ignore all values in the list that are not numbers
#Example Output:
#Sum 220/14 and average 15.71

# Renamed from ``list`` so the built-in list() is no longer shadowed.
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 20, 30, "aa", "bee", 11, "test", 51, 63]


def summalaskuri(a):
    """Print and return the sum, count and average of the numeric
    elements of a.

    Non-numeric elements are skipped; bools are excluded too, matching
    the original exact type() checks. An input containing no numbers no
    longer crashes with ZeroDivisionError.

    Args:
        a: iterable of arbitrary values.

    Returns:
        (total, count, average); (0, 0, 0.0) when no numbers were found.
    """
    total = 0
    count = 0
    for item in a:
        # isinstance covers int/float; bool (a subclass of int) is
        # excluded on purpose, as in the original type()-based check.
        if isinstance(item, (int, float)) and not isinstance(item, bool):
            total += item
            count += 1
    if count == 0:
        print("Sum 0/0 and average 0.00")
        return (0, 0, 0.0)
    average = total / count
    print("Sum %d/%d and average %.2f" % (total, count, average))
    return (total, count, average)


summalaskuri(numbers)
from important_objects import *
import re


def tokenize_file(file):
    """Tokenize *file* character by character into lists of tokens.

    Skips blank and '#' comment lines, drops parentheses while counting
    them, and appends the accumulated tokens to the result whenever the
    left/right paren counts balance. Raises ValueError if the file ends
    with unbalanced parentheses.
    """
    file = open(file, "r")
    result = []
    left_parens = 0
    right_parens = 0
    tmp_lst = []
    for line in file:
        if not line or line[0] == '#' or line[0] == '\n':
            continue
        tmp_str = ""
        line = line.strip()
        for c in line:
            if c == "(":
                left_parens += 1
            elif c == ")":
                right_parens += 1
            elif c == " " and tmp_str != ' ':
                # A space ends the current token.
                tmp_lst.append(tmp_str)
                #print(tmp_str)
                tmp_str = ""
            elif c == '\n':
                pass
            else:
                tmp_str += c
        tmp_lst.append(tmp_str)
        #print(left_parens, '------', right_parens)
        if left_parens == right_parens and tmp_str != " ":
            # Parens balanced: this statement is complete.
            result.append(tmp_lst)
            tmp_lst = []
    if left_parens != right_parens:
        raise ValueError()
    file.close()
    return result


def tokenize_file2(file):
    """Alternative tokenizer built on split() instead of per-character
    scanning.

    One-line balanced statements become flat token lists; statements that
    span lines become lists of per-line token lists.
    NOTE(review): in the multi-line branch the paren counts are only taken
    per line, never accumulated -- confirm the loop terminates as intended
    for statements that open more parens than they close on a line.
    """
    file = open(file, "r")
    result = []
    line = file.readline()
    while line:
        my_elems = []
        elems = line.strip()
        if (not elems) or elems[0] == ' ' or elems[0] == '#':
            line = file.readline()
            continue
        count = count_parens(line)
        if count[0] == count[1]:
            # if the statement has the same number of left and right parens on one line
            line = line.replace('(', ' ')
            line = line.replace(')', ' ')
            my_elems = line.split()
            result.append(my_elems)
            line = file.readline()
        else:
            while(count[0] != count[1]):
                count = count_parens(line)
                line = line.replace('(', ' ')
                line = line.replace(')', ' ')
                tmp_lst = line.split()
                line = file.readline()
                my_elems.append(tmp_lst)
            result.append(my_elems)
    file.close()
    return result


def parse_line(text):
    # Placeholder: not yet implemented.
    pass


def count_parens(e):
    """Return [left_count, right_count] of parentheses in string e."""
    left_paren = 0
    right_paren = 0
    for c in e:
        if c == '(':
            left_paren += 1
        if c == ')':
            right_paren += 1
    return [left_paren, right_paren]


# Check and see if txt file statement is one line or multiple (return true if parens are balanced in one line)
def one_liner(e):
    """Return True when e contains an equal number of '(' and ')'."""
    left_paren = 0
    right_paren = 0
    for c in e:
        if c == '(':
            left_paren += 1
        if c == ')':
            right_paren += 1
    if left_paren == right_paren:
        return True
    else:
        return False


# Parse suggestions
def tokenize_suggestion_file(file):
    """Parse a suggestion file into a list of suggestions.

    Each suggestion is collected as a list of token lists; a
    ':subgoals' section is gathered line by line until ':result-step',
    and a ':result-step' line closes the current suggestion.
    """
    file = open(file, "r")
    list_of_suggestions = []
    single_suggestion = []
    line = file.readline()
    parts_of_suggestion = []
    while line:
        line = line.strip()
        if line == '':
            line = file.readline()
            continue
        if line[:9] == ':subgoals':
            # Collect every subgoal line until the result step begins.
            line = line[10:]
            subgoal_lst = []
            while line:
                #print(line)
                if line[:12] == ':result-step':
                    break
                line = line.replace('(', '')
                line = line.replace(')', '')
                elems = line.split()
                subgoal_lst.append(elems)
                line = file.readline()
                line = line.strip()
            parts_of_suggestion.append(subgoal_lst)
            continue
        if line[0] == '(':
            line = line.replace('(', '')
            line = line.replace(')', '')
            elems = line.split()
            parts_of_suggestion.append(elems)
        if line[:12] == ":result-step":
            # The result step terminates the current suggestion.
            line = line[13:]
            line = line.replace('(', '')
            line = line.replace(')', '')
            elems = line.split()
            parts_of_suggestion.append(elems)
            list_of_suggestions.append(parts_of_suggestion)
            parts_of_suggestion = []
            single_suggestion = []
        line = file.readline()
    file.close()
    return list_of_suggestions
from poker_game.config import SetOfCard


class Card(SetOfCard):
    """A single playing card identified by a mark (suit) and a number.

    Attributes
    ----------
    mark : int
        index of the suit mark (♠︎❤︎♦︎♣️) in SetOfCard.MARKS
    number : int
        card number
    rank : str
        display rank for the number, via SetOfCard.NUMBER_TO_RANK
    pair : str
        combined "mark-rank" display string
    """
    def __init__(self, card_mark, card_number):
        """
        Parameters
        ----------
        card_mark : int
            index of the suit mark (♠︎❤︎♦︎♣️)
        card_number : int
            card number
        """
        self.mark = card_mark
        self.number = card_number
        # Display rank for this number, per the SetOfCard lookup table.
        self.rank = self.NUMBER_TO_RANK[self.number]
        self.pair = f"{self.MARKS[self.mark]}-{self.rank}"
        # Also expose the inverse mapping on the instance.
        self.RANK_TO_NUMBER = SetOfCard.RANK_TO_NUMBER

    def __repr__(self):
        """Return the combined mark-and-rank string for this card.

        Examples
        --------
        >>> card = Card(2, 4)
        >>> print(card)
        ♦︎4
        """
        return self.pair
#Tweets lines from a text file
#started 26/08/13
#by iceteawithlemon
#using python-twitter, ported from Python 2 to Python 3
#(the Python 2 print statements were SyntaxErrors under Python 3)

import twitter
import login

# Log in with the developer credentials supplied by the login module.
c_k, c_s, a_t_k, a_t_s = login.credentials()
api = twitter.Api(c_k, c_s, a_t_k, a_t_s)
print("\nSuccesfully authentificated.\n")

# Read the text file into a list, one line per candidate tweet, and warn
# about any tweet that is too long.
with open('totweet.txt', 'r') as t:
    totweet = t.read()
tweetList = totweet.split("\n")
for tweet in tweetList:
    if len(tweet) > 139:
        print("This tweet is too long! Please shorten it!\n")
        print(tweet, "\n")
print("Text file is ready, about to fetch existing tweets from timeline.\n")

# Fetch recent timeline tweets and collect their texts.
statusesRaw = api.GetUserTimeline("Facts_Bot", count=20)
statuses = []
for status in statusesRaw:
    # NOTE(review): the Python 2 original round-tripped the text through
    # .encode("utf-8", "ignore"); in Python 3 status.text is already str.
    statuses.append(status.text)
print("List of existing tweets ready, about to try and tweet.\n")

# Walk the candidates; post the first one not already on the timeline.
Tweeted = False
i = 0
while not Tweeted and i < len(tweetList):
    if i == 0:
        tweetList[i] = tweetList[i][3:]  # NOTE(review): presumably strips a BOM -- confirm
    if tweetList[i] not in statuses:
        print("I am going to tweet this now:\n")
        print(tweetList[i], "\n")
        justTweeted = api.PostUpdate(tweetList[i])
        print("I have now tweeted this:\n")
        print(justTweeted.text)
        Tweeted = True
    else:
        i += 1
# Fix: the original tested i > len(tweetList), which the loop can never
# produce (it exits with i == len(tweetList)), so the message never printed.
if not Tweeted and i >= len(tweetList):
    print("Everything in this file has already been tweeted: please add new tweets to totweet.txt")
""" Utility functions which may be useful for clients. """ import json import Crypto from Crypto.PublicKey import RSA def to_json_string(obj): """Convert basic Python objects into a JSON-serialized string. This can be useful for converting objects like lists or dictionaries into string format, instead of deriving your own data format. This function can correctly handle serializing RSA key objects. This uses the JSON library to dump the object to a string. For more information on JSON in Python, see the `JSON library <https://docs.python.org/3/library/json.html>`_ in the Python standard library. :param obj: A JSON-serializable Python object :returns: A JSON-serialized string for `obj` :raises TypeError: If `obj` isn't JSON serializable. """ class CustomEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, Crypto.PublicKey.RSA._RSAobj): return {'__type__': '_RSAobj', 'PEMdata': str(obj.exportKey(format='PEM'), 'utf-8')} if isinstance(obj, Crypto.PublicKey.ElGamal.ElGamalobj): return {'__type__': 'ElGamalobj', 'y': obj.y, 'g': obj.g, 'p': obj.p} return json.JSONEncoder.default(self, obj) return json.dumps(obj, cls=CustomEncoder) def from_json_string(s): """Convert a JSON string back into a basic Python object. This function can correctly handle deserializing back into RSA key objects. This uses the JSON library to load the object from a string. For more information on JSON in Python, see the `JSON library <https://docs.python.org/3/library/json.html>`_ in the Python standard library. :param str s: A JSON string :returns: The Python object deserialized from `s` :raises JSONDecodeError: If `s` is not a valid JSON document. :raises TypeError: If `s` isn't a string. 
""" def Custom_decoder(obj): if '__type__' in obj and obj['__type__'] == '_RSAobj': return RSA.importKey(obj['PEMdata']) if '__type__' in obj and obj['__type__'] == 'ElGamalobj': return ElGamal.construct(tuple(obj['p'], obj['g'], obj['y'])) return obj return json.loads(s, object_hook=Custom_decoder) def RSA_to_json_string(obj): """Convert RSA key Python objects into a JSON-serialized string. This function can correctly handle serializing RSA key objects. This uses the JSON library to dump the object to a string. For more information on JSON in Python, see the `JSON library <https://docs.python.org/3/library/json.html>`_ in the Python standard library. This function makes sure that the order of keys in a JSON is deterministic (it always serializes the same data in the same way). If you decide to use your own serialization make sure it is deterministic as well. :param obj: A JSON-serializable Python object :returns: A JSON-serialized string for `obj` :raises TypeError: If `obj` isn't JSON serializable. """ class RSAEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, Crypto.PublicKey.RSA._RSAobj): return {'__type__': '_RSAobj', 'PEMdata': str(obj.exportKey(format='PEM'), 'utf-8')} return json.JSONEncoder.default(self, obj) return json.dumps(obj, cls=RSAEncoder, sort_keys=True) def RSA_from_json_string(s): """Convert a JSON string back into a RSA key Python object. This function can correctly handle deserializing back into RSA key objects. This uses the JSON library to load the object from a string. For more information on JSON in Python, see the `JSON library <https://docs.python.org/3/library/json.html>`_ in the Python standard library. :param str s: A JSON string :returns: The Python object deserialized from `s` :raises JSONDecodeError: If `s` is not a valid JSON document. :raises TypeError: If `s` isn't a string. 
""" def RSA_decoder(obj): if '__type__' in obj and obj['__type__'] == '_RSAobj': return RSA.importKey(obj['PEMdata']) return obj return json.loads(s, object_hook=RSA_decoder) def compute_edits(old, new): """Compute the in-place edits needed to convert from old to new Returns a list ``[(index_1,change_1), (index_2,change_2)...]`` where ``index_i`` is an offset into old, and ``change_1`` is the new bytes to replace. For example, calling ``compute_edits("abcdef", "qbcdzw")`` will return ``[(0, "q"), (4, "zw")]``. That is, the update should be preformed as (abusing notation): ``new[index:index+len(change)] = change`` :param str old: The old data :param str new: The new data :returns: A list of tuples (index_i, change_i) """ deltas = [] delta = None for index, (n, o) in enumerate(zip(new, old)): if n == o: if delta is not None: deltas.append(delta) delta = None else: if delta is None: delta = (index, []) delta[1].append(n) if delta is not None: deltas.append(delta) return [(i, "".join(x)) for i, x in deltas]
# Two-player tic-tac-toe, ported from Python 2 to Python 3 (the print
# statements and the eval-ing input() were Python 2 only).

# Winning index triples for the 3x3 board.
WIN_LINES = [
    (0, 1, 2), (3, 4, 5), (6, 7, 8),  # rows
    (0, 3, 6), (1, 4, 7), (2, 5, 8),  # columns
    (0, 4, 8), (2, 4, 6),             # diagonals
]


def _print_board(arr):
    """Print the current 3x3 board."""
    print("\n ", arr[0], " | ", arr[1], " | ", arr[2], " \n")
    print(" -----|-----|------\n")
    print(" ", arr[3], " | ", arr[4], " | ", arr[5], " \n")
    print(" -----|-----|------\n")
    print(" ", arr[6], " | ", arr[7], " | ", arr[8], " \n")


def _is_winner(arr, mark):
    """Return True if ``mark`` occupies any complete winning line."""
    return any(all(arr[i] == mark for i in line) for line in WIN_LINES)


again = 1
while again == 1:
    # Fresh board: cells show their own 1-based number until taken.
    arr = [str(n) for n in range(1, 10)]
    count = 0
    while count < 9:
        _print_board(arr)
        print("Player-", count % 2 + 1)
        val = int(input("Enter you location: "))
        # Re-prompt on an occupied cell, and also on an out-of-range cell
        # (the original crashed on val > 9 and indexed from the end on 0).
        while not (1 <= val <= 9) or arr[val - 1] == 'O' or arr[val - 1] == 'X':
            val = int(input("Invalid input, Enter Again: "))
        arr[val - 1] = 'O' if count % 2 == 0 else 'X'
        count = count + 1
        if _is_winner(arr, 'O'):
            print("Congratulations, Player 1 is the winner\n")
            break
        elif _is_winner(arr, 'X'):
            print("Congratulations, Player 2 is the winner\n")
            break
        if count == 9:
            print("This game is a DRAW \n")
            break
    again = int(input("Do you want to play again(Enter '1' for yes): "))
import random


class GuessGame:
    """A simple number-guessing game: the game draws a secret number in
    [1, difficulty] and asks the user to guess it."""

    def __init__(self, difficulty, user_name):
        """
        Args:
            difficulty: inclusive upper bound of the secret number.
            user_name: name shown in the welcome banner.
        """
        self.difficulty = difficulty
        self.user_name = user_name
        self.generated_secret_number = self.generate_number()
        self.user_input_value = 0
        self.result = False

    def compare_results(self):
        """Return True if the recorded guess equals the secret number."""
        return self.user_input_value == self.generated_secret_number

    def generate_number(self):
        """Draw the secret number uniformly from [1, difficulty]."""
        return random.randint(1, self.difficulty)

    def get_guess_from_user(self):
        """Prompt until a valid integer guess in [1, difficulty] is given.

        Fix: the original retried invalid input via recursion but never
        returned the recursive call's value, so any invalid entry made
        the final result None. An explicit loop returns the guess
        reliably.
        """
        while True:
            tmp = input("Can you Guess What number i'm Thinking of ? it's between {} and {}\n".format(1, self.difficulty))
            if tmp.isdigit() and 1 <= int(tmp) <= self.difficulty:
                return int(tmp)
            print("Wrong input..")

    def welcome_user(self):
        """Print the welcome banner."""
        print("""
        Welcome {} to Guess Game!
        """.format(self.user_name))

    def play(self):
        """Run one round: welcome, read a guess, record the outcome."""
        self.welcome_user()
        self.user_input_value = self.get_guess_from_user()
        self.result = self.compare_results()
# Data generator for learning experiments: parses a "ground function" file
# (either a nested boolean function, NBF, or a threshold function, TF) and
# produces labelled random example vectors from it.
import re
import sys
import random
import vector_utils  # project-local helper; provides dot_product()


def bool_dist_generator(n):
    """
    Generates a 'n' element vector following uniform boolean distribution
    Generates random n bit data of count following uniform distribution
    :param n: number of inputs
    :return: returns a 'n' element boolean vector as a list
    """
    input_data = list()
    for i in range(0, n):
        # Create 1 input vector of size 'n': each bit is 0 or 1 with equal probability.
        input_data.append(random.randint(0, 1))
    return input_data


def spherical_dist_generator(n):
    """
    Generates a 'n' element vector following spherical distribution
    Generates random n bit data of count following spherical distribution
    :param n: number of inputs
    :return: returns a vector of 'n' element spherical distribution as a list
    """
    # NOTE(review): random.random() is in [0, 1), so all components are
    # non-negative -- this samples only the positive orthant of the unit
    # sphere, not a uniform spherical distribution. Confirm intent.
    input_data = list()
    sum_sqrt = 0
    for i in range(0, n):
        # Find a random float term
        term = random.random()
        input_data.append(term)
        # Calculate the sum of squares of all terms to normalize the input
        sum_sqrt += term ** 2
        # print("term {}-->{}".format(j, term))
    # print("Squared sum --> {}".format(sum_sqrt))
    sum_sqrt = sum_sqrt ** (1/2.0)  # Euclidean norm of the raw vector
    # print("Sq root sum --> {}".format(sum_sqrt))
    # Normalize the vector to unit length.
    for i in range(0, n):
        input_data[i] = input_data[i]/sum_sqrt
    return input_data


def parse_ground_file(file_name):
    """
    Parses the ground file to return a nbf or threshold function
    :param file_name: name of file containing the ground function
    :return: returns tuple (func_type, operands, operators) for nbf and
             (func_type, terms, threshold) for threshold function
    """
    # NOTE(review): the file handle is never closed; a 'with open(...)' block
    # would be safer. Also, '|' inside a character class like [+|-] is a
    # literal pipe, not alternation -- '[+-]' was probably intended. Confirm
    # before changing, since '|' tokens would currently be accepted.
    result = ''
    in_file = open(file_name)
    line = in_file.readline().strip()
    # print("Input line 1 --> {}".format(line))
    if line == 'NBF':
        # print("Parsing nested boolean function")
        func_type = 'NBF'
        line = in_file.readline().strip()
        # An empty second line means the constant-0 function: no operands/operators.
        if not line:
            return(func_type, None, None)
        # print("Input line 2 --> {}".format(line))
        # Match a sequence like "+1 AND -2 OR +3" (signed indices joined by AND/OR).
        func = re.search(re.compile('([+|-]\d+(\s+(AND|OR)\s+[+|-]\d+)*)?'),line).group(0)
        # print("Ground function -->{}".format(func))
        if func:
            pattern_operands = re.compile('\s*([+|-]\d+)\s*')
            pattern_operators = re.compile('\s*(AND|OR)\s*')
            operands = re.findall(pattern_operands, func)
            # print("operands = {}".format(operands))
            for i in range(len(operands)):
                try:
                    operands[i] = int(operands[i])
                except ValueError:
                    # Handle the exception
                    print('Please enter integer terms for NBF function')
            operators = re.findall(pattern_operators, func)
            # print("operators = {}".format(operators))
            result = (func_type, operands, operators)
            print("ground function(function type, operands, operators) = {}".format(result))
    elif line == 'TF':
        # print("Parsing threshold function")
        func_type = 'TF'
        line = in_file.readline()
        # print("Input line 2 --> {}".format(line))
        # Search for the threshold value (a signed number on its own line).
        threshold = re.search(re.compile('[+|-]\d+'), line).group(0)
        if threshold:
            threshold = float(threshold)
            # print("threshold = {}".format(threshold))
            line = in_file.readline()
            # print("Input line 3 --> {}".format(line))
            # The coefficients are a whitespace-separated list of signed numbers.
            func = re.search(re.compile('([+|-]\d+(\s+[+|-]\d+)*)?'), line).group(0)
            if func:
                # print("Ground function -->{}".format(func))
                pattern_term = re.compile('\s*([+|-]\d+)\s*')
                terms = re.findall(pattern_term, func)
                for i in range(len(terms)):
                    try:
                        terms[i] = float(terms[i])
                    except ValueError:
                        # Handle the exception
                        print('Please enter numeric terms for TF function')
                # print("terms = {}".format(terms))
                result = (func_type, terms, threshold)
                # print("ground function(function type, terms, threshold) = {}".format(result))
                return result
            else:
                result = None
        else:
            result = None
    return result


def cal_thresh_fn(input_x, terms, threshold):
    """
    Calculate the value of the threshold function 'y'
    :param input_x: input values
    :param terms: coefficients of the threshold function
    :param threshold: threshold value
    :return: 'y' value as per threshold function (1 if dot(x, terms) >= threshold else 0)
    """
    # print("input_x --> {}".format(input_x))
    # print("terms --> {}".format(terms))
    lhs = vector_utils.dot_product(input_x,terms)
    if lhs >= threshold:
        return 1
    else:
        return 0


def find_no_terms_nbf(operands):
    """
    Finds number of inputs as per nbf in the ground function
    :param operands: The indices of the operands (1-based, sign encodes negation)
    :return: No. of inputs (largest absolute index referenced)
    """
    max_index = 0
    for operand in operands:
        if max_index < abs(operand):
            max_index = abs(operand)
    return max_index


def cal_nbf_fn(input_x, operands, operators):
    """
    Calculate the value of the nbf 'y'
    :param input_x: input bit values
    :param operands: signed 1-based input indices; a negative index means NOT(x)
    :param operators: 'AND'/'OR' tokens joining consecutive operands
    :return: 'y' value as per nbf function
    """
    # NOTE(review): this builds a left-nested expression string like
    # "((x1 and x2) or x3)" and eval()s it. eval on constructed strings is
    # risky in general; here the pieces are only digits and and/or tokens.
    print("input_x = {}".format(input_x))
    no_terms = len(operands)
    # Open one paren per join so the expression nests to the left.
    expression = '(' * (no_terms-1)
    # print("no_terms {}".format(no_terms))
    for i in range(no_terms):
        print("i = {}".format(i))
        # print("operands[i] = {}".format(operands[i]))
        # Index 0 is meaningless in the 1-based signed encoding.
        if operands[i] == 0:
            sys.exit("NOT PARCEABLE")
        if operands[i] > 0:
            term = input_x[operands[i] - 1]
        else:
            # Negative index: use the complement of the bit.
            term = 1 - input_x[abs(operands[i]) - 1]
        if i < no_terms - 1:
            expression += str(term)
            # Close the previous nesting level before appending the operator.
            if i != 0:
                expression += ")"
            expression += " " + str(operators[i]).lower() + " "
        else:
            expression += str(term) + ")"
    # print("expression --> {}".format(expression))
    # print("result -- {}".format(eval(expression)))
    return eval(expression)


def generate_data(ground_file, count, dist):
    """
    Generates data for training and testing
    :param ground_file: name of file containing the ground function
    :param count: no. of examples required
    :param dist: distribution followed by the inputs ('bool' or 'sphere')
    :return: list of data examples with each example being a tuple ([x],y)
    """
    # NOTE(review): 'type' shadows the builtin of the same name inside this
    # function; harmless here but worth renaming.
    result = parse_ground_file(ground_file)
    data = list()
    if result:
        type = result[0]
        if type == 'NBF':
            print('parsed nbf')
            type, operands, operators = result
            if not operands and not operators:
                # Assuming 3 inputs in case of the always-0 function
                # (the original comment said 5; the code uses 3).
                no_inputs = 3
                for i in range(count):
                    # Ignore the distribution value
                    input_x = bool_dist_generator(no_inputs)
                    data.append((input_x, 0))
            else:
                no_inputs = find_no_terms_nbf(operands)
                print("no of inputs = {}".format(no_inputs))
                for i in range(count):
                    # Ignore the distribution value
                    input_x = bool_dist_generator(no_inputs)
                    data.append((input_x, cal_nbf_fn(input_x, operands, operators)))
            print("Data : {}".format(data))
            return data
        elif type == 'TF':
            # print( 'parsed tf')
            type, terms, threshold = result
            no_inputs = len(terms)
            # print("no of inputs = {}".format(no_inputs))
            # NOTE(review): an unrecognised 'dist' leaves dist_fn unbound and
            # raises NameError below -- confirm only 'bool'/'sphere' are used.
            if dist == 'bool':
                dist_fn = bool_dist_generator
            elif dist == 'sphere':
                dist_fn = spherical_dist_generator
            for i in range(count):
                input_x = dist_fn(no_inputs)
                data.append((input_x, cal_thresh_fn(input_x, terms, threshold)))
            # print("Data : {}".format(data))
            return data
        else:
            sys.exit('NOT PARCEABLE')
    else:
        sys.exit('NOT PARCEABLE')

# print(bool_dist_generator(3))
# print(spherical_dist_generator(3))
# parse_ground_file('sample_function_2')
# generate_data('sample_function', 10, 'bool')
#! Python 3
# this program will allow you to enter any block of text and will tweeze out the
# phone numbers, the emails, or both depending on what you want
import pyperclip, re, os

which_thing = input("would you like me to find, email, phone numbers, or both? (P for phonenumbers, E for email, and B for both)")

phone_number = re.compile(r'''(
    (\d{3} | \(\d{3}\))?    #area code
    (\s|-|\.)?              #separator
    (\d{3})                 #first 3 digits
    (\s|-|\.)?              #separator
    (\d{4})                 #last 4 digits
    )''', re.VERBOSE)

email = re.compile(r'''(
    [a-zA-Z0-9._%+-]+       #username (BUG FIX: was 'a-zA-z', whose A-z range also matched [ \ ] ^ _ `)
    @                       #@ symbol
    [a-zA-Z0-9.-]+          #domain name
    (\.[a-zA-Z]{2,4})       #dot-something
    )''', re.VERBOSE)

# The text to scan comes from the clipboard.
text = str(pyperclip.paste())


#function for adding the matches to the clipboard
def add_to_clip(matches, matches2):
    """Copy whichever match lists are non-empty to the clipboard.

    :param matches: phone-number matches (may be empty)
    :param matches2: email matches (may be empty)
    :return: a human-readable message describing what was copied
    """
    if len(matches) > 0 and len(matches2) > 0:
        all_the_matches = matches + matches2
        pyperclip.copy(' '.join(all_the_matches))
        message = "both phone numbers and emails have been copied to the clipboard"
        return message
    elif len(matches) > 0 and len(matches2) == 0:
        pyperclip.copy(' '.join(matches))
        message = "phone numbers were copied to the clipboard"
        return message
    elif len(matches) == 0 and len(matches2) > 0:
        pyperclip.copy(' '.join(matches2))
        message = "emails were copied to the clipboard"
        return message
    else:
        message = "something has gone wrong"
        return message


#function for finding all the emails
def email_finder(this_text, email_regex):
    """Return every email address matched by email_regex in this_text."""
    email_matches = []
    for groups in email_regex.findall(this_text):
        # groups[0] is the whole-match capture group.
        email_matches.append(groups[0])
    if len(email_matches) > 0:
        print("I found some emails")
        return email_matches
    else:
        print("none were found")
        return email_matches


#function for finding all the phone numbers in text block
def phone_number_finder(this_text, phone_number_regex):
    """Return every phone number in this_text, normalised to dash-joined parts."""
    phone_number_matches = []
    for groups in phone_number_regex.findall(this_text):
        # BUG FIX: the area-code group is optional; skip empty groups so a
        # number without one doesn't come back as '-555-1234'.
        phoneNum = '-'.join(part for part in (groups[1], groups[3], groups[5]) if part)
        phone_number_matches.append(phoneNum)
    if len(phone_number_matches) == 0:
        print("no phone number matches were found")
        return phone_number_matches
    else:
        print("I found some phone numbers")
        return phone_number_matches


# Dispatch on the user's choice. Any other letter silently does nothing,
# matching the original behaviour.
if which_thing.lower() == "p":
    email_place_holder = []
    returned_phone_numbers = phone_number_finder(text, phone_number)
    match_message = add_to_clip(returned_phone_numbers, email_place_holder)
    print(match_message)
elif which_thing.lower() == "e":
    phone_number_place_holder = []
    returned_emails = email_finder(text, email)
    match_message = add_to_clip(returned_emails, phone_number_place_holder)
    print(match_message)
elif which_thing.lower() == "b":
    returned_phone_numbers = phone_number_finder(text, phone_number)
    returned_emails = email_finder(text, email)
    match_message = add_to_clip(returned_phone_numbers, returned_emails)
    print(match_message)
#!/usr/bin/env python
# coding: utf-8

# **HR EMPLOYEE ATTRITION DATASET.**
#
# This is a fictional data set created by IBM data scientists. We need to explore the
# dataset, understanding the algorithms and techniques which can be applied on it. We'll
# try to gain meaningful insights from the dataset, like what are the factors which have
# an impact on Employee Attrition.

# In[ ]:

# Import Desired libraries.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the
# "../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/" directory.
# Any results you write to the current directory are saved as output.

# Importing the Dataset. After which an important step is to understand our data.

# In[ ]:

data=pd.read_csv("../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv")
# NOTE(review): this is an alias, not a copy -- in-place edits made to 'data'
# later in the script are also visible through 'attrition'.
attrition=data
print(data.columns)
print(data.shape)

# So there are 35 columns and 1470 rows.
# Next let's check how many categorical variables and numerical variables:

# In[ ]:

# Differentiate numerical features (minus the target) and categorical features
categorical_features = data.select_dtypes(include=['object']).columns
categorical_features  # bare expression: displayed in a notebook, a no-op as a script
numerical_features = data.select_dtypes(exclude = ["object"]).columns
print(categorical_features.shape)
print(categorical_features)
print(numerical_features)

# So there are 9 categorical features which includes our target variable "Attrition".
# These will need to be encoded using one of: 1. Dummy Encoding, 2. One Hot Encoder,
# 3. Label Encoder. The rest are numerical features. Before going any further let's
# check for any NULLS in our dataset.

# In[ ]:

print(data.isnull().values.any())

# Since there are no nulls we do not need to worry about this anymore.
# To get a better grasp of our dataset I will execute the next line of my code.

# In[ ]:

data.describe()  # statistical summary; result is discarded when run as a script

# We need to specify our target variable which is Attrition in this case. Also since
# Attrition is a categorical feature we will map it to numerical values.

# **DATA VISUALIZATION**
# Data Visualization is one of the core steps before building any model. I have used the
# Seaborn library for this purpose. First and foremost I want to see how my target
# variable is distributed across the dataset.

# In[ ]:

sns.countplot("Attrition",data=data)
print()

# **COUNTPLOT**:- The above plot shows the distribution of our target variable. As can be
# seen clearly, the graph represents an imbalanced dataset, so we'll need to balance it.
# Imbalanced class distributions are an issue when anomaly detection like fraud cases,
# identification of rare diseases, or similar cases are present. In such scenarios we are
# more interested in the minority class and the factors that contribute to its occurrence.
#
# Various techniques are available for handling an imbalanced dataset, like undersampling
# the majority class and oversampling the minority class -- in simple terms, decreasing
# instances of majority classes or increasing instances of minority classes to reach a
# balanced dataset. We will deal with this issue while building our model; each technique
# has its own set of pros and cons.
# To see the correlation with our target variable (Attrition) we will convert it into
# numerical values using the replace function.

# In[ ]:

corrmat = data.corr()
f, ax = plt.subplots(figsize=(12, 9))
print()

# Statistical relationship between two variables is referred to as their **correlation**.
# The performance of some algorithms can deteriorate if two or more variables are tightly
# related, called multicollinearity. This is of special importance in regression.
# From the above correlation matrix, we find most of the features are uncorrelated. But
# there is a correlation (0.8) between PerformanceRating and PercentSalaryHike. We need to
# look into the white lines shown by EmployeeCount and StandardHours. TotalWorkingYears
# with JobLevel also has high correlation (0.8).

# An important feature in our dataset is Gender. I'll verify with a plot who is more
# likely for job attrition if we only consider Gender as the factor.

# In[ ]:

x, y, hue = "Attrition", "prop", "Gender"
f, axes = plt.subplots(1,2)
sns.countplot(x=x, hue=hue, data=data, ax=axes[0])
# Proportion of each Attrition value within each gender (normalised counts).
prop_df = (data[x]
           .groupby(data[hue])
           .value_counts(normalize=True)
           .rename(y)
           .reset_index())
sns.barplot(x=x, y=y, hue=hue, data=prop_df, ax=axes[1])

# One important thing to note here is we cannot make direct inferences from the first
# countplot. The second barplot is made in accordance with proportion. One can clearly
# infer that a higher proportion of males are likely for attrition compared to females.

# In[ ]:

x, y, hue = "Attrition", "prop", "Department"
f, axes = plt.subplots(1,2,figsize=(10,5))
sns.countplot(x=x, hue=hue, data=data, ax=axes[0])
prop_df = (data[x]
           .groupby(data[hue])
           .value_counts(normalize=True)
           .rename(y)
           .reset_index())
sns.barplot(x=x, y=y, hue=hue, data=prop_df, ax=axes[1])

# I have tried to find out the relation between **Department and Attrition**. Employees
# from the Sales department have a higher possibility of attrition whereas Research and
# Development has a higher proportion on the no-attrition side.

# In[ ]:

print()

# The above plot shows those with lesser age and lower income groups up to 5000 have a
# higher possibility of attrition, as the green dots are more concentrated in that region.
# In[ ]:

sns.set()
cols=['Age','DailyRate','Education','JobLevel','DistanceFromHome','EnvironmentSatisfaction','Attrition']
print()

# Above are the pairplots between various numerical variables.
# 1. The age distribution plot shows that employees in the lower age group are more likely
#    for attrition; as the age increases the blue curve goes up.
# 2. For JobLevel there is a sharp peak at lower job levels between 0-2, which shows
#    attrition is more likely there.
# 3. The plot between DistanceFromHome and Age shows that employees with lower ages living
#    at a distance greater than 15 have higher chances of attrition.
# 4. A lower level of EnvironmentSatisfaction indicates higher chances of attrition, as
#    the red curve rises above the blue curve at those levels.

# To understand the counts of different values in each feature I have used the
# value_counts method of pandas.

# In[ ]:

#for c in data.columns:
    #print("---- %s ---" % c)
    #print(data[c].value_counts())

# Since Attrition is a categorical variable, one needs to convert it into numerical form.
# I am replacing Yes with 1 and No with 0.

# In[ ]:

# NOTE(review): 'data1=data' is an alias; the in-place replace below also
# mutates 'data' (and therefore 'attrition').
data1=data
di={"Yes": 1, "No": 0}
data1["Attrition"].replace(di,inplace=True)

# Since Attrition is the value to be classified, assigning it to the target variable.

# In[ ]:

attrition=data
data1.shape
target=data.iloc[:,1]  # column index 1 holds Attrition in this dataset
print(target.head(5))

# In[ ]:

print(target.dtypes)
target=pd.DataFrame(target)
print(target.dtypes)

# In[ ]:

print(data1.columns)

# Since Attrition is the target variable we do not need it in our predictor variables.
# Apart from that, from value_counts of each variable we can see that 'Over18',
# 'StandardHours' and 'EmployeeCount' are all constant and can be dropped without loss
# of information.

# In[ ]:

data1.head(5)
data1.drop(["Attrition","Over18","StandardHours","EmployeeCount","EmployeeNumber"],axis=1,inplace=True)

# Converting categorical features to dummy variables. I have used the get_dummies method
# of pandas for the same.

# In[ ]:

categorical=data1.select_dtypes(include=['object']).columns
data1.shape
print(data1.columns)
print(categorical)
Prediction=data1##copy paste

# In[ ]:

print(data1.columns)
dummie=pd.get_dummies(data=data1, columns=['OverTime','BusinessTravel', 'Department', 'EducationField', 'Gender', 'JobRole','MaritalStatus'])
dummie=pd.DataFrame(dummie)
# NOTE(review): get_dummies already keeps the non-encoded columns of 'data1',
# so concatenating 'data1' with 'dummie' duplicates every untouched column --
# confirm whether 'new_data=dummie' was intended.
new_data=pd.concat([data1, dummie], axis=1)
# print(new_data.columns)

# **MODEL BUILDING:**
# I have used two models for this classification problem - Random Forests and Gradient
# Boosting. First, both algorithms are applied on the imbalanced dataset, then on the
# balanced one. For balancing I have used SMOTE (Synthetic Minority Over Sampling
# Technique).
#
# **RANDOM FORESTS:** an ensemble (bagging) method for both classification and
# regression: many independent predictors are built and combined by model averaging.
#
# **GRADIENT BOOSTING:** a boosting ensemble: learners are combined sequentially, each
# predictor learning from the mistakes of its predecessors.

# In[ ]:

print(target.head(5))

# In[ ]:

new_data.drop(['OverTime','BusinessTravel', 'Department', 'EducationField', 'Gender', 'JobRole','MaritalStatus'],axis=1,inplace=True)

# Since we have already created dummy variables we can drop the columns with categorical
# features.
# In[ ]: from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(new_data,target,test_size=0.33,random_state=7) # print(x_train.shape) # print(y_train.shape) # print(x_test.shape) # print(y_test.shape) # In[ ]: # importing Libraries for our model # Importing Random Forest Classifier from sklearn.ensemble import RandomForestClassifier forest=RandomForestClassifier(n_estimators=1000) forest.fit(x_train,y_train.values.ravel()) # In[ ]: predicted= forest.predict(x_test) # Next using Gradient Boosting For our dataset classification. # **EVALUATION OF MODEL:-** # There are several metrics for evaluation of any machine learning model. The most common is accuracy. # **ACCURACY**- This is simply the ratio of TOTAL CORRECT PREDICTIONS to TOTAL NO OF PREDICTIONS. # However, this metric is useful especially if there are equal no of samples belonging to each class. # This is certainly not the case in our Dataset. This is because if 90% of data belongs to one particular class say class X and 10% to class Y,then our model will get 90% accuracy even if predicts that entire sample belongs to class X. # # In such cases , a classification report is better way to check the quality of classification algorithm predictions. # This report gives several classifcation metrics like recall , precision, & f1 score. # # **** PRECISION:**- it is defined as the ratio of true positives to the sum of true and false positives. It is the accuracy in our postive predictions. # # **RECALL:-** It is defined as the ratio of True Positives to sum of True Positives and False Negatives.,which is how many positives we have identified out of the total positives that are actually present in the dataset. 
# In[ ]: from sklearn.ensemble import GradientBoostingClassifier from sklearn.metrics import classification_report, confusion_matrix learning_rates = [0.05, 0.1, 0.25, 0.5, 0.75, 1] for learning_rate in learning_rates: gb = GradientBoostingClassifier(n_estimators=20, learning_rate = learning_rate, max_features=2, max_depth = 2, random_state = 0) gb.fit(x_train, y_train) print("Learning rate: ", learning_rate) print("Accuracy score (training): {0:.3f}".format(gb.score(x_train, y_train))) print("Accuracy score (validation): {0:.3f}".format(gb.score(x_test, y_test))) print() # In[ ]: # Output confusion matrix and classification report of Gradient Boosting algorithm on validation set gb = GradientBoostingClassifier(n_estimators=20,learning_rate = 0.5,random_state = 7) gb.fit(x_train, y_train) predictions = gb.predict(x_test) print("Confusion Matrix for Gradient boosting:") print(confusion_matrix(y_test, predictions)) print() print("Classification Report for Gradient Boosting") print(classification_report(y_test, predictions)) # In[ ]: print("Accuracy score (validation): {0:.3f}".format(forest.score(x_test, y_test))) print("Confusion Matrix for Random Forests:") print(confusion_matrix(y_test, predicted)) print() print("Classification Report for Random Forests") print(classification_report(y_test, predicted)) # Here we applied two methods Gradient Boosting and Random Forests and checked their accuracy as well as confusion matrix. # But since this data was not balanced ,next we will see what effect occurs on accuracy after using techniques for imbalanced data. 
# In[ ]: from imblearn.over_sampling import SMOTE sm = SMOTE(random_state=7, ratio = 1.0) x_train_res, y_train_res = sm.fit_sample(x_train, y_train) forest_sm = RandomForestClassifier(n_estimators=500, random_state=7) forest_sm.fit(x_train_res, y_train_res.ravel()) prediction2 = forest_sm.predict(x_test) print("Accuracy score (validation): {0:.3f}".format(forest_sm.score(x_test, y_test))) print("Confusion Matrix for Random Forests:") print(confusion_matrix(y_test, prediction2)) print() print("Classification Report for Random Forests") print(classification_report(y_test, prediction2)) # Next fitting our model using Gradient Boosting after using SMOTE for handling imbalanced Data. # In[ ]: gb_sm = GradientBoostingClassifier(n_estimators=20, learning_rate = 0.5, max_features=2, max_depth = 2, random_state = 7) gb_sm.fit(x_train_res, y_train_res.ravel()) prediction3 = gb_sm.predict(x_test) print("Confusion Matrix for Gradient boosting:") print(confusion_matrix(y_test, prediction3)) print() print("Classification Report for Gradient Boosting") print(classification_report(y_test, prediction3)) print("Accuracy score (validation): {0:.3f}".format(gb_sm.score(x_test, y_test))) # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]:
#!/usr/bin/env python
# coding: utf-8

# # Predicting Heart Disease
#
# 1. Data Description
# The dataset contains many medical indicators; the goal is to do exploratory data
# analysis on the status of heart disease. The dataset contains medical history of
# patients of Hungarian and Switzerland origin. It's a classification problem.
# ***
# **Data Dictionary**
#
# Dataset ( Rows : 270 , Columns : 14 )
#
# 1. Age: in years ( Float, Min : 29, Max : 77, Median : 55, Mean : 54 )
# 2. Gender: 1 = Male ( Count : 183 ), 0 = Female ( Count : 87 )
# 3. Cp: Chest Pain Type
#    1: Typical angina -- chest pain when the heart muscle doesn't get enough
#       oxygen-rich blood ( Count : 20 )
#    2: Atypical angina -- a more subtle presentation, more common in women ( Count : 42 )
#    3: Non-anginal pain ( Count : 79 )
#    4: Asymptomatic pain -- neither causing nor exhibiting symptoms ( Count : 129 )
# 4. Trestbps: Resting blood pressure in mm Hg on admission
#    ( Normal : 120-140, Min : 94, Max : 200, Median : 130, Mean : 131 )
# 5. Chol: Serum cholesterol in mg/dl
#    ( Normal : 100-129, Min : 126, Max : 564, Median : 245, Mean : 240 )
# 6. Fbs: Fasting blood sugar > 120 mg/dl ( Normal : < 100 mg/dL )
#    1 = true ( Count : 40 ), 0 = false ( Count : 230 )
# 7. Thalach: Maximum heart rate achieved in bpm
#    ( Normal : 60-100, Min : 71, Max : 202, Median : 154, Mean : 150 )
# 8. Exang: Exercise induced angina; 1 = Yes ( Count : 181 ), 0 = No ( Count : 89 )
# 9. Restecg: Resting electrocardiographic results
#    0: Normal ( Count : 131 )
#    1: ST-T wave abnormality (T wave inversions, ST elevation/depression > 0.05 mV)
#       ( Count : 2 )
#    2: Probable or definite left ventricular hypertrophy by Estes criteria ( Count : 137 )
# 10. Oldpeak: ST depression induced by exercise relative to rest, mm
#     ( Min : 0, Max : 6.2, Mean : 1.05, Median : 0.80 )
# 11. Slope: Slope of the peak exercise ST segment
#     ( 1: Upsloping, 2: Flat, 3: Down-sloping )
# 12. Ca: Number of major vessels (0-3) colored by fluoroscopy
#     ( 0 : 160, 1 : 58, 2 : 33, 3 : 19 )
# 13. Thal: Thalassemia (less haemoglobin)
#     ( 3 = Normal : 152, 6 = Fixed defect : 14, 7 = Reversible defect : 104 )
# 14. Goal: Dependent variable
#     ( 1 = Absence of heart disease : 120, 2 = Presence : 150 )
# ***

# # 2. Data Loading

# In[ ]:

#importing libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import export_graphviz #plot tree
from sklearn.metrics import roc_curve, auc #for model evaluation
from sklearn.metrics import classification_report #for model evaluation
from sklearn.metrics import confusion_matrix #for model evaluation
from sklearn.model_selection import train_test_split #for data splitting
from sklearn.metrics import r2_score,accuracy_score
from sklearn.model_selection import cross_val_score ,StratifiedKFold
from scipy import stats
import pylab
import eli5 #for permutation importance
from eli5.sklearn import PermutationImportance
import warnings
warnings.filterwarnings('ignore')

# In[ ]:

#reading dataset
dataset=pd.read_csv("../../../input/ronitf_heart-disease-uci/heart.csv")
dataset.columns=['Age', 'Gender', 'CP', 'Trestbps', 'Chol', 'FBS', 'RestECG','Thalach', 'Exang', 'Oldpeak', 'Slope', 'CA', 'Thal', 'Goal']
nRow, nCol = dataset.shape
# NOTE(review): the data dictionary above says Goal is 1/2, but these masks use
# 2/1 while the pie chart below uses 1/0 -- confirm the actual encoding in the
# CSV (the UCI export of this file commonly uses 0/1).
n_with_disease = dataset[dataset["Goal"]==2].shape[0]
n_without_disease = dataset[dataset["Goal"]==1].shape[0]
greater_percent = (n_with_disease*100)/float(nRow)
print(f'**Summary**:\n There are {nRow} rows and {nCol} columns. Goal is the target/label variable that can have only value(1/2)')
disease = len(dataset[dataset['Goal'] == 1])
non_disease = len(dataset[dataset['Goal'] == 0])
plt.pie(x=[disease, non_disease], explode=(0, 0), labels=['Diseased ', 'Non-diseased'], autopct='%1.2f%%', shadow=True, startangle=90)
print()

# In[ ]:

#Check sample of any 5 rows
dataset=dataset.reset_index()
dataset=dataset.drop(['index'],axis=1)
dataset.sample(5)

# # 3. Data Pre-processing

# In[ ]:

# Get the number of missing data points, NA's, NAN's values per column
total = dataset.isnull().sum().sort_values(ascending=False)
percent = (dataset.isnull().sum()/dataset.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
total = dataset.isna().sum().sort_values(ascending=False)
percent = (dataset.isna().sum()/dataset.isna().count()).sort_values(ascending=False)
na_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
# NOTE(review): both sides of this 'or' test na_data; one was probably meant to
# be missing_data -- confirm.
if((na_data.all()).all()>0 or (na_data.all()).all()>0):
    print('Found Missing Data or NA values')
    #print(na_data,"\n",missing_data)

# Conclusion: there is no missing data or null values in the collected data.
# Additionally, the length of each column is the same.

# In[ ]:

#Detect outliers
plt.subplots(figsize=(18,10))
dataset.boxplot(patch_artist=True, sym="k.")
plt.xticks(rotation=90)

# From the above box plot, there are some outliers in the Chol and Trestbps attributes.
# I will remove the outliers that lie outside 3 standard deviations using the z-score.
# #### Z Score
# Z-score is the number of standard deviations a data point is from the mean -- a measure
# of how many standard deviations below or above the population mean a raw score is.

# In[ ]:

df=dataset[~(np.abs(stats.zscore(dataset)) < 3).all(axis=1)]
df  # bare expression: displayed in a notebook, a no-op as a script

# Almost all of the data (99.7%) should be within three standard deviations from the
# mean. Removing the observations that are out of 3 standard deviations.
# In[ ]: dataset=dataset.drop(dataset[~(np.abs(stats.zscore(dataset)) < 3).all(axis=1)].index) # ### Converting Categorical data # In[ ]: dataset['Gender']=dataset['Gender'].replace([1,0], ['Male', 'Female']) dataset['Goal']=dataset['Goal'].replace([0,1], ['Absence', 'Presence']) dataset['Slope']=dataset['Slope'].replace([0,1,2], ['Upsloping','Flat','Down-sloping']) dataset['RestECG']=dataset['RestECG'].replace([0,1,2], ['Normal', 'Abnormality','Hypertrophy']) dataset['Exang']=dataset['Exang'].replace([1,0], ['Yes', 'No']) dataset['FBS']=dataset['FBS'].replace([1,0], ['Yes', 'No']) dataset['Thal']=dataset['Thal'].replace([1,2,3], ['Normal', 'Fixed Defect','Reversible defect']) dataset['CP']=dataset['CP'].replace([0,1,2,3], ['Typical angina', 'Atypical angina','Non-anginal pain','Asymptomatic pain']) dataset['Gender']=dataset['Gender'].astype('object') dataset['CP']=dataset['CP'].astype('object') dataset['Thal']=dataset['Thal'].astype('object') dataset['FBS']=dataset['FBS'].astype('object') dataset['Exang']=dataset['Exang'].astype('object') dataset['RestECG']=dataset['RestECG'].astype('object') dataset['Slope']=dataset['Slope'].astype('object') # # 4. Visualisation # ### Attribute relationships # In[ ]: cont_dataset=dataset.copy() cont_dataset=cont_dataset.drop(['Gender','Slope','Thal','CP','FBS','RestECG','Exang','Goal'],axis=1) plt.subplots(figsize=(10,8)) colr=sns.heatmap(cont_dataset.corr(),robust=True,annot=True) figure = colr.get_figure() figure.savefig('correlation.png', dpi=400) # Conclusion:<br> # #### Positive correlation: Age vs Trestbps, Age vs Oldpeak, Cholestrol vs Trestbps # * The graph shows positive correlation between age and trestbps. As the age of a person increases, the value of blood pressure(Trestbps) also increases. # * The graph shows positive correlation between age and oldpeak. As the age of a person increases, the value of oldpeak increases. 
# * Cholestrol and blood pressure show a positive correlation, which makes sense
#   as high cholesterol is associated with an elevated risk of cardiovascular disease.
#
# #### Negative Correlation : Age vs Thalach , Thalach vs Oldpeak
# * Age and Thalach are negatively correlated: as age increases, maximum heart rate decreases.
# * Oldpeak and Thalach are negatively correlated: higher heart rate, lower Oldpeak.

# ## 4.1. Continous Variables
# ## 4.1.1 Age

# In[ ]:

# Frequency of disease presence / absence per age.
pd.crosstab(dataset['Age'], dataset['Goal']).plot(kind="bar", figsize=(20, 6))
plt.title('Heart Disease Frequency for Ages')
plt.xlabel('Age')
plt.ylabel('Frequency')
# plt.savefig('heartDiseaseAndAges.png')
print()

# In[ ]:

absence = dataset[dataset['Goal'] == 'Absence']['Age']
presence = dataset[dataset['Goal'] == 'Presence']['Age']
fig, ax = plt.subplots(1, 2, figsize=(16, 6))
mean = round(dataset['Age'].mean(), 2)
median = dataset['Age'].median()
a_median = absence.median()
a_mean = round(absence.mean(), 2)
p_median = presence.median()
p_mean = round(presence.mean(), 2)
# Vertical markers: red = presence cohort, blue = absence cohort,
# green = whole dataset (solid = median, dashed = mean).
ax[1].axvline(p_median, color='r', linestyle='-')
ax[1].axvline(p_mean, color='r', linestyle='--')
ax[1].axvline(a_median, color='b', linestyle='-')
ax[1].axvline(a_mean, color='b', linestyle='--')
ax[0].axvline(median, color='g', linestyle='-')
ax[0].axvline(mean, color='g', linestyle='--')
ax[0] = sns.distplot(dataset['Age'], bins=15, ax=ax[0])
# FIX: pass ax explicitly instead of relying on ax[1] happening to be the
# current axes — same target, no hidden matplotlib-state dependency.
ax[1] = sns.kdeplot(absence, label='Absence', shade=True, ax=ax[1])
ax[1] = sns.kdeplot(presence, label='Presence', shade=True, ax=ax[1])
plt.xlabel('Age')
print()
fig.savefig('age_old.png')
print(f' \tMean & Median of whole dataset are {mean} & {median}\t\t\tMean & Median of absence data = {a_mean} & {a_median}\n\t\t\t\t\t\t\t\t\tMean & Median of presence data are {p_mean} & {p_median} ')

# In[ ]:

print(f"Normal Test for the Age distribution {stats.normaltest(dataset['Age'])}")  # H0: data came from a normal distribution
print(f"Skewness for the whole dataset {pd.DataFrame.skew(dataset['Age'], axis=0)}")  # left skewed
print(f"Skewness of non-disease cohort {pd.DataFrame.skew(absence, axis=0)}")  # right skewed
# FIX: the original f-string was missing the space after "cohort".
print(f"Skewness of disease cohort {pd.DataFrame.skew(presence, axis=0)}")  # left skewed
# If the skewness is between -0.5 and 0.5, the data are fairly symmetrical.

# Comments:
# From the graphs, the skewness values and the normal test, the distribution is
# not normal.  The median age was 55 (range 29-77); the disease cohort has a
# higher median age (58) than the non-disease cohort (52).  To show the
# difference between the two groups a t-test will be performed.

# #### Central Limit Theorem
# Given a sufficiently large sample size, the mean of all samples drawn from a
# population is approximately equal to the population mean; sampling reduces the
# variability around the cohort means.

# In[ ]:

# Bootstrap the sampling distribution of the mean for each cohort.
# (The original also accumulated `sampleMeans` over the whole dataset,
# but never used it — that dead work is removed.)
absenceMeans = []
presenceMeans = []
for _ in range(1000):
    absenceMeans.append(np.mean(absence.sample(n=100)))
    presenceMeans.append(np.mean(presence.sample(n=100)))

fig, ax = plt.subplots(1, 2, figsize=(16, 6))
for axis in ax:
    axis.axvline(p_median, color='r', linestyle='-')
    axis.axvline(p_mean, color='r', linestyle='--')
    axis.axvline(a_median, color='b', linestyle='-')
    axis.axvline(a_mean, color='b', linestyle='--')
ax[0] = sns.kdeplot(absence, label='Absence', shade=True, ax=ax[0])
ax[0] = sns.kdeplot(presence, label='Presence', shade=True, ax=ax[0])
ax[1] = sns.kdeplot(absenceMeans, label='Absence', shade=True, ax=ax[1])
ax[1] = sns.kdeplot(presenceMeans, label='Presence', shade=True, ax=ax[1])
ax[0].set_xlabel('Age')
ax[0].set_ylabel('Kernel Density Estimate')
ax[1].set_xlabel('Age')
ax[1].set_ylabel('Kernel Density Estimate')
ax[0].set_title('Before Central Limit Theorem')
ax[1].set_title('After Central Limit Theorem')
print()
fig.savefig('age.png')
print(f' \tMean & Median of whole dataset are {mean} & {median}\t\t\tMean & Median of absence data = {a_mean} & {a_median}\n\t\t\t\t\t\t\t\t\tMean & Median of presence data are {p_mean} & {p_median} ')

# Comments:
# Assumption: the mean of the non-disease cohort is less than the mean of the
# disease cohort.
# Null Hypothesis: there is no difference in mean between the two cohorts.
# Alternate Hypothesis: there is a difference in mean between the two cohorts.
# In[ ]:

# t-test on independent samples (raw cohorts).
t2, p2 = stats.ttest_ind(presence, absence)
print("t = " + str(t2))
# FIX: scipy's ttest_ind already returns a TWO-SIDED p-value; the original
# printed `2*p2`, overstating the p-value by a factor of two.
print("p = " + str(p2))

# ### Observation
# p-value < 0.05: reject the null hypothesis of equal cohort means.  Slightly
# older people have a higher chance of heart disease, so Age is predictive.

# ## 4.1.2 Resting Blood Pressure

# In[ ]:

print(f"Normal Test for the whole dataset {stats.normaltest(dataset['Trestbps'])}")  # H0: normal distribution

# Like Age, Trestbps is not normally distributed; compare the cohorts with a
# CLT bootstrap followed by a t-test.

# ### Central Limit Theorem

# In[ ]:

absence = dataset[dataset['Goal'] == 'Absence']['Trestbps']
presence = dataset[dataset['Goal'] == 'Presence']['Trestbps']
a_median = absence.median()
a_mean = round(absence.mean(), 2)
p_median = presence.median()
p_mean = round(presence.mean(), 2)
# Bootstrap cohort means (the original's unused whole-dataset `sampleMeans`
# accumulation is removed).
absenceMeans = []
presenceMeans = []
for _ in range(1000):
    absenceMeans.append(np.mean(absence.sample(n=100)))
    presenceMeans.append(np.mean(presence.sample(n=100)))

fig, ax = plt.subplots(1, 2, figsize=(16, 6))
for axis in ax:
    axis.axvline(p_median, color='r', linestyle='-')
    axis.axvline(p_mean, color='r', linestyle='--')
    axis.axvline(a_median, color='b', linestyle='-')
    axis.axvline(a_mean, color='b', linestyle='--')
# FIX: pass ax explicitly (the original left it implicit for ax[1]).
ax[0] = sns.kdeplot(absence, label='Absence', shade=True, ax=ax[0])
ax[0] = sns.kdeplot(presence, label='Presence', shade=True, ax=ax[0])
ax[1] = sns.kdeplot(absenceMeans, label='Absence', shade=True, ax=ax[1])
ax[1] = sns.kdeplot(presenceMeans, label='Presence', shade=True, ax=ax[1])
ax[0].set_xlabel('Trestbps')
ax[0].set_ylabel('Kernel Density Estimate')
ax[1].set_xlabel('Trestbps')
ax[1].set_ylabel('Kernel Density Estimate')
ax[0].set_title('Before Central Limit Theorem')
ax[1].set_title('After Central Limit Theorem')
print()
fig.savefig('Trestbps.png')
print(f' \tMean & Median of absence data = {a_mean} & {a_median}\t\t\tMean & Median of presence data are {p_mean} & {p_median} ')

# In[ ]:

# NOTE(review): this t-test runs on the BOOTSTRAPPED sample means rather than the
# raw cohorts, which shrinks the variance and inflates significance.  Kept as-is
# to preserve the notebook's narrative, but worth revisiting.
t2, p2 = stats.ttest_ind(presenceMeans, absenceMeans)
print("t = " + str(t2))
print("p = " + str(p2))  # FIX: already two-sided, do not double.

# ### Observation
# p-value < 0.05: reject the null hypothesis of equal cohort means.  Mean resting
# blood pressure is 130 overall (134 diseased vs 129 non-diseased), so people
# with slightly higher blood pressure have a greater chance of heart disease —
# Trestbps is a useful predictive feature.

# ## 4.1.3 Cholestrol

# In[ ]:

print(f"Normal Test for the whole dataset {stats.normaltest(dataset['Chol'])}")  # H0: normal distribution

# Comment: also not normal — compare the cohorts via CLT bootstrap + t-test.
# ### Central Limit Theorem

# In[ ]:

# The Chol / Oldpeak / Thalach sections below repeated the same ~35 lines of
# cohort-splitting, bootstrapping and plotting three times; that common code is
# factored into helpers.  Behavior (figures saved, prints, t-test inputs) is
# unchanged, except that the original's unused whole-dataset `sampleMeans`
# accumulation is dropped as dead work.

def _cohort_split(column):
    """Return (absence, presence) Series of `column` split on the Goal label."""
    absence = dataset[dataset['Goal'] == 'Absence'][column]
    presence = dataset[dataset['Goal'] == 'Presence'][column]
    return absence, presence


def _sample_means(series, n_samples=1000, size=100):
    """Bootstrap `n_samples` sample means of `size` draws from `series`."""
    return [np.mean(series.sample(n=size)) for _ in range(n_samples)]


def _clt_figure(column, absence, presence, fname):
    """Plot before/after-CLT cohort KDEs for `column`, save to `fname`.

    Returns (a_mean, a_median, p_mean, p_median, absence_means, presence_means)
    so callers can print summaries and run t-tests.
    """
    a_mean, a_median = round(absence.mean(), 2), absence.median()
    p_mean, p_median = round(presence.mean(), 2), presence.median()
    absence_means = _sample_means(absence)
    presence_means = _sample_means(presence)
    fig, axes = plt.subplots(1, 2, figsize=(16, 6))
    # red = presence cohort, blue = absence cohort; solid = median, dashed = mean
    for axis in axes:
        axis.axvline(p_median, color='r', linestyle='-')
        axis.axvline(p_mean, color='r', linestyle='--')
        axis.axvline(a_median, color='b', linestyle='-')
        axis.axvline(a_mean, color='b', linestyle='--')
    sns.kdeplot(absence, label='Absence', shade=True, ax=axes[0])
    sns.kdeplot(presence, label='Presence', shade=True, ax=axes[0])
    sns.kdeplot(absence_means, label='Absence', shade=True, ax=axes[1])
    sns.kdeplot(presence_means, label='Presence', shade=True, ax=axes[1])
    for axis in axes:
        axis.set_xlabel(column)
        axis.set_ylabel('Kernel Density Estimate')
    axes[0].set_title('Before Central Limit Theorem')
    axes[1].set_title('After Central Limit Theorem')
    print()
    fig.savefig(fname)
    return a_mean, a_median, p_mean, p_median, absence_means, presence_means


absence, presence = _cohort_split('Chol')
a_mean, a_median, p_mean, p_median, absenceMeans, presenceMeans = _clt_figure(
    'Chol', absence, presence, 'chol.png')
print(f' \tMean & Median of absence data = {a_mean} & {a_median}\t\t\tMean & Median of presence data are {p_mean} & {p_median} ')

# In[ ]:

# t-test on independent samples (bootstrapped means, as in the original).
t2, p2 = stats.ttest_ind(presenceMeans, absenceMeans)
print("t = " + str(t2))
# FIX: ttest_ind returns a two-sided p-value; the original doubled it.
print("p = " + str(p2))

# ### Observation
# p-value < 0.05: reject the null hypothesis of equal cohort means.  Cholestrol
# levels for the non-disease cohort (median = 236 mg/dL) were lower than for the
# diseased patients (median = 255 mg/dL), so Chol can be a good predictive feature.

# ## 4.1.4 Exercise Induced ST Depression

# In[ ]:

pd.crosstab(dataset['Oldpeak'], dataset['Goal']).plot(kind="bar", figsize=(10, 6))
plt.title('Heart Disease Frequency for Oldpeak')
plt.xlabel('Exercise Induced ST Depression')
plt.ylabel('Frequency')
print()

# In[ ]:

absence, presence = _cohort_split('Oldpeak')
a_mean, a_median, p_mean, p_median, absenceMeans, presenceMeans = _clt_figure(
    'Oldpeak', absence, presence, 'oldpeak.png')
print(f' \tMean & Median of absence data = {a_mean} & {a_median}\t\t\tMean & Median of presence data are {p_mean} & {p_median} ')

# In[ ]:

t2, p2 = stats.ttest_ind(presenceMeans, absenceMeans)
print("t = " + str(t2))
print("p = " + str(p2))  # FIX: already two-sided, do not double.

# ### Observation
# p-value < 0.05: reject the null hypothesis of equal cohort means.  The disease
# cohort exhibits a higher mean and median Oldpeak, so ST depression induced by
# exercise relative to rest can be a good predictive feature.

# ## 4.1.5 Maximum Heart Rate

# In[ ]:

# FIX: the original first assigned `absence = dataset[dataset['Goal']==1][...]`
# — dead (and wrong: Goal now holds 'Absence'/'Presence' strings, so it selected
# nothing) — and immediately overwrote it.  The dead line is removed.
absence, presence = _cohort_split('Thalach')
mean = round(dataset['Thalach'].mean())
median = dataset['Thalach'].median()
a_median = absence.median()
a_mean = round(absence.mean(), 2)
p_median = presence.median()
p_mean = round(presence.mean(), 2)
fig, ax = plt.subplots(1, 2, figsize=(16, 6))
ax[1].axvline(p_median, color='r', linestyle='-')
ax[1].axvline(p_mean, color='r', linestyle='--')
ax[1].axvline(a_median, color='b', linestyle='-')
ax[1].axvline(a_mean, color='b', linestyle='--')
ax[0].axvline(median, color='g', linestyle='-')
ax[0].axvline(mean, color='g', linestyle='--')
ax[0] = sns.distplot(dataset['Thalach'], bins=15, ax=ax[0])
# FIX: pass ax explicitly (original relied on ax[1] being the current axes).
ax[1] = sns.kdeplot(absence, label='Absence', shade=True, ax=ax[1])
ax[1] = sns.kdeplot(presence, label='Presence', shade=True, ax=ax[1])
print()
fig.savefig('thalach_old.png')
print(f' \tMean & Median of whole dataset are {mean} & {median}\t\t\tMean & Median of absence data = {a_mean} & {a_median}\n\t\t\t\t\t\t\t\t\tMean & Median of presence data are {p_mean} & {p_median} ')

# Oldpeak: ST depression induced by exercise relative to rest

# In[ ]:

a_mean, a_median, p_mean, p_median, absenceMeans, presenceMeans = _clt_figure(
    'Thalach', absence, presence, 'thalach.png')
print(f' \tMean & Median of whole dataset are {mean} & {median}\t\t\tMean & Median of absence data = {a_mean} & {a_median}\n\t\t\t\t\t\t\t\t\tMean & Median of presence data are {p_mean} & {p_median} ')

# In[ ]:

t2, p2 = stats.ttest_ind(presenceMeans, absenceMeans)
print("t = " + str(t2))
print("p = " + str(p2))  # FIX: already two-sided, do not double.

# ### Observation
# p-value < 0.05: reject the null hypothesis of equal cohort means.
# FIX(comment): maximum heart rate was LOWER for the disease cohort
# (mean = 139 , median = 141) compared to non-disease patients (mean = 158 , median = 161).
# It was anticipated that this feature should have high predictive power.

# ## 4.2. Categorical Variables
# ## 4.2.1 Gender

# In[ ]:

def _chi_square_vs_goal(column):
    """Chi-square independence test of `column` against the Goal label.

    H0: the column is not associated with Goal; a small p-value rejects H0.
    """
    contingency = pd.crosstab(dataset[column], dataset['Goal'])
    chi_stat = stats.chi2_contingency(contingency)
    print(f'Chi statistics is {chi_stat[0]} and p value is {chi_stat[1]}')


def _cohort_countplots(absence, presence, fname, order=None):
    """Side-by-side count plots for the two cohorts, saved to `fname`."""
    fig, axes = plt.subplots(1, 2, figsize=(15, 5))
    sns.countplot(absence, data=dataset, ax=axes[0], order=order).set_title('Absence of Heart Disease')
    sns.countplot(presence, data=dataset, ax=axes[1], order=order).set_title('Presence of Heart Disease')
    print()
    fig.savefig(fname)


n_male = len(dataset[dataset['Gender'] == 'Male'])
n_female = len(dataset[dataset['Gender'] == 'Female'])
plt.pie(x=[n_male, n_female], explode=(0, 0), labels=['Male', 'Female'],
        autopct='%1.2f%%', shadow=True, startangle=90)
print()

# In[ ]:

absence = dataset[dataset["Goal"] == 'Absence']["Gender"].sort_values()
presence = dataset[dataset["Goal"] == 'Presence']["Gender"].sort_values()
_cohort_countplots(absence, presence, 'gender.png')

# In[ ]:

_chi_square_vs_goal('Gender')

# ### Observation
# Given the low p-value (1.926225633356082e-06) we reject the null hypothesis:
# there is a significant relationship between Gender and Goal.<br>

# ## 4.2.2 Chest Pain Type

# In[ ]:

pain_types = ['Typical angina', 'Atypical angina', 'Non-anginal pain', 'Asymptomatic pain']
counts = [len(dataset[dataset['CP'] == pain]) for pain in pain_types]
plt.pie(counts, data=dataset,
        labels=['CP(1) Typical angina', 'CP(2) Atypical angina',
                'CP(3) Non-anginal pain', 'CP(4) Asymptomatic pain'],
        autopct='%1.2f%%', shadow=True, startangle=90)
print()

# In[ ]:

absence = dataset[dataset["Goal"] == 'Absence']["CP"]
presence = dataset[dataset["Goal"] == 'Presence']["CP"]
_cohort_countplots(absence, presence, 'cp.png')

# In[ ]:

_chi_square_vs_goal('CP')

# ### Observation
# Given the low p-value we reject the null hypothesis: CP and Goal are
# significantly related.<br> The presence-of-disease cohort has ~90 patients with
# chest pain type 4, far more than any other type, so asymptomatic pain can have
# high predictive power.

# ## 4.2.3 Fasting Blood Sugar

# In[ ]:

sizes = [len(dataset[dataset['FBS'] == 'No']), len(dataset[dataset['FBS'] == 'Yes'])]
plt.pie(x=sizes, labels=['No', 'Yes'], explode=(0.1, 0),
        autopct="%1.2f%%", startangle=90, shadow=True)
print()

# Fbs: fasting blood sugar > 120 mg/dl (1 = true; 0 = false)

# In[ ]:

absence = dataset[dataset["Goal"] == 'Absence']["FBS"]
presence = dataset[dataset["Goal"] == 'Presence']["FBS"]
_cohort_countplots(absence, presence, 'fbs.png')

# In[ ]:

_chi_square_vs_goal('FBS')

# ### Observation
# Given the HIGH p-value we fail to reject the null hypothesis: no significant
# relationship between FBS and Goal.<br> Most individuals did not have fasting
# blood sugar above 120 mg/dL, and this changed little between cohorts — FBS is
# not a predictive feature.

# ## 4.2.4 Resting ECG Results

# In[ ]:

ecg_order = ['Normal', 'Abnormality', 'Hypertrophy']
sizes = [len(dataset[dataset['RestECG'] == level]) for level in ecg_order]
plt.pie(x=sizes,
        labels=['Normal', 'ST-T wave abnormality',
                'definite left ventricular hypertrophy by Estes criteria'],
        explode=(0, 0, 0), autopct="%1.2f%%", startangle=90, shadow=True)
print()

# In[ ]:

absence = dataset[dataset["Goal"] == 'Absence']["RestECG"]
presence = dataset[dataset["Goal"] == 'Presence']["RestECG"]
_cohort_countplots(absence, presence, 'restecg.png', order=ecg_order)

# In[ ]:

print(f'Probability of Hypertropy in disease cohorts {presence[presence=="Hypertrophy"].value_counts()/len(presence)}')
print(f'Probability of Hypertropy in non-disease cohorts {absence[absence=="Hypertrophy"].value_counts()/len(absence)}')

# In[ ]:

_chi_square_vs_goal('RestECG')

# ### Observation
# Most patients exhibited normal resting electrocardiograhic results.  However, a
# higher proportion of diseased patients had hypertropy, suggesting this feature
# may contribute some predictive power.
# ## 4.2.5 Exercise Induced Angina

# In[ ]:

# Overall distribution of exercise induced angina (1 = yes; 0 = no).
sns.countplot(data=dataset, x='Exang')

# In[ ]:

# Cohort-wise breakdown, saved for the report.
absence = dataset[dataset["Goal"] == 'Absence']["Exang"]
presence = dataset[dataset["Goal"] == 'Presence']["Exang"]
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.countplot(absence, data=dataset, ax=axes[0]).set_title('Absence of Heart Disease')
sns.countplot(presence, data=dataset, ax=axes[1]).set_title('Presence of Heart Disease')
print()
fig.savefig('exang.png')

# In[ ]:

# Chi-square test of independence.
# H0: Exang is not associated with Goal; H1: Exang is associated with Goal.
cont = pd.crosstab(dataset['Exang'], dataset['Goal'])
chi_stat = stats.chi2_contingency(cont)
print(f'Chi statistics is {chi_stat[0]} and p value is {chi_stat[1]}')

# ### Observation
# The low p-value rejects the null hypothesis: Exang and Goal are significantly
# related.  Significantly more patients in the diseased cohort displayed exercise
# induced angina, so this feature should be strongly predictive.
# ## 4.2.6 Peak Exercise ST Segment

# In[ ]:

# Slope of the peak exercise ST segment:
# 1 = upsloping, 2 = flat, 3 = down-sloping.
sns.countplot(data=dataset, x='Slope')

# In[ ]:

# Cohort-wise breakdown, saved for the report.
absence = dataset[dataset["Goal"] == 'Absence']["Slope"]
presence = dataset[dataset["Goal"] == 'Presence']["Slope"]
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.countplot(absence, data=dataset, ax=axes[0]).set_title('Absence of Heart Disease')
sns.countplot(presence, data=dataset, ax=axes[1]).set_title('Presence of Heart Disease')
print()
fig.savefig('slope.png')

# In[ ]:

# Chi-square test of independence.
# H0: Slope is not associated with Goal; H1: Slope is associated with Goal.
cont = pd.crosstab(dataset['Slope'], dataset['Goal'])
chi_stat = stats.chi2_contingency(cont)
print(f'Chi statistics is {chi_stat[0]} and p value is {chi_stat[1]}')

# ### Observation
# The low p-value rejects the null hypothesis: Slope and Goal are significantly
# related.  The slope of the peak exercise ST segment differed between cohorts,
# with the majority of cardiac disease patients exhibiting a flat ST slope
# (value = 2) — this can also have good predictive power.
# ## 4.2.7 Number of Blood Vessels

# In[ ]:

# Overall distribution of the number of major vessels coloured by flourosopy.
sns.countplot(data=dataset, x='CA')

# In[ ]:

# Cohort-wise breakdown, saved for the report.
absence = dataset[dataset["Goal"] == 'Absence']["CA"]
presence = dataset[dataset["Goal"] == 'Presence']["CA"]
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.countplot(absence, data=dataset, ax=axes[0]).set_title('Absence of Heart Disease')
sns.countplot(presence, data=dataset, ax=axes[1]).set_title('Presence of Heart Disease')
print()
fig.savefig('ca.png')

# In[ ]:

# Chi-square test of independence.
# H0: CA is not associated with Goal; H1: CA is associated with Goal.
cont = pd.crosstab(dataset['CA'], dataset['Goal'])
chi_stat = stats.chi2_contingency(cont)
print(f'Chi statistics is {chi_stat[0]} and p value is {chi_stat[1]}')

# ### Observation
# The low p-value rejects the null hypothesis: CA and Goal are significantly
# related.  Significantly more patients in the diseased cohort have more than one
# affected blood vessel, so this feature should be strongly predictive.
# ## 4.2.8 Thalassemia

# In[ ]:

thal_levels = ['Normal', 'Fixed Defect', 'Reversible defect']
sizes = [len(dataset[dataset['Thal'] == level]) for level in thal_levels]
plt.pie(x=sizes, labels=['Normal', 'Fixed Defect', 'Reversible defect'],
        explode=(0, 0, 0), autopct="%1.2f%%", startangle=90, shadow=True)
print()

# In[ ]:

# Cohort-wise breakdown, saved for the report.
absence = dataset[dataset["Goal"] == 'Absence']["Thal"]
presence = dataset[dataset["Goal"] == 'Presence']["Thal"]
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.countplot(absence, data=dataset, ax=axes[0],
              order=['Normal', 'Fixed Defect', 'Reversible defect']).set_title('Absence of Heart Disease')
sns.countplot(presence, data=dataset, ax=axes[1],
              order=['Normal', 'Fixed Defect', 'Reversible defect']).set_title('Presence of Heart Disease')
print()
fig.savefig('thal.png')

# In[ ]:

# Chi-square test of independence.
# H0: Thal is not associated with Goal; H1: Thal is associated with Goal.
cont = pd.crosstab(dataset['Thal'], dataset['Goal'])
chi_stat = stats.chi2_contingency(cont)
print(f'Chi statistics is {chi_stat[0]} and p value is {chi_stat[1]}')

# ### Observation
# The low p-value rejects the null hypothesis: Thal and Goal are significantly
# related.  Significantly more diseased patients have a reversible defect, so
# this feature should be strongly predictive.

# # 5. Multivariate Visualisation
# ## 5.1. Gender, Chest Pain Type and Resting ECG

# In[ ]:

sns.catplot(x="CP", hue="Goal", row="Gender", col="RestECG",
            data=dataset, kind="count", margin_titles=True)

# ### Observation
# Most individuals diagnosed with cardiac disease were males with typical signs
# of asymptomatic angina pain (value = 4) showing probable or definite left
# ventricular hypertropy (RestECG = 2).

# ## 5.2. Gender, Chest Pain Type and Exercise Induced Angina

# In[ ]:

sns.catplot(x="CP", hue="Goal", row="Gender", col="Exang",
            data=dataset, kind="count", margin_titles=True)

# Exang: exercise induced angina (1 = yes; 0 = no)

# ### Observation
# Most individuals diagnosed with cardiac disease were males with typical signs
# of asymptomatic angina pain (value = 4) and exercise induced angina (Exang = 1).

# ## 6. Summary
# Exploration of the data indicated that Oldpeak, Thalach, CP (Asymptomatic pain),
# CA (>1) and Thalassemia (Reversible defect) are possibly useful features for
# predicting the presence of cardiac disease.  Age, Exang, Slope, Trestbps, Chol,
# Gender, FBS and RestECG were found to have at most minor predictive power.<br>
#
# Strong predictive power: Oldpeak, Thalach, CP (Asymptomatic pain), CA (>1),
# Thalassemia (Reversible defect)<br>
# * Patients with a higher Oldpeak value (greater than 1) have a higher
#   probability of heart disease than patients with a lower value (less than 1).
# * Patients with cardiac disease have a lower maximum heart rate (below 140)
#   than patients without (above 140).
# * Patients suffering from heart disease are more likely to present
#   asymptomatic chest pain.
# * The disease cohort is about six times more likely to show a reversible
#   thalassemia defect than the non-disease cohort.
#
# Moderate predictive power: Age, Exang, Slope<br>
# * Older patients have a higher chance of heart disease.
# * The disease cohort is more likely to experience chest pain after exercise.
# * The disease cohort is more likely to show a flat ST-wave slope.
#
# Weak predictive power: Trestbps, Chol, Gender, FBS, RestECG<br>
# * These attributes show little ability to distinguish the disease and
#   non-disease cohorts.
# ***
# <br>
#
# # Applying Models
# ### Creating Dummy Variables
# Dummy (one-hot) variables let a single regression equation represent multiple
# category levels: each categorical attribute with k levels is expanded into
# indicator columns.

# In[ ]:

# Re-encode the label back to 0/1 before one-hot encoding the predictors.
dataset['Goal'] = dataset['Goal'].replace(['Absence', 'Presence'], [0, 1])
dataset['Goal'] = dataset['Goal'].astype('int64')
dataset = pd.get_dummies(dataset, drop_first=False)

# ### Scaling the numerical variables
# Min-max scaling standardises every feature into the [0, 1] range.

# In[ ]:

dataset = (dataset - np.min(dataset)) / (np.max(dataset) - np.min(dataset)).values
dataset.head()

# ## Splitting Dataset into Train and Test set
# Separate the label from the predictors and hold out 20% for evaluation.

# In[ ]:

# FIX: the original used `dataset.drop('Goal', 1)` — the positional `axis`
# argument is deprecated and removed in pandas 2.x; use the explicit
# `columns=` form (same result).
X_train, X_test, y_train, y_test = train_test_split(
    dataset.drop(columns='Goal'), dataset['Goal'],
    test_size=.2, random_state=42, shuffle=True)

# # Logistic Regression
# * Logistic regression maps a weighted sum of the inputs through the sigmoid
#   function 1 / (1 + e^-value), yielding a value between 0 and 1 — suited to
#   binary classification.
# <br> # * The coefficients (Beta values b) of the logistic regression algorithm must be estimated from your training data.<br> # ### Training Model # ### Hyperparameter Tuning using GridSearchCV # Grid search is an approach to hyperparameter tuning that will methodically build and evaluate a model for each combination of algorithm parameters specified in a grid # In[ ]: from sklearn.model_selection import GridSearchCV lr = LogisticRegression(class_weight='balanced',random_state=42) param_grid ={'C': [0.1,0.2,0.3,0.4],'penalty': ['l1', 'l2'],'class_weight':[{0: 1, 1: 1},{ 0:0.67, 1:0.33 },{ 0:0.75, 1:0.25 },{ 0:0.8, 1:0.2 }]} CV_rfc = GridSearchCV(estimator=lr, param_grid=param_grid, cv= 5) CV_rfc.fit(X_train, y_train) CV_rfc.best_params_ # ##### C: Inverse of regularization strength; must be a positive float. # * Regularization is applying a penalty to increasing the magnitude of parameter values in order to reduce overfitting. Regularization is a technique to discourage the complexity of the model. It does this by penalizing the loss function.<br>L(x,y)=(y(i)-f(x))^2 <br> # This helps to solve the overfitting problem. # * Loss function is the sum of squared difference between the actual value and the predicted value. # * As the degree of the input features increases the model becomes complex and tries to fit all the data points. # * So by penalizing the smaller weights and make them too small, very close to zero. It makes those terms negligible and helps simplify the model. # * Regularization works on assumption that smaller weights generate simpler model and thus helps avoid overfitting. # # ##### L1 Regularization or Lasso # * In L1 norm we shrink the parameters to zero. When input features have weights closer to zero. majority of the input features have zero weights and very few features have non zero weights.L1 regularization does feature selection. 
It does this by assigning insignificant input features with zero weight and useful features with a non zero weight.<br>L(x,y)=(y(i)-f(x))^2 +lambda* sum of parameters. # # ##### L2 Regularization or Ridge Regularization # * L2 regularization forces the weights to be small but does not make them zero.<br>L(x,y)=(y(i)-f(x))^2 +lambda* sum of squares of parameters. # # ##### Class weight # class weights will be given by n_samples / (n_classes * np.bincount(y)) # # So,C=0.3 and l1 regularization are best hyperparameters for my model. All the models will have these hyperparameters value. # #### Training the model with all attributes # In[ ]: #fitting the model lr1=LogisticRegression(C=0.2,random_state=42,penalty='l1',class_weight={0:1,1:1}) lr1.fit(X_train,y_train) # In[ ]: y_pred1=lr1.predict(X_test) print("Logistic Train score with ",format(lr1.score(X_train, y_train))) print("Logistic Test score with ",format(lr1.score(X_test, y_test))) # ### Confusion Matrix # In[ ]: class_names=[0,1] fig, ax = plt.subplots() tick_marks = np.arange(len(class_names)) plt.xticks(tick_marks, class_names) plt.yticks(tick_marks, class_names) cm2 = confusion_matrix(y_test, y_pred1) ax.xaxis.set_label_position("top") plt.tight_layout() plt.title('Confusion matrix', y=1.1) plt.ylabel('Actual label') plt.xlabel('Predicted label') # In[ ]: sensitivity2 = cm2[1,1]/(cm2[1,1]+cm2[1,0]) print('Sensitivity/Recall : ', sensitivity2) specificity2 = cm2[0,0]/(cm2[0,0]+cm2[0,1]) print('Specificity : ', specificity2) precision2 = cm2[1,1]/(cm2[1,1]+cm2[0,1]) print('Precision : ', precision2) F1score2=(2*sensitivity2*precision2)/(sensitivity2+precision2) print('F1 score : ', F1score2) # In a medical test the big indicators of success are specificity and sensitivity. # * Sensitivity/recall – how good a test is at detecting the positives. A test can cheat and maximize this by always returning “positive”. # * Specificity – how good a test is at avoiding false alarms. 
# A test can cheat and maximize this by always returning "negative".
# * Precision – how many of the positively classified were relevant. A test can cheat and
#   maximize this by only returning positive on the one result it is most confident in.
# * F1 score – a good F1 score means low false positives and low false negatives, so real
#   threats are correctly identified without being disturbed by false alarms.

# #### Training model with the most predictive attributes concluded in the EDA part

# In[ ]:

# Keep only the five columns that showed the strongest predictive power in EDA.
top_features = ['Oldpeak', 'Thalach', 'CA', 'Thal_Reversible defect', 'CP_Asymptomatic pain']
test_attributes = X_test[top_features]
train_attributes = X_train[top_features]
lr2 = LogisticRegression(C=0.3, penalty='l2', class_weight={0: 1, 1: 1})
lr2.fit(train_attributes, y_train)

# In[ ]:

y_pred2 = lr2.predict(test_attributes)
print("Logistic Train score with ", format(lr2.score(train_attributes, y_train)))
# FIX: the test score was computed from y_pred1 (the PREVIOUS model's predictions);
# it must use y_pred2, and accuracy_score expects (y_true, y_pred) in that order.
print("Logistic Test score with ", format(accuracy_score(y_test, y_pred2)))

# As concluded in the EDA part, the five columns 'Oldpeak', 'Thalach', 'CA',
# 'Thal_Reversible defect' and 'CP_Asymptomatic pain' showed strong predictive power.
# Training the model on these columns gives ~84% training accuracy and ~83% testing accuracy.
# #### Confusion Matrix # In[ ]: class_names=[0,1] fig, ax = plt.subplots() tick_marks = np.arange(len(class_names)) plt.xticks(tick_marks, class_names) plt.yticks(tick_marks, class_names) cm2 = confusion_matrix(y_test, y_pred2) ax.xaxis.set_label_position("top") plt.tight_layout() plt.title('Confusion matrix', y=1.1) plt.ylabel('Actual label') plt.xlabel('Predicted label') # In[ ]: sensitivity2 = cm2[1,1]/(cm2[1,1]+cm2[1,0]) print('Sensitivity/Recall : ', sensitivity2) specificity2 = cm2[0,0]/(cm2[0,0]+cm2[0,1]) print('Specificity : ', specificity2) precision2 = cm2[1,1]/(cm2[1,1]+cm2[0,1]) print('Precision : ', precision2) F1score2=(2*sensitivity2*precision2)/(sensitivity2+precision2) print('F1 score : ', F1score2) # In a medical test the big indicators of success are specificity and sensitivity. # * Sensitivity/recall – how good a test is at detecting the positives. A test can cheat and maximize this by always returning “positive”. # * Specificity – how good a test is at avoiding false alarms. A test can cheat and maximize this by always returning “negative # * Precision – how many of the positively classified were relevant. A test can cheat and maximize this by only returning positive on one result it’s most confident in. # * F1 score - a good F1 score means that you have low false positives and low false negatives, so you're correctly identifying real threats and you are not disturbed by false alarms. # ### K-FOLD Cross Validation # Cross Validation is a very useful technique for assessing the performance of machine learning models. It helps in knowing how the machine learning model would generalize to an independent data set.K-Fold CV is where a given data set is split into a K number of sections/folds where each fold is used as a testing set at some point. 
# In[ ]: skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42) skf.get_n_splits(X_train, y_train) results = cross_val_score(lr1, X_train, y_train, cv=skf, n_jobs=1, scoring='accuracy') results.mean() # It also check for overfitting and to get an idea about how your machine learning model will generalize.84% mean accuracy of all the folds proves there is no overfitting. # #### Feature Importance # Feature importances for any black-box estimator by measuring how score decreases when a feature is not available; the method is also known as “permutation importance”. # In[ ]: perm_imp1 = PermutationImportance(lr1, random_state=42,scoring='accuracy').fit(X_test, y_test) eli5.show_weights(perm_imp1, feature_names = X_test.columns.tolist(),top=50) # After seeing the feature importance chart, i will remove the columns which have less weight or importance. The main motive of applying a model is getting a good accuracy with less features. # # So, CA is the most important feature i.e. number of major vessels which makes sense as what happens when your heart's blood supply is blocked or interrupted by a build-up of fatty substances in the coronary arteries", it seems logical the more major vessels is a good thing, and therefore will reduce the probability of heart disease # In[ ]: X_train2=X_train.drop(['Exang_No'],axis=1) X_test2=X_test.drop(['Exang_No'],axis=1) # ## Applying Model after removing least significant attribute # A general rule in machine learning is that the more features you have, the more likely your model will suffer from overfitting # In[ ]: lr2=LogisticRegression(C=0.2,penalty='l1',class_weight={0:1,1:1}) lr2.fit(X_train2,y_train) y_pred2=lr2.predict(X_test2) # In[ ]: print("Logistic TRAIN score with ",format(lr2.score(X_train2, y_train))) print("Logistic TEST score with ",format(lr2.score(X_test2, y_test))) # #### Feature Importance # Feature importances for any black-box estimator by measuring how score decreases when a feature is not available; 
# the method is also known as "permutation importance".

# In[ ]:

# Permutation importance of the reduced model on the held-out test set.
perm_imp1 = PermutationImportance(lr2, random_state=42, scoring='accuracy').fit(X_test2, y_test)
eli5.show_weights(perm_imp1, feature_names=X_test2.columns.tolist(), top=50)

# In[ ]:

# Drop the attributes the permutation importance ranked as least significant.
drop_cols = ['Age', 'Thal_Fixed Defect', 'Slope_Flat', 'Slope_Down-sloping',
             'RestECG_Hypertrophy', 'RestECG_Abnormality', 'FBS_Yes', 'Oldpeak']
X_train3 = X_train2.drop(drop_cols, axis=1)
X_test3 = X_test2.drop(drop_cols, axis=1)

# ### Applying Model after removing least significant attributes
# A general rule in machine learning is that the more features you have, the more
# likely your model will suffer from overfitting.

# In[ ]:

lr3 = LogisticRegression(C=0.3, penalty='l2', class_weight={0: 1, 1: 1})
lr3.fit(X_train3, y_train)
y_pred3 = lr3.predict(X_test3)
y_probab3 = lr3.predict_proba(X_test3)

# In[ ]:

# FIX: these prints reported the scores of the PREVIOUS model (lr2 on
# X_train2/X_test2); they must report lr3 evaluated on the reduced feature
# sets it was actually trained on.
print("Logistic TRAIN score with ", format(lr3.score(X_train3, y_train)))
print("Logistic TEST score with ", format(lr3.score(X_test3, y_test)))

# So, even after removing the less significant columns the accuracy does not change.
# The best accuracy achieved is 83% test and 85% train accuracy.
# ### Confusion Matrix

# In[ ]:

class_names = [0, 1]
fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
cm2 = confusion_matrix(y_test, y_pred3)
ax.xaxis.set_label_position("top")
ax.yaxis.set_label_position("right")
plt.tight_layout()
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
plt.savefig("cmlr.png")

# In[ ]:

# Recall = TP / (TP + FN)
sensitivity2 = cm2[1, 1] / (cm2[1, 1] + cm2[1, 0])
print('Sensitivity/Recall : ', sensitivity2)
# Specificity = TN / (TN + FP)
specificity2 = cm2[0, 0] / (cm2[0, 0] + cm2[0, 1])
print('Specificity : ', specificity2)
# Precision = TP / (TP + FP)
precision2 = cm2[1, 1] / (cm2[1, 1] + cm2[0, 1])
print('Precision : ', precision2)
# F1 = harmonic mean of precision and recall.
F1score2 = (2 * sensitivity2 * precision2) / (sensitivity2 + precision2)
print('F1 score : ', F1score2)

# In a medical test the big indicators of success are specificity and sensitivity.
# * Sensitivity/recall – how good a test is at detecting the positives. A test can cheat and
#   maximize this by always returning "positive".
# * Specificity – how good a test is at avoiding false alarms. A test can cheat and maximize
#   this by always returning "negative".
# * Precision – how many of the positively classified were relevant. A test can cheat and
#   maximize this by only returning positive on the one result it is most confident in.
# * F1 score – a good F1 score means low false positives and low false negatives, so real
#   threats are correctly identified without being disturbed by false alarms.

# In[ ]:

# FIX: this ROC curve belongs to the lr3 model evaluated just above, so it must be
# built from y_pred3 — the original used y_pred2, the previous model's predictions.
fpr1, tpr1, thresholds1 = roc_curve(y_test, y_pred3)
fig, ax = plt.subplots()
ax.plot(fpr1, tpr1)
ax.plot([0, 1], [0, 1], transform=ax.transAxes, ls="--", c=".3")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
# FIX: the title said "diabetes classifier" — this notebook classifies heart disease.
plt.title('ROC curve for heart disease classifier')
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.grid(True)
plt.savefig("roclr.png")

# In[ ]:

# Higher the AUC, better the model is at distinguishing between patients with
# disease and no disease.
# Area under the ROC curve computed from the fpr1/tpr1 arrays built above.
roc_auc2 = auc(fpr1, tpr1)
roc_auc2

# AUC value is 0.82: the model distinguishes patients with disease from those without
# with probability 0.82 — a good value.
# Our classifier should be sensitive to false negatives. For this dataset, a false
# negative is a person that has heart disease but the classifier decided the person
# does not have any heart problems — the classifier calls an ill person healthy.
# A false positive is a healthy person classified as ill; that person will simply run
# more tests and conclude there is no heart problem. So the model should have high
# sensitivity. The optimal cut-off would be where tpr is high and fpr is low.

# ## Reducing Type II Error

# In[ ]:

# Lower the decision threshold from the default 0.5 to 0.3: more cases are classified
# as "disease" (class 1), trading extra false positives for fewer false negatives,
# i.e. higher sensitivity at the cost of specificity.
y_pred04=[]
for i in range(len(y_probab3)):
    # y_probab3[i, 1] is the predicted probability of class 1 (disease present).
    y_pred04.append(1 if y_probab3[i,1]>0.3 else 0)

# In[ ]:

# Confusion matrix for the lowered-threshold predictions.
class_names=[0,1]
fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
cm04 = confusion_matrix(y_test, y_pred04)
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title('Confusion matrix', y=1.1)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')

# In[ ]:

# Recall = TP / (TP + FN)
sensitivity04 = cm04[1,1]/(cm04[1,1]+cm04[1,0])
print('Sensitivity/Recall : ', sensitivity04)
# Specificity = TN / (TN + FP)
specificity04 = cm04[0,0]/(cm04[0,0]+cm04[0,1])
print('Specificity : ', specificity04)
# Precision = TP / (TP + FP)
precision04 = cm04[1,1]/(cm04[1,1]+cm04[0,1])
print('Precision : ', precision04)
# F1 = harmonic mean of precision and recall.
F1score04=(2*sensitivity04*precision04)/(sensitivity04+precision04)
print('F1 score : ', F1score04)

# In a medical test the big indicators of success are specificity and sensitivity.
# * Sensitivity/recall – how good a test is at detecting the positives. A test can
#   cheat and maximize this by always returning "positive".
# * Specificity – how good a test is at avoiding false alarms.
# A test can cheat and maximize this by always returning "negative".
# * Precision – how many of the positively classified were relevant. A test can cheat and
#   maximize this by only returning positive on the one result it is most confident in.
# * F1 score – a good F1 score means low false positives and low false negatives, so real
#   threats are correctly identified without being disturbed by false alarms.

# ## Logistic Regression with PCA

# In[ ]:

from sklearn.decomposition import PCA

pca = PCA(n_components=10)
# Fit the projection on the training data only (fit_transform fits and projects in
# one call — the original also called pca.fit(X_train) first, which was redundant).
trainComponents = pca.fit_transform(X_train)
# FIX: the test set must be projected with the PCA fitted on the training data;
# the original called fit_transform(X_test), which RE-FITS PCA on the test set and
# yields components inconsistent with the training projection.
testComponents = pca.transform(X_test)
lr = LogisticRegression(C=0.3, penalty='l2', class_weight={0: 1, 1: 1})
lr.fit(trainComponents, y_train)
y_pred5 = lr.predict(testComponents)
print("Logistic TRAIN score with ", format(lr.score(trainComponents, y_train)))
print("Logistic TEST score with ", format(lr.score(testComponents, y_test)))

# In[ ]:

# Cumulative share of variance explained by the 10 retained components.
pca.explained_variance_ratio_.cumsum()

# In[ ]:

class_names = [0, 1]
fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
cm04 = confusion_matrix(y_test, y_pred5)
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title('Confusion matrix', y=1.1)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')

# In[ ]:

# FIX: the metrics below were computed from cm2 (an earlier model's confusion matrix);
# they must come from cm04, the matrix of the PCA model just evaluated.
sensitivity5 = cm04[1, 1] / (cm04[1, 1] + cm04[1, 0])
print('Sensitivity/Recall : ', sensitivity5)
specificity5 = cm04[0, 0] / (cm04[0, 0] + cm04[0, 1])
print('Specificity : ', specificity5)
precision5 = cm04[1, 1] / (cm04[1, 1] + cm04[0, 1])
print('Precision : ', precision5)
F1score5 = (2 * sensitivity5 * precision5) / (sensitivity5 + precision5)
print('F1 score : ', F1score5)

# In a medical test the big indicators of success are specificity and sensitivity.
# * Sensitivity/recall – how good a test is at detecting the positives. A test can
#   cheat and maximize this by always returning "positive".
# * Specificity – how good a test is at avoiding false alarms.
A test can cheat and maximize this by always returning “negative # * Precision – how many of the positively classified were relevant. A test can cheat and maximize this by only returning positive on one result it’s most confident in. # * F1 score - a good F1 score means that you have low false positives and low false negatives, so you're correctly identifying real threats and you are not disturbed by false alarms. # ## Comparing all logistic models # | Models | Accuracy | Senstivity | Specificity | Precision | F1score # | --- | --- | --- | --- | --- | --- | # | Logistic(all variables) | 83% | .76 |.85 | .83|.8 # | Logistic(5 variables) | 81% | .73 |.9 | .85|.79 # | Logistic(rem less sign) | 83% | .76 |.85 | .83|.81 # | Logistic(PCA) | 77% | .76 |.85 | .83|.8 # ## Random Forest # A random forest is simply a collection of decision trees whose results are aggregated into one final result. Their ability to limit overfitting without substantially increasing error due to bias is why they are such powerful models. <br> # Random forests consist of multiple single trees each based on a random sample of the training data. They are typically more accurate than single decision trees. The following figure shows the decision boundary becomes more accurate and stable as more trees are added # ### Hyperparameter Tuning using GridSearch # In[ ]: from sklearn.model_selection import GridSearchCV rfc = RandomForestClassifier(oob_score=True,random_state=42) param_grid ={'n_estimators': [200,300,500],'max_features': ['auto', 'sqrt', 'log2'],'max_depth' : [4,5,6],'criterion' :['gini', 'entropy']} CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 3) CV_rfc.fit(X_train, y_train) CV_rfc.best_params_ # 1. Entropy # A decision tree is built top-down from a root node and involves partitioning the data into subsets that contain instances with similar values (homogenous). ID3 algorithm uses entropy to calculate the homogeneity of a sample. 
If the sample is completely homogeneous the entropy is zero and if the sample is an equally divided it has entropy of one. # # 2. Gini Index # Gini index says, if we select two items from a population at random then they must be of same class and probability for this is 1 if population is pure.Higher the value of Gini higher the homogeneity.It performs only Binary splits # <br> # * n_estimators represents the number of trees in the forest. Usually the higher the number of trees the better to learn the data # * max_depth represents the depth of each tree in the forest. The deeper the tree, the more splits it has and it captures more information about the data. We fit each decision tree with depths ranging from 1 to 32 and plot the training and test errors. # * max_features represents the number of features to consider when looking for the best split. # In[ ]: model = RandomForestClassifier(max_depth=6,oob_score=True,random_state=42,criterion='entropy',max_features='auto',n_estimators=300) model.fit(X_train, y_train) # In[ ]: estimator = model.estimators_[3] feature_names = [i for i in X_train.columns] y_train_str = y_train.astype('str') y_train_str[y_train_str == '0'] = 'Absence' y_train_str[y_train_str == '1'] = 'Presence' y_train_str = y_train_str.values # In[ ]: export_graphviz(estimator, out_file='tree.dot',feature_names = feature_names,class_names = y_train_str,rounded = True, proportion = True,label='root',precision = 2, filled = True) import os os.environ["PATH"] += os.pathsep + 'C:\\Users\\u22v03\\Documents\\Python Scripts\\heart\\release\\bin' from IPython.display import Image Image(filename = 'tree.png') # In[ ]: y_pred_quant = model.predict_proba(X_test)[:, 1] y_pred_bin = model.predict(X_test) # In[ ]: print("Random forest TRAIN score with ",format(model.score(X_train, y_train))) print("Random forest TEST score with ",format(model.score(X_test, y_test))) # ### Feature Importance # Feature importance is calculated as the decrease in node impurity weighted 
by the probability of reaching that node. The node probability can be calculated by the number of samples that reach the node, divided by the total number of samples. The higher the value the more important the feature # In[ ]: feature_importances = pd.DataFrame(model.feature_importances_,index = X_train.columns,columns=['importance']).sort_values('importance',ascending=False) feature_importances # In[ ]: cm4 = confusion_matrix(y_test, y_pred_bin) class_names=[0,1] fig, ax = plt.subplots() tick_marks = np.arange(len(class_names)) plt.xticks(tick_marks, class_names) plt.yticks(tick_marks, class_names) ax.xaxis.set_label_position("top") ax.yaxis.set_label_position("right") plt.tight_layout() plt.ylabel('Actual label') plt.xlabel('Predicted label') plt.savefig("cmrf.png") # In[ ]: sensitivity4 = cm4[1,1]/(cm4[1,1]+cm4[1,0])#how good a test is at detecting the positives print('Sensitivity/Recall : ', sensitivity4) specificity4 = cm4[0,0]/(cm4[0,0]+cm4[0,1])#how good a test is at avoiding false alarms print('Specificity : ', specificity4) precision4 = cm4[1,1]/(cm4[1,1]+cm4[0,1])#how many of the positively classified were relevant print('Precision : ', precision4) F1score4=(2*sensitivity4*precision4)/(sensitivity4+precision4)# low false positives and low false negatives print('F1 score : ', F1score4) # In[ ]: fpr1, tpr1, thresholds1 = roc_curve(y_test, y_pred_bin) fig, ax = plt.subplots() ax.plot(fpr1, tpr1) ax.plot([0, 1], [0, 1], transform=ax.transAxes, ls="--", c=".3") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.title('ROC curve for diabetes classifier') plt.xlabel('False Positive Rate (1 - Specificity)') plt.ylabel('True Positive Rate (Sensitivity)') plt.grid(True) plt.savefig("rocrf.png") # | Models | Accuracy | Senstivity | Specificity | Precision | F1score # | --- | --- | --- | --- | --- | --- | # | Random Forest(all variables) | 81% | .76 |.85 | .83|.8 # ## Applying Model after removing less significant attributes # A general rule in machine learning is that 
# the more features you have, the more likely your model will suffer from overfitting

# In[ ]:

# Drop the attribute the feature-importance ranking flagged as least significant.
X_train5 = X_train.drop(['RestECG_Abnormality'], axis=1)
X_test5 = X_test.drop(['RestECG_Abnormality'], axis=1)

# In[ ]:

model = RandomForestClassifier(max_depth=5, oob_score=True, random_state=42,
                               criterion='gini', max_features='auto', n_estimators=300)
model.fit(X_train5, y_train)

# In[ ]:

y_pred_quant = model.predict_proba(X_test5)[:, 1]
y_pred_bin = model.predict(X_test5)

# In[ ]:

print("Random forest TRAIN score with ", format(model.score(X_train5, y_train)))
print("Random forest TEST score with ", format(model.score(X_test5, y_test)))

# So, removing less significant columns in the random forest reduces accuracy;
# only the training time decreases.

# ## Decision Tree

# In[ ]:

from sklearn.tree import DecisionTreeClassifier

model = DecisionTreeClassifier()
model.fit(X_train, y_train)
y_pred_bin = model.predict(X_test)

# In[ ]:

param_grid = {"criterion": ['entropy', 'gini'],
              "min_samples_split": [5, 10, 15],
              "max_depth": [2, 3, 5],
              "min_samples_leaf": [5, 10, 15],
              "max_leaf_nodes": [5, 10, 15]}
CV_rfc = GridSearchCV(estimator=model, param_grid=param_grid, cv=3)
CV_rfc.fit(X_train, y_train)
CV_rfc.best_params_

# In[ ]:

# FIX: these scores belong to the DECISION TREE just trained; the copy-pasted
# labels incorrectly said "Random forest".
# NOTE(review): `model` here is the unoptimized tree — the grid-search winner is
# in CV_rfc.best_estimator_; confirm which one is meant to be reported.
print("Decision tree TRAIN score with ", format(model.score(X_train, y_train)))
print("Decision tree TEST score with ", format(model.score(X_test, y_test)))

# | Models | Accuracy | Sensitivity | Specificity | Precision | F1score
# | --- | --- | --- | --- | --- | --- |
# | Logistic(all variables) | 83% | .76 | .85 | .83 | .8
# | Logistic(5 variables) | 81% | .73 | .9 | .85 | .79
# | Logistic(rem less sign) | 83% | .76 | .85 | .83 | .81
# | Logistic(PCA) | 77% | .76 | .85 | .83 | .8
# | Decision Tree(all variables) | 72% | .76 | .85 | .83 | .8
# | Random Forest(all variables) | 81% | .76 | .85 | .83 | .8

# ## Summary
#
# In this study, the aim was to predict if a person has a heart disease or not based
# on attributes blood pressure, heart beat, exang, fbs and others.
# # Logistic regression model with all the variables and logistic regression model after removing less significant attributes performed best with an accuracy 83% , sensitivity 76%, specificity 85%, precision 83% and f1score 81%. # # # Important features:- # # Oldpeak, Thalach, CP(Asymptomatic pain), CA(>1), Thalassemia(Reversible defect)these features were showing high predictive power after eda.<br> # Oldpeak, CA, Exang_Yes, Thalach and Thal_Reversible defect are the important features for classification after applying models.<br> # So, the number of major blood vessels increases, the probability of heart disease decreases. That makes sense, as it means more blood can get to the heart. # As the heart rate increases, the probability of heart disease also increases. That makes sense, as it means having high heart rate leads to having a heart disease. # # #
#!/usr/bin/env python # coding: utf-8 # # Predicting Health Insurance Costs # ## Introduction # # In this project, we are assigned data about health insurance contractors, and we aim to construct a model that could predict a given contractor's insurance charges. # # ## Exploratory Analysis # # To start, we will perform an exploratory analysis on the data. This will help us understand the data, and decide what needs to be done to the data to preprocess it, and what techniques are necessary to build the best model. First, lets' take a look at the data. # # In[ ]: import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns #import data CSV into dataframe data_raw = pd.read_csv("../../../input/mirichoi0218_insurance/insurance.csv") #show sample of data print(data_raw.head(n=7)) # #### The data consists of 7 columns/variables: # # - **Age:** The age of the contractor in years expressed as a numerical value. # - **Sex:** The contractor's sex, taking string values as either "male" or "female". # - **BMI:** The contactor's Body Mass Index, which represents the ratio of the body's mass to its height, expressed in numerical values. # - **Children:** Number of dependents of contractors, expressed as a numerical value. # - **Smoker:** Denotes whether the contractor smokes tobacco or not. Expressed in string values of "yes" and "no". # - **Region:** The region within the US where the contractor is. It is expressed in string values of "northeast", "northwest", "southeast", and "southwest". # - **Charges:** The monetary amount that was billed by the health insurance company, expressed in a numerical value. # # To make the data more manageable, we will change all binary string values (**sex** and **smoker** features) to binary numerical values (0 and 1). **Regions** is a non-binary categorical feature. We can perform one-hot encoding on it, but we wil first analyze the data to determine if this is necessary. 
# In[ ]: #smoker = 1; non-smoker = 0 data = data_raw.replace(['yes','no'], [1,0]) #female = 1; male = 0 data = data.replace(['female','male'], [1,0]) print(data.head()) # After the changes, we will create graphical representations of the data to better understand it. # In[ ]: #prepare subplots, with 2 columns and 2 rows fig1, ((ax11,ax12),(ax13,ax14)) = plt.subplots(2,2) #set full plot size fig1.set_size_inches(15,12) #Create a pie chart of the region distribution ax11.pie(data.groupby("region").size().values, labels=data.groupby('region').size().keys(), autopct='%1.1f%%') ax11.set_title("Region Distribution", fontsize=20) ax11.axis('equal') #Create a pie chart of the sex distribution ax13.pie(data.groupby("sex").size().values, labels=data.groupby('sex').size().keys(), autopct='%1.1f%%', startangle=90) ax13.set_title("Sex Distribution", fontsize=20) ax13.axis('equal') #Create a pie chart of the smoker distribution ax12.pie(data.groupby("smoker").size().values, labels=data.groupby('smoker').size().keys(), autopct="%1.1f%%") ax12.set_title("Smoker Distribution", fontsize=20) ax12.axis('equal') #Create a histogram of the children/dependnet distribution ax14.hist('children', data=data,edgecolor =' 0.2', bins = 5) ax14.set_title("Dependent Distribution", fontsize=20) # > The contractors are divided between the 4 regions nearly equally, and are also split between the two sexes equally, as shown in the two pie charts on the left. The majority of contractors are non-smokers, and it is evident that contractors without children dominate, while the frequency decreases as the number of children goes up. 
# In[ ]: #Prepare subplots with 1 row, 2 columns fig21, (ax21,ax22) = plt.subplots(1,2) fig21.set_size_inches(15,6) #Create a density curve of the BMI distribution sns.kdeplot(data['bmi'], ax=ax21, shade=True, legend=False) ax21.set_xlabel("BMI", fontsize=14) ax21.set_ylabel("Frequency", fontsize=14) ax21.set_title("BMI Distribution", fontsize=20) #Create a histogram of the age distribution ax22.hist('age', data=data, bins=10, edgecolor='0.2') ax22.set_xlabel("Age", fontsize=14) ax22.set_ylabel("Frequency", fontsize=14) ax22.set_title("Age Distribution", fontsize=20) #Create a separate subplot for the charges distribution #This is because this is a more important graph, and is better to take up two columns fig22, ax23 = plt.subplots() fig22.set_size_inches(15,6) #Create density plot of charges distribution sns.kdeplot(data['charges'], ax=ax23, shade=True, legend=False) ax23.set_xlabel("Charges", fontsize=14) ax23.set_ylabel("Frequency", fontsize=14) ax23.set_title("Charges Distribution", fontsize=20) # > - The BMI distribution is bell-shaped and symmetrical # - The Age distribution is mostly uniform, except for ages below 25 where the histogram peaks. # - The charges distribution is skewed to the left. # # Left skewness is typical of monetary distributions. A logarithmic transformation is often useful for such distributions, and particularly monetary distributions, given that money values tend to be thought of as multiplicative more=so than additive. For example, a $100 difference between $50,100 and $50,000 might seem insignificant, whereas the same difference between $150 and $250 is not. # # The feature of interest is the **charges** feature. Let's examine the effect of categorical features on the distribution of billed charges. The categorical features are: Region, Sex, abd Smoker. We will also treat the "children" feature as categorical and examine it in the same manner, because it only has 6 values. 
# In[ ]:

# Prepare subplots of 4 rows and 1 column
fig3, (ax31, ax32, ax33, ax34) = plt.subplots(4, 1)
fig3.set_size_inches(14, 28)

# Add 4 density curves to subplot ax31 for the charges distribution, one per region.
sns.kdeplot(data.loc[data["region"] == 'southeast']["charges"], ax=ax31, shade=True, label="southeast")
sns.kdeplot(data.loc[data["region"] == 'southwest']["charges"], ax=ax31, shade=True, label="southwest")
sns.kdeplot(data.loc[data["region"] == 'northwest']["charges"], ax=ax31, shade=True, label="northwest")
sns.kdeplot(data.loc[data["region"] == 'northeast']["charges"], ax=ax31, shade=True, label="northeast")
ax31.set_ylabel("Frequency", fontsize=15)
ax31.set_xlabel('Charges', fontsize=15)
ax31.set_title("Effect of Regions on Cost", fontsize=20)

# Add 2 density curves to subplot ax32 for the charges distribution, one per sex.
# Remember: female = 1, male = 0
sns.kdeplot(data.loc[data["sex"] == 0]["charges"], ax=ax32, shade=True, label="male")
sns.kdeplot(data.loc[data["sex"] == 1]["charges"], ax=ax32, shade=True, label="female")
ax32.set_ylabel("Frequency", fontsize=15)
ax32.set_xlabel('Charges', fontsize=15)
ax32.set_title("Effect of Sex on Cost", fontsize=20)

# Add 6 density curves to subplot ax33 for the charges distribution, one per
# category of the number of dependents.
sns.kdeplot(data.loc[data["children"] == 0]["charges"], ax=ax33, shade=True, label="0 children")
sns.kdeplot(data.loc[data["children"] == 1]["charges"], ax=ax33, shade=True, label="1 children")
sns.kdeplot(data.loc[data["children"] == 2]["charges"], ax=ax33, shade=True, label="2 children")
sns.kdeplot(data.loc[data["children"] == 3]["charges"], ax=ax33, shade=True, label="3 children")
sns.kdeplot(data.loc[data["children"] == 4]["charges"], ax=ax33, shade=True, label="4 children")
# FIX: the children == 5 curve was mislabelled "0 children" in the legend.
sns.kdeplot(data.loc[data["children"] == 5]["charges"], ax=ax33, shade=True, label="5 children")
ax33.set_ylabel("Frequency", fontsize=15)
ax33.set_xlabel('Charges', fontsize=15)
ax33.set_title("Effect of Number of Dependents on Cost", fontsize=20) #Add 2 density curves to subplot ax34 for the charges distribution, one for smokers and one for non-smokers sns.kdeplot(data.loc[data["smoker"] == 1]["charges"], ax=ax34, shade=True, label='smoker') sns.kdeplot(data.loc[data["smoker"] == 0]["charges"], ax=ax34, shade=True, label='non-smoker') ax34.set_ylabel("Frequency", fontsize=15) ax34.set_xlabel('Charges', fontsize=15) ax34.set_title("Effect of Smoking on Cost", fontsize=20) # > - The difference in region introduces no change to the cost distribution # - Sex also has no effect on cost # - All values of **children** do not effect the cost distribution, except for children = 5. But it is a small difference. # - **Smoking** makes a significant difference in cost, and hence is an important feature # # > In Conclusion, region, sex, and children do not effect cost significantly, whereas smoking influences it very significantly. # Next, we will analyze continuous features' effects on cost using scatter plots. # In[ ]: #Prepare subplots with 1 row and 2 columns fig4, (ax41,ax42) = plt.subplots(1,2) fig4.set_size_inches(15,6) #create a scatterplot with best linear fit for Age vs. Charges sns.regplot("age", "charges", data, ax=ax41) ax41.set_title("Effect of Age on Cost", fontsize=20) ax41.set_xlabel("Age", fontsize=15) ax41.set_ylabel("Charges", fontsize=15) #create a scatterplot with best linear fit for BMI vs. Charges sns.regplot("bmi", "charges", data, ax=ax42) ax42.set_title("Effect of BMI on Cost", fontsize=20) ax42.set_xlabel("BMI", fontsize=15) ax42.set_ylabel("Charges", fontsize=15) # > Looking at the **Age** scatter plot, we can see that there are three distinct clusters. Most of the data resides in the bottom cluster, which more packed (smaller thickness) than the other two clusters. 
# Each of the clusters exhibits a linear form; however, a simple least-squares
# regression between **Age** and **Charges** would not be good because, as we
# can see, the best line fit carries a lot of error. A more complicated model
# would be needed.
# We can attempt to break up the data by the important feature, **Smoking**,
# and see if this reproduces any of the clusters we observed.
#
# > **BMI** correlates with charges, but we can also split it by smoking to
# see whether we get a clearer correlation.

# In[ ]:

# Prepare subplots with 1 row and 2 columns
fig5, (ax51, ax52) = plt.subplots(1, 2)
fig5.set_size_inches(15, 6)

# Split the data once by smoking status; plot smokers first so the legend
# order ("smoker", "non-smoker") matches the drawing order.
smoker_subset = data.loc[data["smoker"] == 1]
nonsmoker_subset = data.loc[data["smoker"] == 0]

# Age vs Charges scatter plots (with linear fits) on shared axes
for subset in (smoker_subset, nonsmoker_subset):
    sns.regplot("age", "charges", subset, ax=ax51)
ax51.set_title("Effect of Age on Cost", fontsize=20)
ax51.set_xlabel("Age", fontsize=15)
ax51.set_ylabel("Charges", fontsize=15)
ax51.legend(("smoker", "non-smoker"))

# BMI vs Charges scatter plots (with linear fits) on shared axes
for subset in (smoker_subset, nonsmoker_subset):
    sns.regplot("bmi", "charges", subset, ax=ax52)
ax52.set_title("Effect of BMI on Cost", fontsize=20)
ax52.set_xlabel("BMI", fontsize=15)
ax52.set_ylabel("Charges", fontsize=15)
ax52.legend(("smoker", "non-smoker"))

# > When separating the Age vs. Charges scatter plot by **smoking**, we see
# that all smokers fell into the upper two clusters observed previously. The
# non-smoking section of the data included all of the bottom cluster, plus
# some of the middle cluster.
#
# > In the BMI scatter plot, we can observe that for non-smokers BMI has
# nearly no effect on cost. For smokers, however, there is a strong trend.
# This means that it was the smoker data that was driving the correlation we
# witnessed above between all contractors' BMI and insurance charges.
#
# Based on these observations, a regression tree would be the ideal model for
# this problem.

# In[ ]:

from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
# BUG FIX: sklearn.grid_search was deprecated and removed in scikit-learn 0.20;
# GridSearchCV lives in sklearn.model_selection (already imported from above).
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import r2_score

# X: input features (drop the target plus the two uninformative columns
# identified in the exploratory analysis). Y: the target charges.
X = data.drop(['charges', 'sex', 'region'], axis=1)
Y = data["charges"]

# Split data into training and testing sets; 20% testing size.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

# Base estimator for the search
regress = DecisionTreeRegressor()

# Parameter grid for the search
params_tree = {"max_depth": np.arange(3, 6),
               "min_samples_split": np.arange(2, 8),
               "max_leaf_nodes": np.arange(2, 20)}

# Run the exhaustive grid search on the training set
grid_tree = GridSearchCV(regress, params_tree)
grid_tree.fit(X_train, Y_train)

# Display the optimized estimator (notebook cell output)
grid_tree.best_estimator_

predictions = grid_tree.predict(X_test)
# BUG FIX: r2_score expects (y_true, y_pred); the original passed them swapped,
# which changes the score because R^2 is not symmetric in its arguments.
r2_score(Y_test, predictions)

# In[ ]:

# Re-train with the parameters found above (hard-coded here from a previous
# run of the grid search; random_state pins the tree for reproducibility).
clf_tree = DecisionTreeRegressor(max_depth=3, max_leaf_nodes=8, splitter="best", random_state=1)
clf_tree.fit(X_train, Y_train)
predictions_tree = clf_tree.predict(X_test)
# BUG FIX: (y_true, y_pred) order, as above.
print(r2_score(Y_test, predictions_tree))

# In[ ]:

# Show which features are most significant
feats = {}
for feature, importance in zip(X_train.columns, clf_tree.feature_importances_):
    feats[feature] = importance  # add the name/value pair
importances = pd.DataFrame.from_dict(feats, orient='index').rename(columns={0: 'Gini-importance'})
importances.sort_values(by='Gini-importance', ascending=False).plot(kind='bar', rot=45)
print(importances.sort_values(by='Gini-importance', ascending=False))
print()

# To better understand this learning algorithm, we can visualize the decision
# tree in the graph below:
#
# ** Edit: ** I could not get
# the package pydotplus to work on kaggle.

# In[ ]:

import pydotplus
from sklearn import tree
import collections
from PIL import Image  # NOTE(review): appears unused; IPython's Image is used below

# Export the fitted tree to Graphviz DOT text and render it as a PNG,
# colouring each node's left/right children differently.
dot_data = tree.export_graphviz(clf_tree, feature_names=X_train.columns,
                                out_file=None, filled=True, rounded=True)
graph = pydotplus.graph_from_dot_data(dot_data)
colors = ('lightblue', 'orange')
edges = collections.defaultdict(list)

# Collect each node's child ids
for edge in graph.get_edge_list():
    edges[edge.get_source()].append(int(edge.get_destination()))

# For every parent: sort the two children and colour them by position
for edge in edges:
    edges[edge].sort()
    for i in range(2):
        dest = graph.get_node(str(edges[edge][i]))[0]
        dest.set_fillcolor(colors[i])

graph.write_png('tree.png')

from IPython.display import Image  # shadows PIL's Image import above
Image("tree.png")

# In[ ]:

from sklearn.linear_model import LinearRegression

# Train and evaluate a plain Linear Regression baseline
linreg = LinearRegression()
linreg.fit(X_train, Y_train)
predictions_linear = linreg.predict(X_test)
# BUG FIX: r2_score expects (y_true, y_pred); the original had them swapped.
r2_score(Y_test, predictions_linear)

# In[ ]:

from sklearn.ensemble import AdaBoostRegressor

# Train and evaluate an AdaBoost Regressor
adaboost = AdaBoostRegressor(n_estimators=5, learning_rate=2)
adaboost.fit(X_train, Y_train)
predictions_boost = adaboost.predict(X_test)
# BUG FIX: (y_true, y_pred) order, as above.
r2_score(Y_test, predictions_boost)

# In[ ]:

# Show which features are most significant for AdaBoost
# (column is labelled 'Gini-importance' for consistency with the tree cell,
# though AdaBoost importances are boosting-weighted, not Gini, measures)
feats = {}
for feature, importance in zip(X_train.columns, adaboost.feature_importances_):
    feats[feature] = importance  # add the name/value pair
importances = pd.DataFrame.from_dict(feats, orient='index').rename(columns={0: 'Gini-importance'})
importances.sort_values(by='Gini-importance', ascending=False).plot(kind='bar', rot=45)
print(importances.sort_values(by='Gini-importance', ascending=False))
print()

# # Conclusion:
#
# In conclusion, we have seen that features like "region" and "sex" do not
# influence insurance charges. This was further verified by the feature
# importances of the decision tree regressor and the AdaBoost regressor.
# As predicted during the initial analysis, smoking had the largest influence
# on costs, followed by BMI and age, respectively. The number of dependents
# was shown to have a small albeit present influence (less than 2%).
#
# The accuracy for Linear Regression was low (65%), whereas both AdaBoost and
# DecisionTree had accuracy scores near 80%. Although those scores are
# similar, DecisionTree would likely be preferable when working with large
# data, because it requires less running time and processing.

# In[ ]:
#!/usr/bin/env python # coding: utf-8 # # Analyzing Student's Behavior and Model suggestion for classification levels # ### Marlon Ferrari # > #### This Data Science project was made under Capstone Data Science IBM Certification Program. # ## Table of contents # * [Introduction: Business Problem](#introduction) # * [Data](#data) # * [Methodology](#methodology) # * [Analysis](#analysis) # * [Results and Discussion](#results) # * [Conclusion](#conclusion) # # 1. Introduction <a name="introduction"></a> # A description of the problem and a discussion of the background # # The Internet revolution brought more than social medias and faster information exchanges. It brought also a generation of people who studies through the digital environments. Under this context, the online education evolved quickly and the transformation of the societies really started. Nowadays, people in distant places, poor countries can benefit from technology to achieve information and in this case, the Massive Open Online Courses, MOOCs had a major role. # MOOCs can join people all around the world to achieve understand in a wide range of areas, delivering science and culture. # # It is known, also, that online learning suffers massive unenrollment. The logical border and the lack of motivation can make the students leave. Under this context, what are the related features which causes it? How understand the student scenario and predict his churn or low grades? # I think that is a relevant point. If MOOCs platforms achieve student understanding and predicting, I think it's possible to menage the student's churn and find a way to give them the needed motivation. # # With this set in mind, I started a search for MOOCs generated Students Data to investigate and prepare some conclusions about the theme. # # # 2. 
Data # A description of the data and how it will be used to solve the problem # # To guide my investigation, I was looking for a Set to help to understand the student's behavior, motivation and correlated characteristics in order to better understand why or how is the result of an enrollment. So, it is important to find a dataset with some key features like grade, gender, enrollment levels, and so on. Location data is also important to understand cultural marks, which will be explored by locations APIs. # Guided by the analysis exploration, I'll be able to build a model to predict student's behavior or results. # After querying correlated datasets in order to find those with better columns, I found a nice DataSet from Kaggle called "Students' Academic Performance Dataset". You can check it here https://www.kaggle.com/aljarah/xAPI-Edu-Data. # <p> The data compounds 16 columns with aggregated informations about over 480 students of a Learning Platform called Kalboard360. The datails will be shown next section. # # ## 2.1 Data Structure # As previously mentioned, this dataset includes 16 columns: # # 1. Gender - student's gender (nominal: 'Male' or 'Female’) # # 2. Nationality- student's nationality (nominal:’ Kuwait’,’ Lebanon’,’ Egypt’,’ SaudiArabia’,’ USA’,’ Jordan’,’ Venezuela’,’ Iran’,’ Tunis’,’ Morocco’,’ Syria’,’ Palestine’,’ Iraq’,’ Lybia’) # # 3. Place of birth- student's Place of birth (nominal:’ Kuwait’,’ Lebanon’,’ Egypt’,’ SaudiArabia’,’ USA’,’ Jordan’,’ Venezuela’,’ Iran’,’ Tunis’,’ Morocco’,’ Syria’,’ Palestine’,’ Iraq’,’ Lybia’) # # 4. Educational Stages- educational level student belongs (nominal: ‘lowerlevel’,’MiddleSchool’,’HighSchool’) # # 5. Grade Levels- grade student belongs (nominal: ‘G-01’, ‘G-02’, ‘G-03’, ‘G-04’, ‘G-05’, ‘G-06’, ‘G-07’, ‘G-08’, ‘G-09’, ‘G-10’, ‘G-11’, ‘G-12 ‘) # # 6. Section ID- classroom student belongs (nominal:’A’,’B’,’C’) # # 7. 
Topic- course topic (nominal:’ English’,’ Spanish’, ‘French’,’ Arabic’,’ IT’,’ Math’,’ Chemistry’, ‘Biology’, ‘Science’,’ History’,’ Quran’,’ Geology’) # # 8. Semester- school year semester (nominal:’ First’,’ Second’) # # 9. Parent responsible for student (nominal:’mom’,’father’) # # 10. Raised hand- how many times the student raises his/her hand on classroom (numeric:0-100) # # 11. Visited resources- how many times the student visits a course content(numeric:0-100) # # 12. Viewing announcements-how many times the student checks the new announcements(numeric:0-100) # # 13. Discussion groups- how many times the student participate on discussion groups (numeric:0-100) # # 14. Parent Answering Survey- parent answered the surveys which are provided from school or not (nominal:’Yes’,’No’) # # 15. Parent School Satisfaction- the Degree of parent satisfaction from school(nominal:’Yes’,’No’) # # 16. Student Absence Days-the number of absence days for each student (nominal: above-7, under-7) # # The most important characteristic of this dataset is that it has included the parent's data, which is a nice approach to understand the student. # # 3. Methodology # # The first steps are the data exploration and insight-taking approach in order to better understand the data and the columns. The purpose of this exploratory analysis is to identify hidden features and understand the relations between the features. # Next, I'll do a descritive analysis by building a dataset for a clustering algorithm. This way, the data understanding will become a more powerfull decision making, focused on student's behaviors. # Finally, I'll create a my predictive analysis by building a dataset with the best features for a supervised learning algorithm to predict the student's beahvior under certain conditions, which will achieve my final objective. # # 4. Analysis # As mentioned, this section will understand the data in order to compose the clustering dataset. 
# ### 4.1 Exploratory Analysis

# In[110]:

import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt

# In[111]:

# Load the Kalboard360 student-performance dataset (480 students, 16 columns).
dataset = pd.read_csv("../../../input/aljarah_xAPI-Edu-Data/xAPI-Edu-Data.csv")
dataset.head(5)

# In the context of understanding the student and his results, set up a
# dataframe with the relevant columns.
# In[112]:

# FIX: take an explicit copy so the working frame is not a view of `dataset`;
# later cells assign into these frames, which on a view triggers pandas'
# SettingWithCopyWarning and ambiguous writes.
df = dataset[['gender', 'PlaceofBirth', 'StageID', 'Topic', 'raisedhands',
              'VisITedResources', 'AnnouncementsView', 'Discussion',
              'ParentAnsweringSurvey', 'ParentschoolSatisfaction',
              'StudentAbsenceDays', 'Class']].copy()
df.head()

# Try to understand the results from countries
# In[113]:

df.groupby(['ParentschoolSatisfaction'])['Class'].value_counts(normalize=True)

# In[114]:

df.groupby(['ParentAnsweringSurvey'])['ParentschoolSatisfaction'].value_counts(normalize=True)

# It seems that parents who are not involved in answering the school's surveys
# are likely to become unsatisfied with the school. This can mean that well
# informed parents better understand the student's enrollment and reality and
# are more satisfied.
# ### Question: What is the relation between active parents and student's classification?
# In[115]:

df.groupby(['ParentAnsweringSurvey'])['Class'].value_counts(normalize=True)

# So, parents' active behavior definitely has an important role in the
# student's growth.
# ## Understanding student's behavior
# Next, it is important to know which characteristics are linked to student
# success, so we test the related features.
# In[116]:

# FIX: .copy() here as well — the binning cells below assign into df2.
df2 = dataset[['gender', 'raisedhands', 'VisITedResources', 'AnnouncementsView',
               'Discussion', 'StudentAbsenceDays', 'Class']].copy()
df2.head()

# ### Question: What's the relation between raising hands and classification?
# In[117]: df2['raisedhands'] = pd.cut(df2.raisedhands, bins=3, labels=np.arange(3), right=False) df2.groupby(['raisedhands'])['Class'].value_counts(normalize=True) # So, it seems that students which has low levels of raising hands are most likely to have Low classification. In the otherside, high frequency of raising hands are linked to higher classification. # Next, we're going to check the act of visiting the course resources. # In[118]: df2['VisITedResources'] = pd.cut(df2.VisITedResources, bins=3, labels=np.arange(3), right=False) df2.groupby(['VisITedResources'])['Class'].value_counts(normalize=True) # Low levels of resource exploring means lower levels of classification. High levels of visiting resources are linked to higher classification. # In[119]: df2['AnnouncementsView'] = pd.cut(df2.AnnouncementsView, bins=3, labels=np.arange(3), right=False) df2.groupby(['AnnouncementsView'])['Class'].value_counts(normalize=True) # The act of visualizing the announcements makes the students more prepared for the tasks and they are most likely to plan the assessments of the week. High visualization frequency is lined, indeed, to better classifications. # In[120]: df2['Discussion'] = pd.cut(df2.Discussion, bins=3, labels=np.arange(3), right=False) df2.groupby(['Discussion'])['Class'].value_counts(normalize=True) # Suprisingly, discussion frequency is weakly linked to higher results, at least, directly. Of course, there are higher interactions levels ocrring with Higher graded students but the data shows that discussion is a secondary act. # Concluding this step on analysis, we're going to understand the absence rate with the grade level # In[121]: df2.groupby(['StudentAbsenceDays'])['Class'].value_counts(normalize=True) # As expected, the lower the absence of the student, the higher tends to become their classification. Let's keep this feature. 
# ### 4.1.1 Clustering DataSet # Now that we know what are the important features to understand the student's behavior and classification, we're going to build a dataset for a K-Means algorithm, which will show the student's cluster. # To make the construction process easiest to understand, we're going to reimplement the dataset building phases. # In[122]: df2 = dataset[['gender','raisedhands','VisITedResources','AnnouncementsView','Discussion','StudentAbsenceDays', 'Class']] df2.tail() # Let's identify the correlations between the student's actions # In[123]: correlation = df2[['raisedhands','VisITedResources','AnnouncementsView','Discussion']].corr(method='pearson') correlation # This made clear that our best correlated features are raisedHands and visitedResources, which will compose our model dataset further. # So, we need an <b>one hot encoding</b> on columns gender,absence and class # In[124]: df2 = pd.concat([df2,pd.get_dummies(df2['gender'], prefix='gender_')], axis=1) df2 = pd.concat([df2,pd.get_dummies(df2['StudentAbsenceDays'], prefix='absence_')], axis=1) df2 = pd.concat([df2,pd.get_dummies(df2['Class'], prefix='class_')], axis=1) df2.drop(['gender'], axis = 1,inplace=True) df2.drop(['StudentAbsenceDays'], axis = 1,inplace=True) df2.drop(['Class'], axis = 1,inplace=True) df2.head() # In[125]: from sklearn.cluster import KMeans from sklearn import preprocessing # So, based on previous exploratory analysis, was possible to identify that raised hands and announcements visualization brings most results respect high classification. 
# So, both features will compose our X axis.

# In[126]:

# Feature matrix: the two best-correlated behavioral features.
X = df2[['raisedhands', 'VisITedResources']].values
# Scale both features into [0, 1] so neither dominates the distance metric.
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(X)
# Back to a plain ndarray for the clustering calls below.
X = pd.DataFrame(x_scaled).values
X[:5]

# Use the Elbow Method to find the best K for KMeans on this data.

# In[127]:

wcss = []
# Fit KMeans for k = 1..10 and record the inertia (within-cluster sum of
# squares) for each, looking for the "elbow" in the curve.
for i in range(1, 11):
    kmeans = KMeans(n_clusters = i, init = 'k-means++')
    kmeans.fit(X)
    #print (i,kmeans.inertia_)
    wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('Elbow Method')
plt.xlabel('N of Clusters')
plt.ylabel('WSS') #within cluster sum of squares
print()

# The elbow suggests the ideal K is 3; rebuild KMeans with k=3.

# In[128]:

kmeans = KMeans(n_clusters = 3, init = 'k-means++')
kmeans.fit(X)

# In[129]:

# Keep the labels and centroids from the final fit for inspection.
k_means_labels = kmeans.labels_
k_means_cluster_centers = kmeans.cluster_centers_

# In[130]:

import matplotlib.pyplot as plt
# Scatter the students coloured by cluster, with the centroids in red.
plt.scatter(X[:, 0], X[:,1], s = 10, c = kmeans.labels_)
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 30, c = 'red',label = 'Centroids')
plt.title('Students Clustering')
plt.xlabel('RaisedHands')
plt.ylabel('VisITedResources')
plt.legend()
print()

# Three clusters emerge:
# * Highly applied students
# * Mid applied students
# * Low applied students
# ### 4.1.2 Building a supervised algorithm
# Now it's time to build a model to predict the student's classification based
# on his actions in the online learning environment.
# In[131]:

# Supervised-learning dataset: the four behavioral counts plus the target.
df3 = dataset[['raisedhands','VisITedResources','AnnouncementsView','Discussion','Class']]
df3.head()

# Extract our dependent variable Y
# In[132]:

y = df3['Class'].values
y[0:5]

# Get the X independent variables and standardize them (zero mean, unit
# variance) so distance-based models treat all features equally.
# In[133]:

X = df3[['raisedhands', 'VisITedResources', 'AnnouncementsView', 'Discussion']].values
X = preprocessing.StandardScaler().fit(X).transform(X)
X[:5]

# We will check the accuracy and performance of the following models:
# * K Nearest Neighbor (KNN)
# * Decision Tree
# * Support Vector Machine
# * Logistic Regression
# Create the train / test sets.

# In[134]:

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)
print('Train set:', X_train.shape, y_train.shape)
print('Test set:', X_test.shape, y_test.shape)

# #### KNN
# In[135]:

from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics

# Sweep k = 1..9 and keep the test accuracy (and its std error) for each.
Ks = 10
mean_acc = np.zeros((Ks-1))
std_acc = np.zeros((Ks-1))
for n in range(1, Ks):
    # Train Model and Predict
    neigh = KNeighborsClassifier(n_neighbors = n).fit(X_train, y_train)
    yhat = neigh.predict(X_test)
    mean_acc[n-1] = metrics.accuracy_score(y_test, yhat)
    std_acc[n-1] = np.std(yhat == y_test) / np.sqrt(yhat.shape[0])

print("The best accuracy was with", mean_acc.max(), "with k=", mean_acc.argmax()+1)

# In[136]:

# REBUILDING THE MODEL WITH BEST K
# BUG FIX: the original hard-coded k = 4; use the k that actually maximised
# accuracy in the sweep above (the same value printed in the previous cell).
k = mean_acc.argmax() + 1
# Train Model and Predict
neigh = KNeighborsClassifier(n_neighbors = k).fit(X_train, y_train)
neigh

# #### Decision Tree
# In[137]:

from sklearn.tree import DecisionTreeClassifier
Dtree = DecisionTreeClassifier(criterion="entropy", max_depth = 4)
Dtree.fit(X_train, y_train)
Dtree

# #### Support Vector Machine - SVM
# In[138]:

from sklearn import svm
supMac = svm.SVC(kernel='rbf', gamma='auto')
supMac.fit(X_train, y_train)

# #### Logistic Regression
# In[139]:

from sklearn.linear_model import LogisticRegression
LR = LogisticRegression(C=0.01, solver='liblinear', multi_class='auto').fit(X_train, y_train)
LR

# ### Model Performance
# In[140]:

# NOTE: jaccard_similarity_score matches the scikit-learn version this
# notebook targets; newer releases renamed it to jaccard_score.
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss

# In[141]:

knn_yhat = neigh.predict(X_test)
print("KNN Jaccard index: %.2f" % jaccard_similarity_score(y_test, knn_yhat))
print("KNN F1-score: %.2f" % f1_score(y_test, knn_yhat, average='weighted') )

# In[142]:

dtree_yhat = Dtree.predict(X_test)
print("Decision Tree Jaccard index: %.2f" % jaccard_similarity_score(y_test, dtree_yhat))
print("Decision Tree F1-score: %.2f" % f1_score(y_test, dtree_yhat, average='weighted') )

# In[143]:

svm_yhat = supMac.predict(X_test)
print("SVM Jaccard index: %.2f" % jaccard_similarity_score(y_test, svm_yhat))
print("SVM F1-score: %.2f" % f1_score(y_test, svm_yhat, average='weighted') )

# In[144]:

LR_yhat = LR.predict(X_test)
LR_yhat_prob = LR.predict_proba(X_test)
print("LR Jaccard index: %.2f" % jaccard_similarity_score(y_test, LR_yhat))
print("LR F1-score: %.2f" % f1_score(y_test, LR_yhat, average='weighted') )
print("LR LogLoss: %.2f" % log_loss(y_test, LR_yhat_prob))

# # Results and Discussion
# This research focused on data analytics and building a machine learning
# model to understand the student's behavior and classification in online
# learning courses. The results support some conclusions:
# * Parents' active participation and tracking are important. Absent parents
#   are linked to absent students and to more dissatisfaction with the school.
# * Students who read announcements and visit the course resources are more
#   likely to have a higher classification.
# * Actions related to discussions are less likely to improve the student's
#   classification.
#
# As a final result, a predictive model is offered to help online platforms
# understand students' actions and take decisions. The best model was
# K-Nearest Neighbors with k=4 and an accuracy of 0.65 (Jaccard index).
# Finally, it is important to mention that the location data could not be
# used. That is because it refers to the student's place of birth, which is
# not an important feature. A more useful attribute would be the place from
# which the student connected, because high absence levels could be related
# to areas with poor Internet connectivity, such as conflict zones and
# developing countries. This research can therefore be used as a starting
# point for further work and model adaptation.
# # Conclusion
# Data analytics and Data Science are vital fields for improving the online
# course experience. Serving the right content to the right student is a
# complex but essential task for keeping students enrolled, motivated, and
# achieving high classification. This will, without doubt, improve the
# education levels of their countries and help improve their economies.
# Beyond that, online learning holds great potential value for raising
# society's standards as a whole.

# In[ ]:
def sumSimple(num1, num2):
    """Return the sum of the two numbers."""
    total = num1 + num2
    return total


def restSimple(num1, num2):
    """Return the difference num1 - num2."""
    difference = num1 - num2
    return difference


def multSimple(num1, num2):
    """Return the product of the two numbers."""
    product = num1 * num2
    return product
# Grid geometry: a 50x50 board rendered into an 800x800 pixel area.
rows = 50
cols = 50
width = 800 / cols
height = 800 / rows


def in_range(x, y):
    """Return True when the cell (x, y) lies inside the cols x rows grid."""
    if x < 0 or y < 0:
        return False
    return x < cols and y < rows


class DFS():
    """Depth-first search stepper over a grid of point objects.

    Each call to solve() expands a single node from the agenda (a LIFO
    stack), so the caller can animate the search by invoking solve()
    repeatedly until it returns True.
    """

    def __init__(self, grid, start_x, start_y, end_x, end_y, screen):
        self.grid = grid
        self.start_x = start_x
        self.start_y = start_y
        self.end_x = end_x
        self.end_y = end_y
        self.screen = screen
        self.agenda = []  # stack of points still to expand

    def solve(self):
        """Expand one node; return True when finished (found or exhausted)."""
        # Guard clause: an exhausted agenda means no route exists.
        if self.is_empty():
            print("No Path.")
            return True
        current = self.peek()
        self.remove()
        self.add_points(current)
        if current.x == self.end_x and current.y == self.end_y:
            print("Path Found.")
            return True
        current.mark_visited()

    def remove(self):
        """Discard the top of the agenda stack."""
        self.agenda.pop()

    def peek(self):
        """Return (without removing) the top of the agenda stack."""
        return self.agenda[-1]

    def is_empty(self):
        """Return True when the agenda stack holds no points."""
        return not self.agenda

    def add(self, point):
        """Push a point onto the agenda stack."""
        self.agenda.append(point)

    def add_points(self, point):
        """Push point's unvisited, non-wall neighbours onto the agenda.

        Offsets are tried in the same down/up/left/right order as the
        original hand-unrolled version, preserving the search order.
        """
        for dx, dy in ((0, 1), (0, -1), (-1, 0), (1, 0)):
            nx, ny = point.x + dx, point.y + dy
            if not in_range(nx, ny):
                continue
            neighbour = self.grid[nx][ny]
            if not neighbour.is_wall() and not neighbour.has_been_visited():
                self.add(neighbour)