mirror of
https://github.com/onyx-and-iris/grokking-algorithms.git
synced 2025-04-20 04:23:47 +01:00
Compare commits
3 Commits
f2d23203ae
...
d552050f7e
Author | SHA1 | Date | |
---|---|---|---|
d552050f7e | |||
1da4a16ec8 | |||
0f52cf0f75 |
3
chapter10/README.md
Normal file
3
chapter10/README.md
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# Approximation algorithm
|
||||||
|
|
||||||
|
- Easy to write, fast to run, useful for obtaining approximate solutions for NP-hard problems.
|
6
chapter11/README.md
Normal file
6
chapter11/README.md
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
# Dynamic Programming
|
||||||
|
|
||||||
|
A programming technique for decomposing a problem into smaller discrete subproblems.
|
||||||
|
|
||||||
|
- Useful when trying to optimize something given a constraint.
|
||||||
|
- Example: choosing items for a knapsack of size W that give the greatest value.
|
9
chapter12/README.md
Normal file
9
chapter12/README.md
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
# K-Nearest Neighbours
|
||||||
|
|
||||||
|
Useful for classification, regression and feature extraction. By examining a data point against its K nearest neighbours we can:
|
||||||
|
|
||||||
|
- categorize into a group
|
||||||
|
- predict responses
|
||||||
|
- convert the item into a list of features
|
||||||
|
|
||||||
|
A good starting point for machine learning.
|
@ -1,6 +1,5 @@
|
|||||||
import logging
|
import logging
|
||||||
import random
|
import random
|
||||||
import time
|
|
||||||
|
|
||||||
logging.basicConfig(level=logging.DEBUG)
|
logging.basicConfig(level=logging.DEBUG)
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@ -33,11 +32,10 @@ SAMPLE_SIZE = 1000
|
|||||||
numbers = random.sample(range(LOWER, UPPER), SAMPLE_SIZE)
|
numbers = random.sample(range(LOWER, UPPER), SAMPLE_SIZE)
|
||||||
numbers.sort()
|
numbers.sort()
|
||||||
|
|
||||||
start = time.time()
|
|
||||||
result = None
|
result = None
|
||||||
while result is None:
|
while result is None:
|
||||||
guess = random.randrange(LOWER, UPPER)
|
guess = random.randrange(LOWER, UPPER)
|
||||||
logger.debug(f"guess: {guess}")
|
logger.debug(f"guess: {guess}")
|
||||||
result = binary_search(numbers, 0, len(numbers) - 1, guess)
|
result = binary_search(numbers, 0, len(numbers) - 1, guess)
|
||||||
|
|
||||||
print(f"Found {guess} at index {result}. Running time {time.time() - start}")
|
print(f"Found {guess} at index {result}.")
|
||||||
|
5
chapter9/README.md
Normal file
5
chapter9/README.md
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# Shortest path for weighted graph (cost associated edges)
|
||||||
|
|
||||||
|
- Dijkstra's algorithm works when all weights are non-negative.
|
||||||
|
- If there are negative weights use Bellman-Ford.
|
||||||
|
- A priority queue backed by a min heap is more efficient than a function that repeatedly scans a list.
|
Loading…
x
Reference in New Issue
Block a user