Start accumulating inferences from assuming var = value.
def suppose(self, var, value): "Start accumulating inferences from assuming var=value." self.support_pruning() removals = [(var, a) for a in self.curr_domains[var] if a != value] self.curr_domains[var] = [value] return removals
Rule out var = value.
def prune(self, var, value, removals): "Rule out var=value." self.curr_domains[var].remove(value) if removals is not None: removals.append((var, value))
Return the partial assignment implied by the current inferences.
def infer_assignment(self): "Return the partial assignment implied by the current inferences." self.support_pruning() return dict((v, self.curr_domains[v][0]) for v in self.vars if 1 == len(self.curr_domains[v]))
Undo a supposition and all inferences from it.
def restore(self, removals): "Undo a supposition and all inferences from it." for B, b in removals: self.curr_domains[B].append(b)
Return a list of variables in current assignment that are in conflict
def conflicted_vars(self, current): "Return a list of variables in current assignment that are in conflict" return [var for var in self.vars if self.nconflicts(var, current[var], current) > 0]
The number of conflicts, as recorded with each assignment. Count conflicts in row and in up, down diagonals. If there is a queen there, it can't conflict with itself, so subtract 3.
def nconflicts(self, var, val, assignment): """The number of conflicts, as recorded with each assignment. Count conflicts in row and in up, down diagonals. If there is a queen there, it can't conflict with itself, so subtract 3.""" n = len(self.vars) c = self.rows[val] + self.downs[var+val] + self.ups[var-val+n-1] if assignment.get(var, None) == val: c -= 3 return c
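The three counters index conflicts by row (rows[val]), by down diagonal (var+val is constant along it), and by up diagonal (var-val is constant, shifted by n-1 to keep the index non-negative); subtracting 3 removes the queen's own contribution to its row and two diagonals. A minimal standalone sketch of the same bookkeeping, for a complete assignment stored as board[col] = row (the helper name is hypothetical, not part of the module):

def queen_conflicts(board, col):
    "Count how many other queens attack the queen in column col."
    n = len(board)
    rows = [0] * n
    downs = [0] * (2 * n - 1)    # indexed by col + row
    ups = [0] * (2 * n - 1)      # indexed by col - row + n - 1
    for c, r in enumerate(board):
        rows[r] += 1
        downs[c + r] += 1
        ups[c - r + n - 1] += 1
    r = board[col]
    return rows[r] + downs[col + r] + ups[col - r + n - 1] - 3

print queen_conflicts([0, 1, 2, 3], 0)   # 3: the other three queens share the up diagonal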
Assign var and keep track of conflicts.
def assign(self, var, val, assignment): "Assign var, and keep track of conflicts." oldval = assignment.get(var, None) if val != oldval: if oldval is not None: # Remove old val if there was one self.record_conflict(assignment, var, oldval, -1) self.record_conflict(assignment, var, val, +1) CSP.assign(self, var, val, assignment)
Remove var from assignment (if it is there) and track conflicts.
def unassign(self, var, assignment): "Remove var from assignment (if it is there) and track conflicts." if var in assignment: self.record_conflict(assignment, var, assignment[var], -1) CSP.unassign(self, var, assignment)
Record conflicts caused by addition or deletion of a Queen.
def record_conflict(self, assignment, var, val, delta): "Record conflicts caused by addition or deletion of a Queen." n = len(self.vars) self.rows[val] += delta self.downs[var + val] += delta self.ups[var - val + n - 1] += delta
Print the queens and the nconflicts values (for debugging).
def display(self, assignment):
    "Print the queens and the nconflicts values (for debugging)."
    n = len(self.vars)
    for val in range(n):
        for var in range(n):
            if assignment.get(var, '') == val: ch = 'Q'
            elif (var + val) % 2 == 0: ch = '.'
            else: ch = '-'
            print ch,
        print ' ',
        for var in range(n):
            if assignment.get(var, '') == val: ch = '*'
            else: ch = ' '
            print str(self.nconflicts(var, val, assignment)) + ch,
        print
Find the best segmentation of the string of characters given the UnigramTextModel P.
def viterbi_segment(text, P):
    """Find the best segmentation of the string of characters, given the
    UnigramTextModel P."""
    # best[i] = best probability for text[0:i]
    # words[i] = best word ending at position i
    n = len(text)
    words = [''] + list(text)
    best = [1.0] + [0.0] * n
    ## Fill in the vectors best, words via dynamic programming
    for i in range(n+1):
        for j in range(0, i):
            w = text[j:i]
            if P[w] * best[i - len(w)] >= best[i]:
                best[i] = P[w] * best[i - len(w)]
                words[i] = w
    ## Now recover the sequence of best words
    sequence = []; i = len(words) - 1
    while i > 0:
        sequence[0:0] = [words[i]]
        i = i - len(words[i])
    ## Return sequence of best words and overall probability
    return sequence, best[-1]
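A small usage sketch: the toy model below merely stands in for UnigramTextModel (it is not the class from this module); known words get their listed probability and anything else gets a vanishingly small one, so the dynamic program prefers segmentations made of known words.

class ToyUnigram:
    "Dict-like stand-in for UnigramTextModel: P[w] is a word probability."
    def __init__(self, probs, unknown=1e-10):
        self.probs, self.unknown = probs, unknown
    def __getitem__(self, word):
        return self.probs.get(word, self.unknown)

P = ToyUnigram({'it': 0.02, 'is': 0.02, 'easy': 0.01, 'to': 0.03,
                'read': 0.01, 'words': 0.005})
print viterbi_segment('itiseasytoreadwords', P)
# (['it', 'is', 'easy', 'to', 'read', 'words'], 6e-12) -- the probability is
# the product of the six word probabilities.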
Encodes text using a code which is a permutation of the alphabet.
def encode(plaintext, code): "Encodes text, using a code which is a permutation of the alphabet." from string import maketrans trans = maketrans(alphabet + alphabet.upper(), code + code.upper()) return plaintext.translate(trans)
Count 1 for P[(w1, ..., wn)] and for P(wn | (w1, ..., wn-1)).
def add(self, ngram): """Count 1 for P[(w1, ..., wn)] and for P(wn | (w1, ..., wn-1)""" CountingProbDist.add(self, ngram) self.cond_prob[ngram[:-1]].add(ngram[-1])
Add each of the tuples words[i:i+n], using a sliding window. Prefix some copies of the empty word, '', to make the start work.
def add_sequence(self, words): """Add each of the tuple words[i:i+n], using a sliding window. Prefix some copies of the empty word, '', to make the start work.""" n = self.n words = ['',] * (n-1) + words for i in range(len(words)-n): self.add(tuple(words[i:i+n]))
Build up a random sample of text nwords words long, using the conditional probability given the n-1 preceding words.
def samples(self, nwords): """Build up a random sample of text nwords words long, using the conditional probability given the n-1 preceding words.""" n = self.n nminus1gram = ('',) * (n-1) output = [] for i in range(nwords): if nminus1gram not in self.cond_prob: nminus1gram = ('',) * (n-1) # Cannot continue, so restart. wn = self.cond_prob[nminus1gram].sample() output.append(wn) nminus1gram = nminus1gram[1:] + (wn,) return ' '.join(output)
Index a whole collection of files.
def index_collection(self, filenames): "Index a whole collection of files." for filename in filenames: self.index_document(open(filename).read(), filename)
Index the text of a document.
def index_document(self, text, url): "Index the text of a document." ## For now, use first line for title title = text[:text.index('\n')].strip() docwords = words(text) docid = len(self.documents) self.documents.append(Document(title, url, len(docwords))) for word in docwords: if word not in self.stopwords: self.index[word][docid] += 1
Return a list of n (score, docid) pairs for the best matches. Also handle the special syntax for 'learn: command'.
def query(self, query_text, n=10): """Return a list of n (score, docid) pairs for the best matches. Also handle the special syntax for 'learn: command'.""" if query_text.startswith("learn:"): doctext = os.popen(query_text[len("learn:"):], 'r').read() self.index_document(doctext, query_text) return [] qwords = [w for w in words(query_text) if w not in self.stopwords] shortest = argmin(qwords, lambda w: len(self.index[w])) docs = self.index[shortest] results = [(sum([self.score(w, d) for w in qwords]), d) for d in docs] results.sort(); results.reverse() return results[:n]
Compute a score for this word on this docid.
def score(self, word, docid): "Compute a score for this word on this docid." ## There are many options; here we take a very simple approach return (math.log(1 + self.index[word][docid]) / math.log(1 + self.documents[docid].nwords))
Present the results as a list.
def present(self, results): "Present the results as a list." for (score, d) in results: doc = self.documents[d] print ("%5.2f|%25s | %s" % (100 * score, doc.url, doc.title[:45].expandtabs()))
Get results for the query and present them.
def present_results(self, query_text, n=10): "Get results for the query and present them." self.present(self.query(query_text, n))
Return a score for text based on how common letter pairs are.
def score(self, plaintext): "Return a score for text based on how common letters pairs are." s = 1.0 for bi in bigrams(plaintext): s = s * self.P2[bi] return s
Search for a decoding of the ciphertext.
def decode(self, ciphertext): "Search for a decoding of the ciphertext." self.ciphertext = ciphertext problem = PermutationDecoderProblem(decoder=self) return search.best_first_tree_search( problem, lambda node: self.score(node.state))
Score is product of word scores, unigram scores, and bigram scores. This can get very small, so we use logs and exp.
def score(self, code): """Score is product of word scores, unigram scores, and bigram scores. This can get very small, so we use logs and exp.""" text = permutation_decode(self.ciphertext, code) logP = (sum([log(self.Pwords[word]) for word in words(text)]) + sum([log(self.P1[c]) for c in text]) + sum([log(self.P2[b]) for b in bigrams(text)])) return exp(logP)
Returns a SettingDict object.
def get_value(self, context, default): """ Returns a ``SettingDict`` object. """ if default is None: settings = self.setting_model.objects.as_dict() else: settings = self.setting_model.objects.as_dict(default=default) return settings
Returns the value of the named setting.
def get_value(self, context, name, default): """ Returns the value of the named setting. """ settings = self.setting_model.objects.filter(name=name) if default is None: settings = settings.as_dict() else: settings = settings.as_dict(default=default) value = settings[name] return value
Returns the value of the named setting, or renders and stores the tag's nodelist as its value if the setting does not already exist.
def render_tag(self, context, name, nodelist): """ Returns the value of the named setting. """ # Use `try` and `except` instead of `setdefault()` so we can skip # rendering the nodelist when the setting already exists. settings = self.setting_model.objects.filter(name=name).as_dict() try: value = settings[name] except KeyError: value = settings[name] = nodelist.render(context) return value
Solving an MDP by value iteration. [Fig. 17.4]
def value_iteration(mdp, epsilon=0.001): "Solving an MDP by value iteration. [Fig. 17.4]" U1 = dict([(s, 0) for s in mdp.states]) R, T, gamma = mdp.R, mdp.T, mdp.gamma while True: U = U1.copy() delta = 0 for s in mdp.states: U1[s] = R(s) + gamma * max([sum([p * U[s1] for (p, s1) in T(s, a)]) for a in mdp.actions(s)]) delta = max(delta, abs(U1[s] - U[s])) if delta < epsilon * (1 - gamma) / gamma: return U
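A hand-rolled two-state MDP to exercise value_iteration; it only implements the interface used above (states, R, T, actions, gamma) and is not the GridMDP class from this module.

class TinyMDP:
    "Two states: 'work' (reward 0) and 'done' (reward 1, absorbing)."
    states = ['work', 'done']
    gamma = 0.9
    def R(self, s):
        return {'work': 0.0, 'done': 1.0}[s]
    def T(self, s, a):
        if s == 'work':
            return [(0.9, 'done'), (0.1, 'work')]   # the single action usually succeeds
        return [(1.0, 'done')]
    def actions(self, s):
        return ['go']

U = value_iteration(TinyMDP())
# U['done'] converges to R(done)/(1-gamma) = 10; U['work'] to about 8.9.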
Given an MDP and a utility function U, determine the best policy, as a mapping from state to action. (Equation 17.4)
def best_policy(mdp, U): """Given an MDP and a utility function U, determine the best policy, as a mapping from state to action. (Equation 17.4)""" pi = {} for s in mdp.states: pi[s] = argmax(mdp.actions(s), lambda a:expected_utility(a, s, U, mdp)) return pi
The expected utility of doing a in state s according to the MDP and U.
def expected_utility(a, s, U, mdp): "The expected utility of doing a in state s, according to the MDP and U." return sum([p * U[s1] for (p, s1) in mdp.T(s, a)])
Solve an MDP by policy iteration [Fig. 17.7]
def policy_iteration(mdp): "Solve an MDP by policy iteration [Fig. 17.7]" U = dict([(s, 0) for s in mdp.states]) pi = dict([(s, random.choice(mdp.actions(s))) for s in mdp.states]) while True: U = policy_evaluation(pi, U, mdp) unchanged = True for s in mdp.states: a = argmax(mdp.actions(s), lambda a: expected_utility(a,s,U,mdp)) if a != pi[s]: pi[s] = a unchanged = False if unchanged: return pi
Return an updated utility mapping U from each state in the MDP to its utility, using an approximation (modified policy iteration).
def policy_evaluation(pi, U, mdp, k=20): """Return an updated utility mapping U from each state in the MDP to its utility, using an approximation (modified policy iteration).""" R, T, gamma = mdp.R, mdp.T, mdp.gamma for i in range(k): for s in mdp.states: U[s] = R(s) + gamma * sum([p * U[s1] for (p, s1) in T(s, pi[s])]) return U
Return the state that results from going in this direction.
def go(self, state, direction): "Return the state that results from going in this direction." state1 = vector_add(state, direction) return if_(state1 in self.states, state1, state)
Convert a mapping from (x, y) to v into a [[..., v, ...]] grid.
def to_grid(self, mapping): """Convert a mapping from (x, y) to v into a [[..., v, ...]] grid.""" return list(reversed([[mapping.get((x,y), None) for x in range(self.cols)] for y in range(self.rows)]))
Returns a SettingDict object for this queryset.
def as_dict(self, default=None): """ Returns a ``SettingDict`` object for this queryset. """ settings = SettingDict(queryset=self, default=default) return settings
Creates and returns an object of the appropriate type for value.
def create(self, name, value): """ Creates and returns an object of the appropriate type for ``value``. """ if value is None: raise ValueError('Setting value cannot be `None`.') model = Setting.get_model_for_value(value) # Call `create()` method on the super class to avoid recursion. obj = super(SettingQuerySet, model.objects.all()) \ .create(name=name, value=value) return obj
Iterates through setting value subclasses, returning one that is compatible with the type of value. Calls is_compatible() on each subclass.
def get_model_for_value(cls, value): """ Iterates through setting value subclasses, returning one that is compatible with the type of ``value``. Calls ``is_compatible()`` on each subclass. """ for related_object in get_all_related_objects(cls._meta): model = getattr(related_object, 'related_model', related_object.model) if issubclass(model, cls): if model.is_compatible(value): return model raise ValueError( 'No compatible `SettingValueModel` subclass for %r' % value)
Returns True if this model should be used to store value.
def is_compatible(cls, value): """ Returns ``True`` if this model should be used to store ``value``. Checks if ``value`` is an instance of ``value_type``. Override this method if you need more advanced behaviour. For example, to distinguish between single and multi-line text. """ if not hasattr(cls, 'value_type'): raise NotImplementedError( 'You must define a `value_type` attribute or override the ' '`is_compatible()` method on `SettingValueModel` subclasses.') return isinstance(value, cls.value_type)
Search through the successors of a problem to find a goal. The argument frontier should be an empty queue. Don't worry about repeated paths to a state. [Fig. 3.7]
def tree_search(problem, frontier): """Search through the successors of a problem to find a goal. The argument frontier should be an empty queue. Don't worry about repeated paths to a state. [Fig. 3.7]""" frontier.append(Node(problem.initial)) while frontier: node = frontier.pop() if problem.goal_test(node.state): return node frontier.extend(node.expand(problem)) return None
Search through the successors of a problem to find a goal. The argument frontier should be an empty queue. If two paths reach a state, only use the first one. [Fig. 3.7]
def graph_search(problem, frontier): """Search through the successors of a problem to find a goal. The argument frontier should be an empty queue. If two paths reach a state, only use the first one. [Fig. 3.7]""" frontier.append(Node(problem.initial)) explored = set() while frontier: node = frontier.pop() if problem.goal_test(node.state): return node explored.add(node.state) frontier.extend(child for child in node.expand(problem) if child.state not in explored and child not in frontier) return None
[Fig. 3.11]
def breadth_first_search(problem): "[Fig. 3.11]" node = Node(problem.initial) if problem.goal_test(node.state): return node frontier = FIFOQueue() frontier.append(node) explored = set() while frontier: node = frontier.pop() explored.add(node.state) for child in node.expand(problem): if child.state not in explored and child not in frontier: if problem.goal_test(child.state): return child frontier.append(child) return None
Search the nodes with the lowest f scores first. You specify the function f(node) that you want to minimize; for example, if f is a heuristic estimate to the goal, then we have greedy best first search; if f is node.depth then we have breadth-first search. There is a subtlety: the line f = memoize(f, 'f') means that the f values will be cached on the nodes as they are computed. So after doing a best first search you can examine the f values of the path returned.
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best first
    search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    f = memoize(f, 'f')
    node = Node(problem.initial)
    if problem.goal_test(node.state):
        return node
    frontier = PriorityQueue(min, f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    return None
[Fig. 3.17]
def depth_limited_search(problem, limit=50): "[Fig. 3.17]" def recursive_dls(node, problem, limit): if problem.goal_test(node.state): return node elif node.depth == limit: return 'cutoff' else: cutoff_occurred = False for child in node.expand(problem): result = recursive_dls(child, problem, limit) if result == 'cutoff': cutoff_occurred = True elif result is not None: return result return if_(cutoff_occurred, 'cutoff', None) # Body of depth_limited_search: return recursive_dls(Node(problem.initial), problem, limit)
[Fig. 3.18]
def iterative_deepening_search(problem): "[Fig. 3.18]" for depth in xrange(sys.maxint): result = depth_limited_search(problem, depth) if result != 'cutoff': return result
A* search is best-first graph search with f(n) = g(n) + h(n). You need to specify the h function when you call astar_search, or else in your Problem subclass.
def astar_search(problem, h=None): """A* search is best-first graph search with f(n) = g(n)+h(n). You need to specify the h function when you call astar_search, or else in your Problem subclass.""" h = memoize(h or problem.h, 'h') return best_first_graph_search(problem, lambda n: n.path_cost + h(n))
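A typical call, assuming the GraphProblem class and the romania map defined elsewhere in this module; this is a sketch rather than captured output.

problem = GraphProblem('A', 'B', romania)        # Arad to Bucharest
goal = astar_search(problem)                     # h defaults to problem.h (straight-line distance)
print [node.state for node in goal.path()]       # e.g. ['A', 'S', 'R', 'P', 'B']
print goal.path_cost                             # total driving distance along that route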
[Fig. 3.26]
def recursive_best_first_search(problem, h=None):
    "[Fig. 3.26]"
    h = memoize(h or problem.h, 'h')

    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            return node, 0   # (The second value is immaterial)
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            s.f = max(s.path_cost + h(s), node.f)
        while True:
            successors.sort(lambda x, y: cmp(x.f, y.f))  # Order by lowest f value
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = infinity
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f

    node = Node(problem.initial)
    node.f = h(node)
    result, bestf = RBFS(problem, node, infinity)
    return result
From the initial node, keep choosing the neighbor with highest value, stopping when no neighbor is better. [Fig. 4.2]
def hill_climbing(problem): """From the initial node, keep choosing the neighbor with highest value, stopping when no neighbor is better. [Fig. 4.2]""" current = Node(problem.initial) while True: neighbors = current.expand(problem) if not neighbors: break neighbor = argmax_random_tie(neighbors, lambda node: problem.value(node.state)) if problem.value(neighbor.state) <= problem.value(current.state): break current = neighbor return current.state
One possible schedule function for simulated annealing
def exp_schedule(k=20, lam=0.005, limit=100): "One possible schedule function for simulated annealing" return lambda t: if_(t < limit, k * math.exp(-lam * t), 0)
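A quick look at what the default schedule produces: the temperature starts at k, decays exponentially with rate lam, and is cut to zero once t reaches limit, which is what makes simulated_annealing below terminate.

schedule = exp_schedule(k=20, lam=0.005, limit=100)
for t in (0, 50, 99, 100):
    print t, schedule(t)
# 0 -> 20.0, 50 -> ~15.6, 99 -> ~12.2, 100 -> 0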
[Fig. 4.5]
def simulated_annealing(problem, schedule=exp_schedule()): "[Fig. 4.5]" current = Node(problem.initial) for t in xrange(sys.maxint): T = schedule(t) if T == 0: return current neighbors = current.expand(problem) if not neighbors: return current next = random.choice(neighbors) delta_e = problem.value(next.state) - problem.value(current.state) if delta_e > 0 or probability(math.exp(delta_e/T)): current = next
Call genetic_algorithm on the appropriate parts of a problem. This requires the problem to have states that can mate and mutate plus a value method that scores states.
def genetic_search(problem, fitness_fn, ngen=1000, pmut=0.1, n=20): """Call genetic_algorithm on the appropriate parts of a problem. This requires the problem to have states that can mate and mutate, plus a value method that scores states.""" s = problem.initial_state states = [problem.result(s, a) for a in problem.actions(s)] random.shuffle(states) return genetic_algorithm(states[:n], problem.value, ngen, pmut)
[Fig. 4.8]
def genetic_algorithm(population, fitness_fn, ngen=1000, pmut=0.1):
    "[Fig. 4.8]"
    for i in range(ngen):
        new_population = []
        for j in range(len(population)):
            fitnesses = map(fitness_fn, population)
            p1, p2 = weighted_sample_with_replacement(population, fitnesses, 2)
            child = p1.mate(p2)
            if random.uniform(0, 1) < pmut:
                child.mutate()
            new_population.append(child)
        population = new_population
    return argmax(population, fitness_fn)
Construct a random graph, with the specified nodes, and random links. The nodes are laid out randomly on a (width x height) rectangle. Then each node is connected to the min_links nearest neighbors. Because inverse links are added, some nodes will have more connections. The distance between nodes is the hypotenuse times curvature(), where curvature() defaults to a random number between 1.1 and 1.5.
def RandomGraph(nodes=range(10), min_links=2, width=400, height=300,
                curvature=lambda: random.uniform(1.1, 1.5)):
    """Construct a random graph, with the specified nodes, and random links.
    The nodes are laid out randomly on a (width x height) rectangle.
    Then each node is connected to the min_links nearest neighbors.
    Because inverse links are added, some nodes will have more connections.
    The distance between nodes is the hypotenuse times curvature(),
    where curvature() defaults to a random number between 1.1 and 1.5."""
    g = UndirectedGraph()
    g.locations = {}
    ## Build the cities
    for node in nodes:
        g.locations[node] = (random.randrange(width), random.randrange(height))
    ## Build roads from each city to at least min_links nearest neighbors.
    for i in range(min_links):
        for node in nodes:
            if len(g.get(node)) < min_links:
                here = g.locations[node]
                def distance_to_node(n):
                    if n is node or g.get(node, n): return infinity
                    return distance(g.locations[n], here)
                neighbor = argmin(nodes, distance_to_node)
                d = distance(g.locations[neighbor], here) * curvature()
                g.connect(node, neighbor, int(d))
    return g
Return a random Boggle board of size n x n. We represent a board as a linear list of letters.
def random_boggle(n=4): """Return a random Boggle board of size n x n. We represent a board as a linear list of letters.""" cubes = [cubes16[i % 16] for i in range(n*n)] random.shuffle(cubes) return map(random.choice, cubes)
Print the board in a 2-d array.
def print_boggle(board): "Print the board in a 2-d array." n2 = len(board); n = exact_sqrt(n2) for i in range(n2): if i % n == 0 and i > 0: print if board[i] == 'Q': print 'Qu', else: print str(board[i]) + ' ', print
Return a list of lists, where the i-th element is the list of indexes for the neighbors of square i.
def boggle_neighbors(n2, cache={}):
    """Return a list of lists, where the i-th element is the list of indexes
    for the neighbors of square i."""
    if cache.get(n2):
        return cache.get(n2)
    n = exact_sqrt(n2)
    neighbors = [None] * n2
    for i in range(n2):
        neighbors[i] = []
        on_top = i < n
        on_bottom = i >= n2 - n
        on_left = i % n == 0
        on_right = (i+1) % n == 0
        if not on_top:
            neighbors[i].append(i - n)
            if not on_left:  neighbors[i].append(i - n - 1)
            if not on_right: neighbors[i].append(i - n + 1)
        if not on_bottom:
            neighbors[i].append(i + n)
            if not on_left:  neighbors[i].append(i + n - 1)
            if not on_right: neighbors[i].append(i + n + 1)
        if not on_left:  neighbors[i].append(i - 1)
        if not on_right: neighbors[i].append(i + 1)
    cache[n2] = neighbors
    return neighbors
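A worked example on the smallest board (n2 = 4, a 2 x 2 grid laid out row-major as squares 0 1 / 2 3): every square touches the other three, and the ordering inside each list follows the append order above.

print boggle_neighbors(4)
# [[2, 3, 1], [3, 2, 0], [0, 1, 3], [1, 0, 2]]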
If n2 is a perfect square, return its square root, else raise error.
def exact_sqrt(n2): "If n2 is a perfect square, return its square root, else raise error." n = int(math.sqrt(n2)) assert n * n == n2 return n
Solve inverse Boggle by hill-climbing: find a high-scoring board by starting with a random one and changing it.
def boggle_hill_climbing(board=None, ntimes=100, verbose=True): """Solve inverse Boggle by hill-climbing: find a high-scoring board by starting with a random one and changing it.""" finder = BoggleFinder() if board is None: board = random_boggle() best = len(finder.set_board(board)) for _ in range(ntimes): i, oldc = mutate_boggle(board) new = len(finder.set_board(board)) if new > best: best = new if verbose: print best, _, board else: board[i] = oldc ## Change back if verbose: print_boggle(board) return board, best
Prints a table of results comparing breadth_first_tree_search, breadth_first_search, depth_first_graph_search, iterative_deepening_search, depth_limited_search and recursive_best_first_search on Romania(A, B), Romania(O, N) and Australia; see the sample output in the docstring below.
def compare_graph_searchers(): """Prints a table of results like this: >>> compare_graph_searchers() Searcher Romania(A, B) Romania(O, N) Australia breadth_first_tree_search < 21/ 22/ 59/B> <1158/1159/3288/N> < 7/ 8/ 22/WA> breadth_first_search < 7/ 11/ 18/B> < 19/ 20/ 45/N> < 2/ 6/ 8/WA> depth_first_graph_search < 8/ 9/ 20/B> < 16/ 17/ 38/N> < 4/ 5/ 11/WA> iterative_deepening_search < 11/ 33/ 31/B> < 656/1815/1812/N> < 3/ 11/ 11/WA> depth_limited_search < 54/ 65/ 185/B> < 387/1012/1125/N> < 50/ 54/ 200/WA> recursive_best_first_search < 5/ 6/ 15/B> <5887/5888/16532/N> < 11/ 12/ 43/WA>""" compare_searchers(problems=[GraphProblem('A', 'B', romania), GraphProblem('O', 'N', romania), GraphProblem('Q', 'WA', australia)], header=['Searcher', 'Romania(A, B)', 'Romania(O, N)', 'Australia'])
List the nodes reachable in one step from this node.
def expand(self, problem): "List the nodes reachable in one step from this node." return [self.child_node(problem, action) for action in problem.actions(self.state)]
Fig. 3.10
def child_node(self, problem, action): "Fig. 3.10" next = problem.result(self.state, action) return Node(next, self, action, problem.path_cost(self.path_cost, self.state, action, next))
Return a list of nodes forming the path from the root to this node.
def path(self): "Return a list of nodes forming the path from the root to this node." node, path_back = self, [] while node: path_back.append(node) node = node.parent return list(reversed(path_back))
Return a new individual crossing self and other.
def mate(self, other): "Return a new individual crossing self and other." c = random.randrange(len(self.genes)) return self.__class__(self.genes[:c] + other.genes[c:])
Make a digraph into an undirected graph by adding symmetric edges.
def make_undirected(self): "Make a digraph into an undirected graph by adding symmetric edges." for a in self.dict.keys(): for (b, distance) in self.dict[a].items(): self.connect1(b, a, distance)
Add a link from A to B of given distance, and also add the inverse link if the graph is undirected.
def connect(self, A, B, distance=1): """Add a link from A and B of given distance, and also add the inverse link if the graph is undirected.""" self.connect1(A, B, distance) if not self.directed: self.connect1(B, A, distance)
Add a link from A to B of given distance in one direction only.
def connect1(self, A, B, distance): "Add a link from A to B of given distance, in one direction only." self.dict.setdefault(A,{})[B] = distance
Return a link distance or a dict of {node: distance} entries. .get(a, b) returns the distance or None; .get(a) returns a dict of {node: distance} entries, possibly {}.
def get(self, a, b=None): """Return a link distance or a dict of {node: distance} entries. .get(a,b) returns the distance or None; .get(a) returns a dict of {node: distance} entries, possibly {}.""" links = self.dict.setdefault(a, {}) if b is None: return links else: return links.get(b)
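A quick usage sketch, assuming the UndirectedGraph constructor from this module (not shown in this excerpt), which accepts a {node: {node: distance}} dict and makes the links symmetric via make_undirected.

g = UndirectedGraph(dict(A=dict(B=1, C=2)))
print g.get('A', 'B')    # 1
print g.get('B')         # {'A': 1} -- inverse link added by make_undirected
print g.get('A', 'Z')    # None -- no such link
print g.get('Z')         # {} -- unknown node yields an empty dict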
h function is straight-line distance from a node's state to goal.
def h(self, node): "h function is straight-line distance from a node's state to goal." locs = getattr(self.graph, 'locations', None) if locs: return int(distance(locs[node.state], locs[self.goal])) else: return infinity
In the leftmost empty column, try all non-conflicting rows.
def actions(self, state): "In the leftmost empty column, try all non-conflicting rows." if state[-1] is not None: return [] # All columns filled; no successors else: col = state.index(None) return [row for row in range(self.N) if not self.conflicted(state, row, col)]
Place the next queen at the given row.
def result(self, state, row): "Place the next queen at the given row." col = state.index(None) new = state[:] new[col] = row return new
Would placing a queen at (row, col) conflict with anything?
def conflicted(self, state, row, col): "Would placing a queen at (row, col) conflict with anything?" return any(self.conflict(row, col, state[c], c) for c in range(col))
Would putting two queens in (row1, col1) and (row2, col2) conflict?
def conflict(self, row1, col1, row2, col2): "Would putting two queens in (row1, col1) and (row2, col2) conflict?" return (row1 == row2 ## same row or col1 == col2 ## same column or row1-col1 == row2-col2 ## same \ diagonal or row1+col1 == row2+col2)
See if prefix is in dictionary, as a full word or as a prefix. Return two values: the first is the lowest i such that words[i].startswith(prefix), or is None; the second is True iff prefix itself is in the Wordlist.
def lookup(self, prefix, lo=0, hi=None): """See if prefix is in dictionary, as a full word or as a prefix. Return two values: the first is the lowest i such that words[i].startswith(prefix), or is None; the second is True iff prefix itself is in the Wordlist.""" words = self.words if hi is None: hi = len(words) i = bisect.bisect_left(words, prefix, lo, hi) if i < len(words) and words[i].startswith(prefix): return i, (words[i] == prefix) else: return None, False
Set the board and find all the words in it.
def set_board(self, board=None): "Set the board, and find all the words in it." if board is None: board = random_boggle() self.board = board self.neighbors = boggle_neighbors(len(board)) self.found = {} for i in range(len(board)): lo, hi = self.wordlist.bounds[board[i]] self.find(lo, hi, i, [], '') return self
Looking in square i, find the words that continue the prefix, considering the entries in self.wordlist.words[lo:hi], and not revisiting the squares in visited.
def find(self, lo, hi, i, visited, prefix): """Looking in square i, find the words that continue the prefix, considering the entries in self.wordlist.words[lo:hi], and not revisiting the squares in visited.""" if i in visited: return wordpos, is_word = self.wordlist.lookup(prefix, lo, hi) if wordpos is not None: if is_word: self.found[prefix] = True visited.append(i) c = self.board[i] if c == 'Q': c = 'QU' prefix += c for j in self.neighbors[i]: self.find(wordpos, hi, j, visited, prefix) visited.pop()
The total score for the words found according to the rules.
def score(self): "The total score for the words found, according to the rules." return sum([self.scores[len(w)] for w in self.words()])
Wrap the agent's program to print its input and output. This will let you see what the agent is doing in the environment.
def TraceAgent(agent): """Wrap the agent's program to print its input and output. This will let you see what the agent is doing in the environment.""" old_program = agent.program def new_program(percept): action = old_program(percept) print '%s perceives %s and does %s' % (agent, percept, action) return action agent.program = new_program return agent
This agent selects an action based on the percept sequence. It is practical only for tiny domains. To customize it, provide as table a dictionary of all {percept_sequence: action} pairs. [Fig. 2.7]
def TableDrivenAgentProgram(table): """This agent selects an action based on the percept sequence. It is practical only for tiny domains. To customize it, provide as table a dictionary of all {percept_sequence:action} pairs. [Fig. 2.7]""" percepts = [] def program(percept): percepts.append(percept) action = table.get(tuple(percepts)) return action return program
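A tiny illustration with a made-up two-percept domain (the percepts and actions here are hypothetical); note that the table is keyed on the whole percept sequence, which is why this approach only scales to tiny domains.

table = {('cold',): 'heat',
         ('hot',): 'cool',
         ('cold', 'cold'): 'heat more'}
program = TableDrivenAgentProgram(table)
print program('cold')    # 'heat'
print program('cold')    # 'heat more' -- the sequence is now ('cold', 'cold')
print program('hot')     # None -- ('cold', 'cold', 'hot') is not in the table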
This agent takes action based solely on the percept. [Fig. 2.10]
def SimpleReflexAgentProgram(rules, interpret_input): "This agent takes action based solely on the percept. [Fig. 2.10]" def program(percept): state = interpret_input(percept) rule = rule_match(state, rules) action = rule.action return action return program
This agent takes action based on the percept and state. [Fig. 2.12]
def ModelBasedReflexAgentProgram(rules, update_state): "This agent takes action based on the percept and state. [Fig. 2.12]" def program(percept): program.state = update_state(program.state, program.action, percept) rule = rule_match(program.state, rules) action = rule.action return action program.state = program.action = None return program
[Fig. 2.3]
def TableDrivenVacuumAgent(): "[Fig. 2.3]" table = {((loc_A, 'Clean'),): 'Right', ((loc_A, 'Dirty'),): 'Suck', ((loc_B, 'Clean'),): 'Left', ((loc_B, 'Dirty'),): 'Suck', ((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right', ((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck', # ... ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right', ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck', # ... } return Agent(TableDrivenAgentProgram(table))
A reflex agent for the two-state vacuum environment. [Fig. 2.8]
def ReflexVacuumAgent(): "A reflex agent for the two-state vacuum environment. [Fig. 2.8]" def program((location, status)): if status == 'Dirty': return 'Suck' elif location == loc_A: return 'Right' elif location == loc_B: return 'Left' return Agent(program)
An agent that keeps track of what locations are clean or dirty.
def ModelBasedVacuumAgent():
    "An agent that keeps track of what locations are clean or dirty."
    model = {loc_A: None, loc_B: None}
    def program((location, status)):
        "Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
        model[location] = status  ## Update the model here
        if model[loc_A] == model[loc_B] == 'Clean': return 'NoOp'
        elif status == 'Dirty': return 'Suck'
        elif location == loc_A: return 'Right'
        elif location == loc_B: return 'Left'
    return Agent(program)
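A sketch of running these agents, assuming the TrivialVacuumEnvironment class defined elsewhere in this module (two locations, randomly dirty); TraceAgent from above prints each percept/action pair.

e = TrivialVacuumEnvironment()
agent = TraceAgent(ModelBasedVacuumAgent())
e.add_thing(agent)
e.run(20)                   # one traced line per step
print agent.performance     # +10 per square cleaned, -1 per move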
See how well each of several agents do in n instances of an environment. Pass in a factory (constructor) for environments, and several for agents. Create n instances of the environment, and run each agent in copies of each one for steps. Return a list of (agent, average-score) tuples.
def compare_agents(EnvFactory, AgentFactories, n=10, steps=1000): """See how well each of several agents do in n instances of an environment. Pass in a factory (constructor) for environments, and several for agents. Create n instances of the environment, and run each agent in copies of each one for steps. Return a list of (agent, average-score) tuples.""" envs = [EnvFactory() for i in range(n)] return [(A, test_agent(A, steps, copy.deepcopy(envs))) for A in AgentFactories]
Run the environment for one time step. If the actions and exogenous changes are independent, this method will do. If there are interactions between them, you'll need to override this method.
def step(self): """Run the environment for one time step. If the actions and exogenous changes are independent, this method will do. If there are interactions between them, you'll need to override this method.""" if not self.is_done(): actions = [agent.program(self.percept(agent)) for agent in self.agents] for (agent, action) in zip(self.agents, actions): self.execute_action(agent, action) self.exogenous_change()
Run the Environment for given number of time steps.
def run(self, steps=1000): "Run the Environment for given number of time steps." for step in range(steps): if self.is_done(): return self.step()
Return all things exactly at a given location.
def list_things_at(self, location, tclass=Thing): "Return all things exactly at a given location." return [thing for thing in self.things if thing.location == location and isinstance(thing, tclass)]
Add a thing to the environment, setting its location. For convenience, if thing is an agent program we make a new agent for it. (Shouldn't need to override this.)
def add_thing(self, thing, location=None): """Add a thing to the environment, setting its location. For convenience, if thing is an agent program we make a new agent for it. (Shouldn't need to override this.""" if not isinstance(thing, Thing): thing = Agent(thing) assert thing not in self.things, "Don't add the same thing twice" thing.location = location or self.default_location(thing) self.things.append(thing) if isinstance(thing, Agent): thing.performance = 0 self.agents.append(thing)
Remove a thing from the environment.
def delete_thing(self, thing): """Remove a thing from the environment.""" try: self.things.remove(thing) except ValueError, e: print e print " in Environment delete_thing" print " Thing to be removed: %s at %s" % (thing, thing.location) print " from list: %s" % [(thing, thing.location) for thing in self.things] if thing in self.agents: self.agents.remove(thing)
Return all things within radius of location.
def things_near(self, location, radius=None): "Return all things within radius of location." if radius is None: radius = self.perceptible_distance radius2 = radius * radius return [thing for thing in self.things if distance2(location, thing.location) <= radius2]
By default agent perceives things within a default radius.
def percept(self, agent): "By default, agent perceives things within a default radius." return [self.thing_percept(thing, agent) for thing in self.things_near(agent.location)]
Move a thing to a new location.
def move_to(self, thing, destination): "Move a thing to a new location." thing.bump = self.some_things_at(destination, Obstacle) if not thing.bump: thing.location = destination for o in self.observers: o.thing_moved(thing)
Put walls around the entire perimeter of the grid.
def add_walls(self): "Put walls around the entire perimeter of the grid." for x in range(self.width): self.add_thing(Wall(), (x, 0)) self.add_thing(Wall(), (x, self.height-1)) for y in range(self.height): self.add_thing(Wall(), (0, y)) self.add_thing(Wall(), (self.width-1, y))
The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None'). Unlike the TrivialVacuumEnvironment, location is NOT perceived.
def percept(self, agent): """The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None'). Unlike the TrivialVacuumEnvironment, location is NOT perceived.""" status = if_(self.some_things_at(agent.location, Dirt), 'Dirty', 'Clean') bump = if_(agent.bump, 'Bump', 'None') return (status, bump)
Change agent's location and/or location's status; track performance. Score 10 for each dirt cleaned; -1 for each move.
def execute_action(self, agent, action): """Change agent's location and/or location's status; track performance. Score 10 for each dirt cleaned; -1 for each move.""" if action == 'Right': agent.location = loc_B agent.performance -= 1 elif action == 'Left': agent.location = loc_A agent.performance -= 1 elif action == 'Suck': if self.status[agent.location] == 'Dirty': agent.performance += 10 self.status[agent.location] = 'Clean'
Create a dictionary mapping symbols to alternative sequences. >>> Rules(A = "B C | D E") {'A': [['B', 'C'], ['D', 'E']]}
def Rules(**rules): """Create a dictionary mapping symbols to alternative sequences. >>> Rules(A = "B C | D E") {'A': [['B', 'C'], ['D', 'E']]} """ for (lhs, rhs) in rules.items(): rules[lhs] = [alt.strip().split() for alt in rhs.split('|')] return rules
Create a dictionary mapping symbols to alternative words. >>> Lexicon(Art = "the | a | an") {'Art': ['the', 'a', 'an']}
def Lexicon(**rules): """Create a dictionary mapping symbols to alternative words. >>> Lexicon(Art = "the | a | an") {'Art': ['the', 'a', 'an']} """ for (lhs, rhs) in rules.items(): rules[lhs] = [word.strip() for word in rhs.split('|')] return rules
Replace each token in s by a random entry in grammar (recursively). This is useful for testing a grammar, e.g. generate_random(E_).
def generate_random(grammar=E_, s='S'): """Replace each token in s by a random entry in grammar (recursively). This is useful for testing a grammar, e.g. generate_random(E_)""" import random def rewrite(tokens, into): for token in tokens: if token in grammar.rules: rewrite(random.choice(grammar.rules[token]), into) elif token in grammar.lexicon: into.append(random.choice(grammar.lexicon[token])) else: into.append(token) return into return ' '.join(rewrite(s.split(), []))
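A throwaway grammar to exercise Rules, Lexicon and generate_random; generate_random only needs an object with rules and lexicon attributes, so a bare container class (hypothetical, not the module's Grammar class) is enough here.

class ToyGrammar:
    rules = Rules(S='NP VP', NP='Art N', VP='V NP')
    lexicon = Lexicon(Art='the | a', N='dog | cat', V='sees | chases')

print generate_random(ToyGrammar(), 'S')
# e.g. 'the dog chases a cat'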
Return a list of parses; words can be a list or string. >>> chart = Chart(E_NP_) >>> chart.parses('happy man', 'NP') [[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]]
def parses(self, words, S='S'): """Return a list of parses; words can be a list or string. >>> chart = Chart(E_NP_) >>> chart.parses('happy man', 'NP') [[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]] """ if isinstance(words, str): words = words.split() self.parse(words, S) # Return all the parses that span the whole input # 'span the whole input' => begin at 0, end at len(words) return [[i, j, S, found, []] for (i, j, lhs, found, expects) in self.chart[len(words)] # assert j == len(words) if i == 0 and lhs == S and expects == []]
Parse a list of words according to the grammar. Leave results in the chart.
def parse(self, words, S='S'): """Parse a list of words; according to the grammar. Leave results in the chart.""" self.chart = [[] for i in range(len(words)+1)] self.add_edge([0, 0, 'S_', [], [S]]) for i in range(len(words)): self.scanner(i, words[i]) return self.chart
Add edge to chart and see if it extends or predicts another edge.
def add_edge(self, edge): "Add edge to chart, and see if it extends or predicts another edge." start, end, lhs, found, expects = edge if edge not in self.chart[end]: self.chart[end].append(edge) if self.trace: print '%10s: added %s' % (caller(2), edge) if not expects: self.extender(edge) else: self.predictor(edge)