| INSTRUCTION (string, 1 to 8.43k chars) | RESPONSE (string, 75 to 104k chars) |
|---|---|
Given an associative op, return an expression with the same meaning as Expr(op, *args), but flattened -- that is, with nested instances of the same op promoted to the top level. >>> associate('&', [(A&B), (B|C), (B&C)]) (A & B & (B | C) & B & C) >>> associate('|', [A|(B|(C|(A&B)))]) (A | B | C | (A & B))
|
def associate(op, args):
"""Given an associative op, return an expression with the same
meaning as Expr(op, *args), but flattened -- that is, with nested
instances of the same op promoted to the top level.
>>> associate('&', [(A&B),(B|C),(B&C)])
(A & B & (B | C) & B & C)
>>> associate('|', [A|(B|(C|(A&B)))])
(A | B | C | (A & B))
"""
args = dissociate(op, args)
if len(args) == 0:
return _op_identity[op]
elif len(args) == 1:
return args[0]
else:
return Expr(op, *args)
|
Given an associative op, return a flattened list result such that Expr(op, *result) means the same as Expr(op, *args).
|
def dissociate(op, args):
"""Given an associative op, return a flattened list result such
that Expr(op, *result) means the same as Expr(op, *args)."""
result = []
def collect(subargs):
for arg in subargs:
if arg.op == op: collect(arg.args)
else: result.append(arg)
collect(args)
return result
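# A hedged usage sketch (not one of the module's doctests); A, B, C here are
# plain proposition symbols built with Expr, and the helper name is illustrative:
def _dissociate_demo():
    A, B, C = map(Expr, 'ABC')
    return dissociate('&', [A & B & C, A | B])   # -> [A, B, C, (A | B)]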
|
Propositional-logic resolution: say if alpha follows from KB. [Fig. 7.12]
|
def pl_resolution(KB, alpha):
"Propositional-logic resolution: say if alpha follows from KB. [Fig. 7.12]"
clauses = KB.clauses + conjuncts(to_cnf(~alpha))
new = set()
while True:
n = len(clauses)
pairs = [(clauses[i], clauses[j])
for i in range(n) for j in range(i+1, n)]
for (ci, cj) in pairs:
resolvents = pl_resolve(ci, cj)
if FALSE in resolvents: return True
new = new.union(set(resolvents))
if new.issubset(set(clauses)): return False
for c in new:
if c not in clauses: clauses.append(c)
|
Return all clauses that can be obtained by resolving clauses ci and cj. >>> for res in pl_resolve(to_cnf(A|B|C), to_cnf(~B|~C|F)): ... ppset(disjuncts(res)) set([A, C, F, ~C]) set([A, B, F, ~B])
|
def pl_resolve(ci, cj):
"""Return all clauses that can be obtained by resolving clauses ci and cj.
>>> for res in pl_resolve(to_cnf(A|B|C), to_cnf(~B|~C|F)):
... ppset(disjuncts(res))
set([A, C, F, ~C])
set([A, B, F, ~B])
"""
clauses = []
for di in disjuncts(ci):
for dj in disjuncts(cj):
if di == ~dj or ~di == dj:
dnew = unique(removeall(di, disjuncts(ci)) +
removeall(dj, disjuncts(cj)))
clauses.append(associate('|', dnew))
return clauses
|
Use forward chaining to see if a PropDefiniteKB entails symbol q. [Fig. 7.15] >>> pl_fc_entails(Fig[7,15], expr('Q')) True
|
def pl_fc_entails(KB, q):
"""Use forward chaining to see if a PropDefiniteKB entails symbol q.
[Fig. 7.15]
>>> pl_fc_entails(Fig[7,15], expr('Q'))
True
"""
count = dict([(c, len(conjuncts(c.args[0]))) for c in KB.clauses
if c.op == '>>'])
inferred = DefaultDict(False)
agenda = [s for s in KB.clauses if is_prop_symbol(s.op)]
while agenda:
p = agenda.pop()
if p == q: return True
if not inferred[p]:
inferred[p] = True
for c in KB.clauses_with_premise(p):
count[c] -= 1
if count[c] == 0:
agenda.append(c.args[1])
return False
|
Check satisfiability of a propositional sentence. This differs from the book code in two ways: (1) it returns a model rather than True when it succeeds; this is more useful. (2) The function find_pure_symbol is passed a list of unknown clauses, rather than a list of all clauses and the model; this is more efficient. >>> ppsubst(dpll_satisfiable(A&~B)) {A: True, B: False} >>> dpll_satisfiable(P&~P) False
|
def dpll_satisfiable(s):
"""Check satisfiability of a propositional sentence.
This differs from the book code in two ways: (1) it returns a model
rather than True when it succeeds; this is more useful. (2) The
function find_pure_symbol is passed a list of unknown clauses, rather
than a list of all clauses and the model; this is more efficient.
>>> ppsubst(dpll_satisfiable(A&~B))
{A: True, B: False}
>>> dpll_satisfiable(P&~P)
False
"""
clauses = conjuncts(to_cnf(s))
symbols = prop_symbols(s)
return dpll(clauses, symbols, {})
|
See if the clauses are true in a partial model.
|
def dpll(clauses, symbols, model):
"See if the clauses are true in a partial model."
unknown_clauses = [] ## clauses with an unknown truth value
for c in clauses:
val = pl_true(c, model)
if val == False:
return False
if val != True:
unknown_clauses.append(c)
if not unknown_clauses:
return model
P, value = find_pure_symbol(symbols, unknown_clauses)
if P:
return dpll(clauses, removeall(P, symbols), extend(model, P, value))
P, value = find_unit_clause(clauses, model)
if P:
return dpll(clauses, removeall(P, symbols), extend(model, P, value))
P, symbols = symbols[0], symbols[1:]
return (dpll(clauses, symbols, extend(model, P, True)) or
dpll(clauses, symbols, extend(model, P, False)))
|
Find a symbol and its value if it appears only as a positive literal (or only as a negative) in clauses. >>> find_pure_symbol([A, B, C], [A|~B, ~B|~C, C|A]) (A, True)
|
def find_pure_symbol(symbols, clauses):
"""Find a symbol and its value if it appears only as a positive literal
(or only as a negative) in clauses.
>>> find_pure_symbol([A, B, C], [A|~B,~B|~C,C|A])
(A, True)
"""
for s in symbols:
found_pos, found_neg = False, False
for c in clauses:
if not found_pos and s in disjuncts(c): found_pos = True
if not found_neg and ~s in disjuncts(c): found_neg = True
if found_pos != found_neg: return s, found_pos
return None, None
|
Find a forced assignment if possible from a clause with only 1 variable not bound in the model. >>> find_unit_clause([A|B|C, B|~C, ~A|~B], {A: True}) (B, False)
|
def find_unit_clause(clauses, model):
"""Find a forced assignment if possible from a clause with only 1
variable not bound in the model.
>>> find_unit_clause([A|B|C, B|~C, ~A|~B], {A:True})
(B, False)
"""
for clause in clauses:
P, value = unit_clause_assign(clause, model)
if P: return P, value
return None, None
|
Return a single variable/value pair that makes clause true in the model, if possible. >>> unit_clause_assign(A|B|C, {A: True}) (None, None) >>> unit_clause_assign(B|~C, {A: True}) (None, None) >>> unit_clause_assign(~A|~B, {A: True}) (B, False)
|
def unit_clause_assign(clause, model):
"""Return a single variable/value pair that makes clause true in
the model, if possible.
>>> unit_clause_assign(A|B|C, {A:True})
(None, None)
>>> unit_clause_assign(B|~C, {A:True})
(None, None)
>>> unit_clause_assign(~A|~B, {A:True})
(B, False)
"""
P, value = None, None
for literal in disjuncts(clause):
sym, positive = inspect_literal(literal)
if sym in model:
if model[sym] == positive:
return None, None # clause already True
elif P:
return None, None # more than 1 unbound variable
else:
P, value = sym, positive
return P, value
|
[Fig. 7.22]
|
def SAT_plan(init, transition, goal, t_max, SAT_solver=dpll_satisfiable):
"[Fig. 7.22]"
for t in range(t_max):
cnf = translate_to_SAT(init, transition, goal, t)
model = SAT_solver(cnf)
if model is not False:
return extract_solution(model)
return None
|
Unify expressions x,y with substitution s; return a substitution that would make x,y equal, or None if x,y can not unify. x and y can be variables (e.g. Expr('x')), constants, lists, or Exprs. [Fig. 9.1] >>> ppsubst(unify(x + y, y + C, {})) {x: y, y: C}
|
def unify(x, y, s):
"""Unify expressions x,y with substitution s; return a substitution that
would make x,y equal, or None if x,y can not unify. x and y can be
variables (e.g. Expr('x')), constants, lists, or Exprs. [Fig. 9.1]
>>> ppsubst(unify(x + y, y + C, {}))
{x: y, y: C}
"""
if s is None:
return None
elif x == y:
return s
elif is_variable(x):
return unify_var(x, y, s)
elif is_variable(y):
return unify_var(y, x, s)
elif isinstance(x, Expr) and isinstance(y, Expr):
return unify(x.args, y.args, unify(x.op, y.op, s))
elif isinstance(x, str) or isinstance(y, str):
return None
elif issequence(x) and issequence(y) and len(x) == len(y):
if not x: return s
return unify(x[1:], y[1:], unify(x[0], y[0], s))
else:
return None
|
A variable is an Expr with no args and a lowercase symbol as the op.
|
def is_variable(x):
"A variable is an Expr with no args and a lowercase symbol as the op."
return isinstance(x, Expr) and not x.args and is_var_symbol(x.op)
|
Return true if variable var occurs anywhere in x (or in subst(s, x), if s has a binding for x).
|
def occur_check(var, x, s):
"""Return true if variable var occurs anywhere in x
(or in subst(s, x), if s has a binding for x)."""
if var == x:
return True
elif is_variable(x) and x in s:
return occur_check(var, s[x], s)
elif isinstance(x, Expr):
return (occur_check(var, x.op, s) or
occur_check(var, x.args, s))
elif isinstance(x, (list, tuple)):
return some(lambda element: occur_check(var, element, s), x)
else:
return False
|
Copy the substitution s and extend it by setting var to val; return copy. >>> ppsubst(extend({x: 1}, y, 2)) {x: 1, y: 2}
|
def extend(s, var, val):
"""Copy the substitution s and extend it by setting var to val;
return copy.
>>> ppsubst(extend({x: 1}, y, 2))
{x: 1, y: 2}
"""
s2 = s.copy()
s2[var] = val
return s2
|
Substitute the substitution s into the expression x. >>> subst({x: 42, y: 0}, F(x) + y) (F(42) + 0)
|
def subst(s, x):
"""Substitute the substitution s into the expression x.
>>> subst({x: 42, y:0}, F(x) + y)
(F(42) + 0)
"""
if isinstance(x, list):
return [subst(s, xi) for xi in x]
elif isinstance(x, tuple):
return tuple([subst(s, xi) for xi in x])
elif not isinstance(x, Expr):
return x
elif is_var_symbol(x.op):
return s.get(x, x)
else:
return Expr(x.op, *[subst(s, arg) for arg in x.args])
|
Inefficient forward chaining for first-order logic. [Fig. 9.3] KB is a FolKB and alpha must be an atomic sentence.
|
def fol_fc_ask(KB, alpha):
"""Inefficient forward chaining for first-order logic. [Fig. 9.3]
KB is a FolKB and alpha must be an atomic sentence."""
while True:
new = {}
for r in KB.clauses:
ps, q = parse_definite_clause(standardize_variables(r))
raise NotImplementedError
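# fol_fc_ask above stops at NotImplementedError. Below is a hedged sketch of
# how Fig. 9.3 could be completed, reusing helpers that appear elsewhere in
# this module (parse_definite_clause, standardize_variables, variables, subst,
# unify, Expr, is_var_symbol); the constant-gathering helper and the function
# name are assumptions, not the author's code.
import itertools

def fol_fc_ask_sketch(KB, alpha):
    """Forward chaining over first-order definite clauses (sketch)."""
    def constants_of(KB):
        # Collect argument-free, non-variable terms mentioned in the KB.
        found = set()
        def walk(x):
            if isinstance(x, Expr):
                if not x.args and not is_var_symbol(x.op):
                    found.add(x)
                for arg in x.args:
                    walk(arg)
        for clause in KB.clauses:
            walk(clause)
        return list(found)
    consts = constants_of(KB)
    while True:
        new = set()
        for r in KB.clauses:
            ps, q = parse_definite_clause(standardize_variables(r))
            if not ps:
                continue                      # a fact; nothing to fire
            vs = list(set(v for p in ps for v in variables(p)))
            for vals in itertools.product(consts, repeat=len(vs)):
                theta = dict(zip(vs, vals))
                if all(subst(theta, p) in KB.clauses for p in ps):
                    q1 = subst(theta, q)
                    if q1 not in KB.clauses and q1 not in new:
                        new.add(q1)
                        phi = unify(q1, alpha, {})
                        if phi is not None:
                            return phi
        if not new:
            return False
        for fact in new:
            KB.tell(fact)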
|
Replace all the variables in sentence with new variables. >>> e = expr('F(a, b, c) & G(c, A, 23)') >>> len(variables(standardize_variables(e))) 3 >>> variables(e).intersection(variables(standardize_variables(e))) set([]) >>> is_variable(standardize_variables(expr('x'))) True
|
def standardize_variables(sentence, dic=None):
"""Replace all the variables in sentence with new variables.
>>> e = expr('F(a, b, c) & G(c, A, 23)')
>>> len(variables(standardize_variables(e)))
3
>>> variables(e).intersection(variables(standardize_variables(e)))
set([])
>>> is_variable(standardize_variables(expr('x')))
True
"""
if dic is None: dic = {}
if not isinstance(sentence, Expr):
return sentence
elif is_var_symbol(sentence.op):
if sentence in dic:
return dic[sentence]
else:
v = Expr('v_%d' % standardize_variables.counter.next())
dic[sentence] = v
return v
else:
return Expr(sentence.op,
*[standardize_variables(a, dic) for a in sentence.args])
|
Return the symbolic derivative, dy/dx, as an Expr. However, you probably want to simplify the results with simp. >>> diff(x * x, x) ((x * 1) + (x * 1)) >>> simp(diff(x * x, x)) (2 * x)
|
def diff(y, x):
"""Return the symbolic derivative, dy/dx, as an Expr.
However, you probably want to simplify the results with simp.
>>> diff(x * x, x)
((x * 1) + (x * 1))
>>> simp(diff(x * x, x))
(2 * x)
"""
if y == x: return ONE
elif not y.args: return ZERO
else:
u, op, v = y.args[0], y.op, y.args[-1]
if op == '+': return diff(u, x) + diff(v, x)
        elif op == '-' and len(y.args) == 1: return -diff(u, x)
elif op == '-': return diff(u, x) - diff(v, x)
elif op == '*': return u * diff(v, x) + v * diff(u, x)
elif op == '/': return (v*diff(u, x) - u*diff(v, x)) / (v * v)
        elif op == '**' and isnumber(v.op):  # constant exponent
return (v * u ** (v - 1) * diff(u, x))
elif op == '**': return (v * u ** (v - 1) * diff(u, x)
+ u ** v * Expr('log')(u) * diff(v, x))
elif op == 'log': return diff(u, x) / u
else: raise ValueError("Unknown op: %s in diff(%s, %s)" % (op, y, x))
|
Return dictionary d's repr but with the items sorted. >>> pretty_dict({'m': 'M', 'a': 'A', 'r': 'R', 'k': 'K'}) "{'a': 'A', 'k': 'K', 'm': 'M', 'r': 'R'}" >>> pretty_dict({z: C, y: B, x: A}) '{x: A, y: B, z: C}'
|
def pretty_dict(d):
"""Return dictionary d's repr but with the items sorted.
>>> pretty_dict({'m': 'M', 'a': 'A', 'r': 'R', 'k': 'K'})
"{'a': 'A', 'k': 'K', 'm': 'M', 'r': 'R'}"
>>> pretty_dict({z: C, y: B, x: A})
'{x: A, y: B, z: C}'
"""
return '{%s}' % ', '.join('%r: %r' % (k, v)
for k, v in sorted(d.items(), key=repr))
|
Remove the sentence's clauses from the KB.
|
def retract(self, sentence):
"Remove the sentence's clauses from the KB."
for c in conjuncts(to_cnf(sentence)):
if c in self.clauses:
self.clauses.remove(c)
|
Return a list of the clauses in KB that have p in their premise. This could be cached away for O(1) speed, but we'll recompute it.
|
def clauses_with_premise(self, p):
"""Return a list of the clauses in KB that have p in their premise.
This could be cached away for O(1) speed, but we'll recompute it."""
return [c for c in self.clauses
if c.op == '>>' and p in conjuncts(c.args[0])]
|
Updates the cache with setting values from the database.
|
def refresh(self):
"""
Updates the cache with setting values from the database.
"""
# `values_list('name', 'value')` doesn't work because `value` is not a
# setting (base class) field, it's a setting value (subclass) field. So
# we have to get real instances.
args = [(obj.name, obj.value) for obj in self.queryset.all()]
super(SettingDict, self).update(args)
self.empty_cache = False
|
Given a state in a game, calculate the best move by searching forward all the way to the terminal states. [Fig. 5.3]
|
def minimax_decision(state, game):
"""Given a state in a game, calculate the best move by searching
forward all the way to the terminal states. [Fig. 5.3]"""
player = game.to_move(state)
def max_value(state):
if game.terminal_test(state):
return game.utility(state, player)
v = -infinity
for a in game.actions(state):
v = max(v, min_value(game.result(state, a)))
return v
def min_value(state):
if game.terminal_test(state):
return game.utility(state, player)
v = infinity
for a in game.actions(state):
v = min(v, max_value(game.result(state, a)))
return v
# Body of minimax_decision:
return argmax(game.actions(state),
lambda a: min_value(game.result(state, a)))
|
Search game to determine best action; use alpha-beta pruning. This version cuts off search and uses an evaluation function.
|
def alphabeta_search(state, game, d=4, cutoff_test=None, eval_fn=None):
"""Search game to determine best action; use alpha-beta pruning.
This version cuts off search and uses an evaluation function."""
player = game.to_move(state)
def max_value(state, alpha, beta, depth):
if cutoff_test(state, depth):
return eval_fn(state)
v = -infinity
for a in game.actions(state):
v = max(v, min_value(game.result(state, a),
alpha, beta, depth+1))
if v >= beta:
return v
alpha = max(alpha, v)
return v
def min_value(state, alpha, beta, depth):
if cutoff_test(state, depth):
return eval_fn(state)
v = infinity
for a in game.actions(state):
v = min(v, max_value(game.result(state, a),
alpha, beta, depth+1))
if v <= alpha:
return v
beta = min(beta, v)
return v
# Body of alphabeta_search starts here:
# The default test cuts off at depth d or at a terminal state
cutoff_test = (cutoff_test or
(lambda state,depth: depth>d or game.terminal_test(state)))
eval_fn = eval_fn or (lambda state: game.utility(state, player))
return argmax(game.actions(state),
lambda a: min_value(game.result(state, a),
-infinity, infinity, 0))
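# A hedged usage sketch (Fig52Game is the small game tree from Fig. 5.2,
# defined elsewhere in this module); the expected moves are an assumption
# based on the book's analysis, not a doctest:
def _alphabeta_demo():
    game = Fig52Game()
    return (minimax_decision(game.initial, game),
            alphabeta_search(game.initial, game))   # expected ('a1', 'a1')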
|
Play an n-person, move-alternating game. >>> play_game(Fig52Game(), alphabeta_player, alphabeta_player) 3
|
def play_game(game, *players):
"""Play an n-person, move-alternating game.
>>> play_game(Fig52Game(), alphabeta_player, alphabeta_player)
3
"""
state = game.initial
while True:
for player in players:
move = player(game, state)
state = game.result(state, move)
if game.terminal_test(state):
return game.utility(state, game.to_move(game.initial))
|
Return the value to player; 1 for win, -1 for loss, 0 otherwise.
|
def utility(self, state, player):
"Return the value to player; 1 for win, -1 for loss, 0 otherwise."
return if_(player == 'X', state.utility, -state.utility)
|
If X wins with this move, return 1; if O return -1; else return 0.
|
def compute_utility(self, board, move, player):
"If X wins with this move, return 1; if O return -1; else return 0."
if (self.k_in_row(board, move, player, (0, 1)) or
self.k_in_row(board, move, player, (1, 0)) or
self.k_in_row(board, move, player, (1, -1)) or
self.k_in_row(board, move, player, (1, 1))):
return if_(player == 'X', +1, -1)
else:
return 0
|
Return true if there is a line through move on board for player.
|
def k_in_row(self, board, move, player, (delta_x, delta_y)):
"Return true if there is a line through move on board for player."
x, y = move
n = 0 # n is number of moves in row
while board.get((x, y)) == player:
n += 1
x, y = x + delta_x, y + delta_y
x, y = move
while board.get((x, y)) == player:
n += 1
x, y = x - delta_x, y - delta_y
n -= 1 # Because we counted move itself twice
return n >= self.k
|
Update a dict, or an object with slots, according to `entries` dict. >>> update({'a': 1}, a=10, b=20) {'a': 10, 'b': 20} >>> update(Struct(a=1), a=10, b=20) Struct(a=10, b=20)
|
def update(x, **entries):
"""Update a dict, or an object with slots, according to `entries` dict.
>>> update({'a': 1}, a=10, b=20)
{'a': 10, 'b': 20}
>>> update(Struct(a=1), a=10, b=20)
Struct(a=10, b=20)
"""
if isinstance(x, dict):
x.update(entries)
else:
x.__dict__.update(entries)
return x
|
Return a copy of seq (or string) with all occurrences of item removed. >>> removeall(3, [1, 2, 3, 3, 2, 1, 3]) [1, 2, 2, 1] >>> removeall(4, [1, 2, 3]) [1, 2, 3]
|
def removeall(item, seq):
    """Return a copy of seq (or string) with all occurrences of item removed.
>>> removeall(3, [1, 2, 3, 3, 2, 1, 3])
[1, 2, 2, 1]
>>> removeall(4, [1, 2, 3])
[1, 2, 3]
"""
if isinstance(seq, str):
return seq.replace(item, '')
else:
return [x for x in seq if x != item]
|
Count the number of elements of seq for which the predicate is true. >>> count_if(callable, [42, None, max, min]) 2
|
def count_if(predicate, seq):
"""Count the number of elements of seq for which the predicate is true.
>>> count_if(callable, [42, None, max, min])
2
"""
f = lambda count, x: count + (not not predicate(x))
return reduce(f, seq, 0)
|
If some element x of seq satisfies predicate(x), return predicate(x). >>> some(callable, [min, 3]) 1 >>> some(callable, [2, 3]) 0
|
def some(predicate, seq):
"""If some element x of seq satisfies predicate(x), return predicate(x).
>>> some(callable, [min, 3])
1
>>> some(callable, [2, 3])
0
"""
for x in seq:
px = predicate(x)
if px: return px
return False
|
Return an element with lowest fn(seq[i]) score; tie goes to first one. >>> argmin(['one', 'to', 'three'], len) 'to'
|
def argmin(seq, fn):
"""Return an element with lowest fn(seq[i]) score; tie goes to first one.
>>> argmin(['one', 'to', 'three'], len)
'to'
"""
best = seq[0]; best_score = fn(best)
for x in seq:
x_score = fn(x)
if x_score < best_score:
best, best_score = x, x_score
return best
|
Return a list of elements of seq[i] with the lowest fn(seq[i]) scores. >>> argmin_list(['one', 'to', 'three', 'or'], len) ['to', 'or']
|
def argmin_list(seq, fn):
"""Return a list of elements of seq[i] with the lowest fn(seq[i]) scores.
>>> argmin_list(['one', 'to', 'three', 'or'], len)
['to', 'or']
"""
best_score, best = fn(seq[0]), []
for x in seq:
x_score = fn(x)
if x_score < best_score:
best, best_score = [x], x_score
elif x_score == best_score:
best.append(x)
return best
|
Return an element with lowest fn(seq[i]) score; break ties at random. Thus, for all s,f: argmin_random_tie(s, f) in argmin_list(s, f)
|
def argmin_random_tie(seq, fn):
"""Return an element with lowest fn(seq[i]) score; break ties at random.
Thus, for all s,f: argmin_random_tie(s, f) in argmin_list(s, f)"""
best_score = fn(seq[0]); n = 0
for x in seq:
x_score = fn(x)
if x_score < best_score:
best, best_score = x, x_score; n = 1
elif x_score == best_score:
n += 1
if random.randrange(n) == 0:
best = x
return best
|
Return a list of (value, count) pairs, summarizing the input values. Sorted by increasing value, or if mode=1, by decreasing count. If bin_function is given, map it over values first.
|
def histogram(values, mode=0, bin_function=None):
"""Return a list of (value, count) pairs, summarizing the input values.
Sorted by increasing value, or if mode=1, by decreasing count.
If bin_function is given, map it over values first."""
if bin_function: values = map(bin_function, values)
bins = {}
for val in values:
bins[val] = bins.get(val, 0) + 1
if mode:
return sorted(bins.items(), key=lambda x: (x[1],x[0]), reverse=True)
else:
return sorted(bins.items())
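# A hedged usage sketch (values and the helper name are illustrative, not a doctest):
def _histogram_demo():
    assert histogram([1, 2, 2, 3, 3, 3]) == [(1, 1), (2, 2), (3, 3)]
    assert histogram([1, 2, 2, 3, 3, 3], mode=1) == [(3, 3), (2, 2), (1, 1)]
    assert histogram([1.1, 1.9, 2.7], 0, int) == [(1, 2), (2, 1)]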
|
Return the middle value, when the values are sorted. If there are an even number of elements, try to average the middle two. If they can't be averaged (e.g. they are strings), choose one at random. >>> median([10, 100, 11]) 11 >>> median([1, 2, 3, 4]) 2.5
|
def median(values):
"""Return the middle value, when the values are sorted.
    If there are an even number of elements, try to average the middle two.
If they can't be averaged (e.g. they are strings), choose one at random.
>>> median([10, 100, 11])
11
>>> median([1, 2, 3, 4])
2.5
"""
n = len(values)
values = sorted(values)
if n % 2 == 1:
return values[n/2]
else:
middle2 = values[(n/2)-1:(n/2)+1]
try:
return mean(middle2)
except TypeError:
return random.choice(middle2)
|
Return the sum of the element-wise product of vectors x and y. >>> dotproduct([1, 2, 3], [1000, 100, 10]) 1230
|
def dotproduct(X, Y):
"""Return the sum of the element-wise product of vectors x and y.
>>> dotproduct([1, 2, 3], [1000, 100, 10])
1230
"""
return sum([x * y for x, y in zip(X, Y)])
|
Pick n samples from seq at random, with replacement, with the probability of each element in proportion to its corresponding weight.
|
def weighted_sample_with_replacement(seq, weights, n):
"""Pick n samples from seq at random, with replacement, with the
probability of each element in proportion to its corresponding
weight."""
sample = weighted_sampler(seq, weights)
return [sample() for s in range(n)]
|
Return a random-sample function that picks from seq weighted by weights.
|
def weighted_sampler(seq, weights):
"Return a random-sample function that picks from seq weighted by weights."
totals = []
for w in weights:
totals.append(w + totals[-1] if totals else w)
return lambda: seq[bisect.bisect(totals, random.uniform(0, totals[-1]))]
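# A hedged usage sketch (illustrative weights and helper name; relies on the
# module's random and bisect imports): the heavy element should dominate.
def _weighted_sampler_demo(trials=1000):
    sample = weighted_sampler(['a', 'b', 'c'], [1, 2, 7])
    counts = {'a': 0, 'b': 0, 'c': 0}
    for _ in range(trials):
        counts[sample()] += 1
    return counts   # roughly {'a': 100, 'b': 200, 'c': 700}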
|
The argument is a string; convert to a number if possible, or strip it. >>> num_or_str('42') 42 >>> num_or_str(' 42x ') '42x'
|
def num_or_str(x):
"""The argument is a string; convert to a number if possible, or strip it.
>>> num_or_str('42')
42
>>> num_or_str(' 42x ')
'42x'
"""
if isnumber(x): return x
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
return str(x).strip()
|
Multiply each number by a constant such that the sum is 1.0 >>> normalize([1, 2, 1]) [0.25, 0.5, 0.25]
|
def normalize(numbers):
"""Multiply each number by a constant such that the sum is 1.0
>>> normalize([1,2,1])
[0.25, 0.5, 0.25]
"""
total = float(sum(numbers))
return [n / total for n in numbers]
|
The distance between two (x, y) points.
|
def distance((ax, ay), (bx, by)):
"The distance between two (x, y) points."
return math.hypot((ax - bx), (ay - by))
|
Return vector, except if any element is less than the corresponding value of lowest or more than the corresponding value of highest, clip to those values. >>> vector_clip((-1, 10), (0, 0), (9, 9)) (0, 9)
|
def vector_clip(vector, lowest, highest):
"""Return vector, except if any element is less than the corresponding
value of lowest or more than the corresponding value of highest, clip to
those values.
>>> vector_clip((-1, 10), (0, 0), (9, 9))
(0, 9)
"""
return type(vector)(map(clip, vector, lowest, highest))
|
Format args with the first argument as format string, and write. Return the last arg, or format itself if there are no args.
|
def printf(format, *args):
"""Format args with the first argument as format string, and write.
Return the last arg, or format itself if there are no args."""
sys.stdout.write(str(format) % args)
return if_(args, lambda: args[-1], lambda: format)
|
Memoize fn: make it remember the computed value for any argument list. If slot is specified, store result in that slot of first argument. If slot is false, store results in a dictionary.
|
def memoize(fn, slot=None):
"""Memoize fn: make it remember the computed value for any argument list.
If slot is specified, store result in that slot of first argument.
If slot is false, store results in a dictionary."""
if slot:
def memoized_fn(obj, *args):
if hasattr(obj, slot):
return getattr(obj, slot)
else:
val = fn(obj, *args)
setattr(obj, slot, val)
return val
else:
def memoized_fn(*args):
if not memoized_fn.cache.has_key(args):
memoized_fn.cache[args] = fn(*args)
return memoized_fn.cache[args]
memoized_fn.cache = {}
return memoized_fn
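# A hedged usage sketch of the dictionary-cache form (names are illustrative):
def _memoize_demo():
    calls = [0]
    def slow_square(x):
        calls[0] += 1
        return x * x
    fast_square = memoize(slow_square)
    assert fast_square(12) == 144 and fast_square(12) == 144
    return calls[0]   # 1: the second call is answered from memoized_fn.cache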
|
Like C++ and Java's (test ? result : alternative), except both result and alternative are always evaluated. However, if either evaluates to a function, it is applied to the empty arglist, so you can delay execution by putting it in a lambda. >>> if_(2 + 2 == 4, 'ok', lambda: expensive_computation()) 'ok'
|
def if_(test, result, alternative):
"""Like C++ and Java's (test ? result : alternative), except
both result and alternative are always evaluated. However, if
either evaluates to a function, it is applied to the empty arglist,
so you can delay execution by putting it in a lambda.
>>> if_(2 + 2 == 4, 'ok', lambda: expensive_computation())
'ok'
"""
if test:
if callable(result): return result()
return result
else:
if callable(alternative): return alternative()
return alternative
|
Try to find some reasonable name for the object.
|
def name(object):
"Try to find some reasonable name for the object."
return (getattr(object, 'name', 0) or getattr(object, '__name__', 0)
or getattr(getattr(object, '__class__', 0), '__name__', 0)
or str(object))
|
Print a list of lists as a table, so that columns line up nicely. header, if specified, will be printed as the first row. numfmt is the format for all numbers; you might want e.g. '%6.2f'. (If you want different formats in different columns, don't use print_table.) sep is the separator between columns.
|
def print_table(table, header=None, sep=' ', numfmt='%g'):
"""Print a list of lists as a table, so that columns line up nicely.
header, if specified, will be printed as the first row.
numfmt is the format for all numbers; you might want e.g. '%6.2f'.
(If you want different formats in different columns, don't use print_table.)
sep is the separator between columns."""
justs = [if_(isnumber(x), 'rjust', 'ljust') for x in table[0]]
if header:
table = [header] + table
table = [[if_(isnumber(x), lambda: numfmt % x, lambda: x) for x in row]
for row in table]
maxlen = lambda seq: max(map(len, seq))
sizes = map(maxlen, zip(*[map(str, row) for row in table]))
for row in table:
print sep.join(getattr(str(x), j)(size)
for (j, size, x) in zip(justs, sizes, row))
|
Open a file based at the AIMA root directory.
|
def AIMAFile(components, mode='r'):
"Open a file based at the AIMA root directory."
import utils
dir = os.path.dirname(utils.__file__)
return open(apply(os.path.join, [dir] + components), mode)
|
Input is a string consisting of lines, each line has comma-delimited fields. Convert this into a list of lists. Blank lines are skipped. Fields that look like numbers are converted to numbers. The delim defaults to ',' but '\t' and None are also reasonable values. >>> parse_csv('1, 2, 3 \n 0, 2, na') [[1, 2, 3], [0, 2, 'na']]
|
def parse_csv(input, delim=','):
r"""Input is a string consisting of lines, each line has comma-delimited
fields. Convert this into a list of lists. Blank lines are skipped.
Fields that look like numbers are converted to numbers.
The delim defaults to ',' but '\t' and None are also reasonable values.
>>> parse_csv('1, 2, 3 \n 0, 2, na')
[[1, 2, 3], [0, 2, 'na']]
"""
lines = [line for line in input.splitlines() if line.strip()]
return [map(num_or_str, line.split(delim)) for line in lines]
|
A very dumb algorithm: always pick the result that was most popular in the training data. Makes a baseline for comparison.
|
def PluralityLearner(dataset):
"""A very dumb algorithm: always pick the result that was most popular
in the training data. Makes a baseline for comparison."""
most_popular = mode([e[dataset.target] for e in dataset.examples])
def predict(example):
"Always return same result: the most popular from the training set."
return most_popular
return predict
|
Just count how many times each value of each input attribute occurs, conditional on the target value. Count the different target values too.
|
def NaiveBayesLearner(dataset):
"""Just count how many times each value of each input attribute
occurs, conditional on the target value. Count the different
target values too."""
targetvals = dataset.values[dataset.target]
target_dist = CountingProbDist(targetvals)
attr_dists = dict(((gv, attr), CountingProbDist(dataset.values[attr]))
for gv in targetvals
for attr in dataset.inputs)
for example in dataset.examples:
targetval = example[dataset.target]
target_dist.add(targetval)
for attr in dataset.inputs:
attr_dists[targetval, attr].add(example[attr])
def predict(example):
"""Predict the target value for example. Consider each possible value,
and pick the most likely by looking at each attribute independently."""
def class_probability(targetval):
return (target_dist[targetval]
* product(attr_dists[targetval, attr][example[attr]]
for attr in dataset.inputs))
return argmax(targetvals, class_probability)
return predict
|
k-NearestNeighbor: the k nearest neighbors vote.
|
def NearestNeighborLearner(dataset, k=1):
"k-NearestNeighbor: the k nearest neighbors vote."
def predict(example):
"Find the k closest, and have them vote for the best."
best = heapq.nsmallest(k, ((dataset.distance(e, example), e)
for e in dataset.examples))
return mode(e[dataset.target] for (d, e) in best)
return predict
|
[Fig. 18.5]
|
def DecisionTreeLearner(dataset):
"[Fig. 18.5]"
target, values = dataset.target, dataset.values
def decision_tree_learning(examples, attrs, parent_examples=()):
if len(examples) == 0:
return plurality_value(parent_examples)
elif all_same_class(examples):
return DecisionLeaf(examples[0][target])
elif len(attrs) == 0:
return plurality_value(examples)
else:
A = choose_attribute(attrs, examples)
tree = DecisionFork(A, dataset.attrnames[A])
for (v_k, exs) in split_by(A, examples):
subtree = decision_tree_learning(
exs, removeall(A, attrs), examples)
tree.add(v_k, subtree)
return tree
def plurality_value(examples):
"""Return the most popular target value for this set of examples.
(If target is binary, this is the majority; otherwise plurality.)"""
popular = argmax_random_tie(values[target],
lambda v: count(target, v, examples))
return DecisionLeaf(popular)
def count(attr, val, examples):
return count_if(lambda e: e[attr] == val, examples)
def all_same_class(examples):
"Are all these examples in the same target class?"
class0 = examples[0][target]
return all(e[target] == class0 for e in examples)
def choose_attribute(attrs, examples):
"Choose the attribute with the highest information gain."
return argmax_random_tie(attrs,
lambda a: information_gain(a, examples))
def information_gain(attr, examples):
"Return the expected reduction in entropy from splitting by attr."
def I(examples):
return information_content([count(target, v, examples)
for v in values[target]])
N = float(len(examples))
remainder = sum((len(examples_i) / N) * I(examples_i)
for (v, examples_i) in split_by(attr, examples))
return I(examples) - remainder
def split_by(attr, examples):
"Return a list of (val, examples) pairs for each val of attr."
return [(v, [e for e in examples if e[attr] == v])
for v in values[attr]]
return decision_tree_learning(dataset.examples, dataset.inputs)
|
Number of bits to represent the probability distribution in values.
|
def information_content(values):
"Number of bits to represent the probability distribution in values."
probabilities = normalize(removeall(0, values))
return sum(-p * log2(p) for p in probabilities)
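# A hedged worked example (illustrative counts and helper name, not a doctest):
# a fair coin carries one bit; a uniform four-way split carries two.
def _information_content_demo():
    assert abs(information_content([50, 50]) - 1.0) < 1e-9
    assert abs(information_content([25, 25, 25, 25]) - 2.0) < 1e-9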
|
[Fig. 18.11]
|
def DecisionListLearner(dataset):
"""[Fig. 18.11]"""
def decision_list_learning(examples):
if not examples:
return [(True, False)]
t, o, examples_t = find_examples(examples)
if not t:
raise Failure
return [(t, o)] + decision_list_learning(examples - examples_t)
def find_examples(examples):
"""Find a set of examples that all have the same outcome under
some test. Return a tuple of the test, outcome, and examples."""
unimplemented()
def passes(example, test):
"Does the example pass the test?"
unimplemented()
def predict(example):
"Predict the outcome for the first passing test."
for test, outcome in predict.decision_list:
if passes(example, test):
return outcome
predict.decision_list = decision_list_learning(set(dataset.examples))
return predict
|
Layered feed-forward network.
|
def NeuralNetLearner(dataset, sizes):
"""Layered feed-forward network."""
activations = map(lambda n: [0.0 for i in range(n)], sizes)
weights = []
def predict(example):
unimplemented()
return predict
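# NeuralNetLearner's predict is left unimplemented above. Below is a hedged,
# structure-only sketch of the layered feed-forward pass with sigmoid units;
# the random weights stand in for what back-propagation would learn, and the
# function name is an assumption, not the module's API.
import math, random

def feedforward_sketch(sizes, inputs):
    # One weight matrix per adjacent pair of layers: n_out units x n_in weights each.
    weights = [[[random.uniform(-0.5, 0.5) for _ in range(n_in)]
                for _ in range(n_out)]
               for n_in, n_out in zip(sizes, sizes[1:])]
    activation = list(inputs)
    for layer in weights:
        activation = [1.0 / (1.0 + math.exp(-sum(w * a for w, a in zip(unit, activation))))
                      for unit in layer]
    return activation   # activations of the output layer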
|
Given a list of learning algorithms, have them vote.
|
def EnsembleLearner(learners):
"""Given a list of learning algorithms, have them vote."""
def train(dataset):
predictors = [learner(dataset) for learner in learners]
def predict(example):
return mode(predictor(example) for predictor in predictors)
return predict
return train
|
[Fig. 18.34]
|
def AdaBoost(L, K):
"""[Fig. 18.34]"""
def train(dataset):
examples, target = dataset.examples, dataset.target
N = len(examples)
epsilon = 1./(2*N)
w = [1./N] * N
h, z = [], []
for k in range(K):
h_k = L(dataset, w)
h.append(h_k)
error = sum(weight for example, weight in zip(examples, w)
if example[target] != h_k(example))
# Avoid divide-by-0 from either 0% or 100% error rates:
error = clip(error, epsilon, 1-epsilon)
for j, example in enumerate(examples):
if example[target] == h_k(example):
w[j] *= error / (1. - error)
w = normalize(w)
z.append(math.log((1. - error) / error))
return WeightedMajority(h, z)
return train
|
Return a predictor that takes a weighted vote.
|
def WeightedMajority(predictors, weights):
"Return a predictor that takes a weighted vote."
def predict(example):
return weighted_mode((predictor(example) for predictor in predictors),
weights)
return predict
|
Return the value with the greatest total weight. >>> weighted_mode('abbaa', [1, 2, 3, 1, 2]) 'b'
|
def weighted_mode(values, weights):
"""Return the value with the greatest total weight.
>>> weighted_mode('abbaa', [1,2,3,1,2])
'b'"""
totals = defaultdict(int)
for v, w in zip(values, weights):
totals[v] += w
return max(totals.keys(), key=totals.get)
|
Given a learner that takes just an unweighted dataset, return one that takes also a weight for each example. [p. 749 footnote 14]
|
def WeightedLearner(unweighted_learner):
"""Given a learner that takes just an unweighted dataset, return
one that takes also a weight for each example. [p. 749 footnote 14]"""
def train(dataset, weights):
return unweighted_learner(replicated_dataset(dataset, weights))
return train
|
Copy dataset, replicating each example in proportion to its weight.
|
def replicated_dataset(dataset, weights, n=None):
"Copy dataset, replicating each example in proportion to its weight."
n = n or len(dataset.examples)
result = copy.copy(dataset)
result.examples = weighted_replicate(dataset.examples, weights, n)
return result
|
Return n selections from seq, with the count of each element of seq proportional to the corresponding weight (filling in fractions randomly). >>> weighted_replicate('ABC', [1, 2, 1], 4) ['A', 'B', 'B', 'C']
|
def weighted_replicate(seq, weights, n):
"""Return n selections from seq, with the count of each element of
seq proportional to the corresponding weight (filling in fractions
randomly).
>>> weighted_replicate('ABC', [1,2,1], 4)
['A', 'B', 'B', 'C']"""
assert len(seq) == len(weights)
weights = normalize(weights)
wholes = [int(w*n) for w in weights]
fractions = [(w*n) % 1 for w in weights]
return (flatten([x] * nx for x, nx in zip(seq, wholes))
+ weighted_sample_with_replacement(seq, fractions, n - sum(wholes)))
|
Do k-fold cross_validate and return their mean. That is, keep out 1/k of the examples for testing on each of k runs. Shuffle the examples first; if trials>1, average over several shuffles.
|
def cross_validation(learner, dataset, k=10, trials=1):
"""Do k-fold cross_validate and return their mean.
That is, keep out 1/k of the examples for testing on each of k runs.
Shuffle the examples first; If trials>1, average over several shuffles."""
if k is None:
k = len(dataset.examples)
if trials > 1:
return mean([cross_validation(learner, dataset, k, trials=1)
for t in range(trials)])
else:
n = len(dataset.examples)
random.shuffle(dataset.examples)
return mean([train_and_test(learner, dataset, i*(n/k), (i+1)*(n/k))
for i in range(k)])
|
Leave one out cross-validation over the dataset.
|
def leave1out(learner, dataset):
"Leave one out cross-validation over the dataset."
return cross_validation(learner, dataset, k=len(dataset.examples))
|
Generate a DataSet with n examples.
|
def SyntheticRestaurant(n=20):
"Generate a DataSet with n examples."
def gen():
example = map(random.choice, restaurant.values)
example[restaurant.target] = Fig[18,2](example)
return example
return RestaurantDataSet([gen() for i in range(n)])
|
Return a DataSet with n k-bit examples of the majority problem: k random bits followed by a 1 if more than half the bits are 1, else 0.
|
def Majority(k, n):
"""Return a DataSet with n k-bit examples of the majority problem:
k random bits followed by a 1 if more than half the bits are 1, else 0."""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for i in range(k)]
bits.append(int(sum(bits) > k/2))
examples.append(bits)
return DataSet(name="majority", examples=examples)
|
2 inputs are chosen uniformly from (0.0 .. 2.0]; output is xor of ints.
|
def ContinuousXor(n):
"2 inputs are chosen uniformly from (0.0 .. 2.0]; output is xor of ints."
examples = []
for i in range(n):
x, y = [random.uniform(0.0, 2.0) for i in '12']
examples.append([x, y, int(x) != int(y)])
return DataSet(name="continuous xor", examples=examples)
|
Compare various learners on various datasets using cross-validation. Print results as a table.
|
def compare(algorithms=[PluralityLearner, NaiveBayesLearner,
NearestNeighborLearner, DecisionTreeLearner],
datasets=[iris, orings, zoo, restaurant, SyntheticRestaurant(20),
Majority(7, 100), Parity(7, 100), Xor(100)],
k=10, trials=1):
"""Compare various learners on various datasets using cross-validation.
Print results as a table."""
print_table([[a.__name__.replace('Learner','')] +
[cross_validation(a, d, k, trials) for d in datasets]
for a in algorithms],
header=[''] + [d.name[0:7] for d in datasets], numfmt='%.2f')
|
Set (or change) the target and/or inputs. This way, one DataSet can be used multiple ways. inputs, if specified, is a list of attributes, or specify exclude as a list of attributes to not use in inputs. Attributes can be -n .. n, or an attrname. Also computes the list of possible values, if that wasn't done yet.
|
def setproblem(self, target, inputs=None, exclude=()):
"""Set (or change) the target and/or inputs.
This way, one DataSet can be used multiple ways. inputs, if specified,
is a list of attributes, or specify exclude as a list of attributes
to not use in inputs. Attributes can be -n .. n, or an attrname.
Also computes the list of possible values, if that wasn't done yet."""
self.target = self.attrnum(target)
exclude = map(self.attrnum, exclude)
if inputs:
self.inputs = removeall(self.target, inputs)
else:
self.inputs = [a for a in self.attrs
if a != self.target and a not in exclude]
if not self.values:
self.values = map(unique, zip(*self.examples))
self.check_me()
|
Check that my fields make sense.
|
def check_me(self):
"Check that my fields make sense."
assert len(self.attrnames) == len(self.attrs)
assert self.target in self.attrs
assert self.target not in self.inputs
assert set(self.inputs).issubset(set(self.attrs))
map(self.check_example, self.examples)
|
Add an example to the list of examples, checking it first.
|
def add_example(self, example):
"Add an example to the list of examples, checking it first."
self.check_example(example)
self.examples.append(example)
|
Raise ValueError if example has any invalid values.
|
def check_example(self, example):
"Raise ValueError if example has any invalid values."
if self.values:
for a in self.attrs:
if example[a] not in self.values[a]:
raise ValueError('Bad value %s for attribute %s in %s' %
(example[a], self.attrnames[a], example))
|
Returns the number used for attr, which can be a name, or -n .. n-1.
|
def attrnum(self, attr):
"Returns the number used for attr, which can be a name, or -n .. n-1."
if attr < 0:
return len(self.attrs) + attr
elif isinstance(attr, str):
return self.attrnames.index(attr)
else:
return attr
|
Return a copy of example, with non-input attributes replaced by None.
|
def sanitize(self, example):
"Return a copy of example, with non-input attributes replaced by None."
return [attr_i if i in self.inputs else None
for i, attr_i in enumerate(example)]
|
Add an observation o to the distribution.
|
def add(self, o):
"Add an observation o to the distribution."
self.smooth_for(o)
self.dictionary[o] += 1
self.n_obs += 1
self.sampler = None
|
Include o among the possible observations, whether or not it's been observed yet.
|
def smooth_for(self, o):
"""Include o among the possible observations, whether or not
it's been observed yet."""
if o not in self.dictionary:
self.dictionary[o] = self.default
self.n_obs += self.default
self.sampler = None
|
Return (count, obs) tuples for the n most frequent observations.
|
def top(self, n):
"Return (count, obs) tuples for the n most frequent observations."
return heapq.nlargest(n, [(v, k) for (k, v) in self.dictionary.items()])
|
Return a random sample from the distribution.
|
def sample(self):
"Return a random sample from the distribution."
if self.sampler is None:
self.sampler = weighted_sampler(self.dictionary.keys(),
self.dictionary.values())
return self.sampler()
|
[Fig. 6.3]
|
def AC3(csp, queue=None, removals=None):
"""[Fig. 6.3]"""
if queue is None:
queue = [(Xi, Xk) for Xi in csp.vars for Xk in csp.neighbors[Xi]]
csp.support_pruning()
while queue:
(Xi, Xj) = queue.pop()
if revise(csp, Xi, Xj, removals):
if not csp.curr_domains[Xi]:
return False
for Xk in csp.neighbors[Xi]:
                if Xk != Xj:
queue.append((Xk, Xi))
return True
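# A hedged usage sketch (MapColoringCSP and parse_neighbors appear later in
# this module; the helper name is illustrative): two colors suffice for the
# path A-B-C, so arc consistency succeeds without emptying any domain.
def _AC3_demo():
    tiny = MapColoringCSP(list('RG'), 'A: B; B: C')
    return AC3(tiny)   # True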
|
Return true if we remove a value.
|
def revise(csp, Xi, Xj, removals):
"Return true if we remove a value."
revised = False
for x in csp.curr_domains[Xi][:]:
# If Xi=x conflicts with Xj=y for every possible y, eliminate Xi=x
if every(lambda y: not csp.constraints(Xi, x, Xj, y),
csp.curr_domains[Xj]):
csp.prune(Xi, x, removals)
revised = True
return revised
|
Minimum-remaining-values heuristic.
|
def mrv(assignment, csp):
"Minimum-remaining-values heuristic."
return argmin_random_tie(
[v for v in csp.vars if v not in assignment],
lambda var: num_legal_values(csp, var, assignment))
|
Least-constraining-values heuristic.
|
def lcv(var, assignment, csp):
"Least-constraining-values heuristic."
return sorted(csp.choices(var),
key=lambda val: csp.nconflicts(var, val, assignment))
|
Prune neighbor values inconsistent with var=value.
|
def forward_checking(csp, var, value, assignment, removals):
"Prune neighbor values inconsistent with var=value."
for B in csp.neighbors[var]:
if B not in assignment:
for b in csp.curr_domains[B][:]:
if not csp.constraints(var, value, B, b):
csp.prune(B, b, removals)
if not csp.curr_domains[B]:
return False
return True
|
Maintain arc consistency.
|
def mac(csp, var, value, assignment, removals):
"Maintain arc consistency."
return AC3(csp, [(X, var) for X in csp.neighbors[var]], removals)
|
[Fig. 6.5] >>> backtracking_search(australia) is not None True >>> backtracking_search(australia, select_unassigned_variable=mrv) is not None True >>> backtracking_search(australia, order_domain_values=lcv) is not None True >>> backtracking_search(australia, select_unassigned_variable=mrv, order_domain_values=lcv) is not None True >>> backtracking_search(australia, inference=forward_checking) is not None True >>> backtracking_search(australia, inference=mac) is not None True >>> backtracking_search(usa, select_unassigned_variable=mrv, order_domain_values=lcv, inference=mac) is not None True
|
def backtracking_search(csp,
select_unassigned_variable = first_unassigned_variable,
order_domain_values = unordered_domain_values,
inference = no_inference):
"""[Fig. 6.5]
>>> backtracking_search(australia) is not None
True
>>> backtracking_search(australia, select_unassigned_variable=mrv) is not None
True
>>> backtracking_search(australia, order_domain_values=lcv) is not None
True
>>> backtracking_search(australia, select_unassigned_variable=mrv, order_domain_values=lcv) is not None
True
>>> backtracking_search(australia, inference=forward_checking) is not None
True
>>> backtracking_search(australia, inference=mac) is not None
True
>>> backtracking_search(usa, select_unassigned_variable=mrv, order_domain_values=lcv, inference=mac) is not None
True
"""
def backtrack(assignment):
if len(assignment) == len(csp.vars):
return assignment
var = select_unassigned_variable(assignment, csp)
for value in order_domain_values(var, assignment, csp):
if 0 == csp.nconflicts(var, value, assignment):
csp.assign(var, value, assignment)
removals = csp.suppose(var, value)
if inference(csp, var, value, assignment, removals):
result = backtrack(assignment)
if result is not None:
return result
csp.restore(removals)
csp.unassign(var, assignment)
return None
result = backtrack({})
assert result is None or csp.goal_test(result)
return result
|
Solve a CSP by stochastic hillclimbing on the number of conflicts.
|
def min_conflicts(csp, max_steps=100000):
"""Solve a CSP by stochastic hillclimbing on the number of conflicts."""
# Generate a complete assignment for all vars (probably with conflicts)
csp.current = current = {}
for var in csp.vars:
val = min_conflicts_value(csp, var, current)
csp.assign(var, val, current)
# Now repeatedly choose a random conflicted variable and change it
for i in range(max_steps):
conflicted = csp.conflicted_vars(current)
if not conflicted:
return current
var = random.choice(conflicted)
val = min_conflicts_value(csp, var, current)
csp.assign(var, val, current)
return None
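# A hedged usage sketch ('australia' is the map-coloring instance used in the
# backtracking_search doctests above; the helper name is illustrative):
def _min_conflicts_demo():
    return min_conflicts(australia)   # a region->color dict, or None on failure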
|
Return the value that will give var the least number of conflicts. If there is a tie, choose at random.
|
def min_conflicts_value(csp, var, current):
"""Return the value that will give var the least number of conflicts.
If there is a tie, choose at random."""
return argmin_random_tie(csp.domains[var],
lambda val: csp.nconflicts(var, val, current))
|
[Fig. 6.11]
|
def tree_csp_solver(csp):
"[Fig. 6.11]"
n = len(csp.vars)
assignment = {}
root = csp.vars[0]
X, parent = topological_sort(csp.vars, root)
for Xj in reversed(X):
if not make_arc_consistent(parent[Xj], Xj, csp):
return None
for Xi in X:
if not csp.curr_domains[Xi]:
return None
assignment[Xi] = csp.curr_domains[Xi][0]
return assignment
|
Make a CSP for the problem of coloring a map with different colors for any two adjacent regions. Arguments are a list of colors, and a dict of {region: [neighbor,...]} entries. This dict may also be specified as a string of the form defined by parse_neighbors.
|
def MapColoringCSP(colors, neighbors):
"""Make a CSP for the problem of coloring a map with different colors
for any two adjacent regions. Arguments are a list of colors, and a
dict of {region: [neighbor,...]} entries. This dict may also be
specified as a string of the form defined by parse_neighbors."""
if isinstance(neighbors, str):
neighbors = parse_neighbors(neighbors)
return CSP(neighbors.keys(), UniversalDict(colors), neighbors,
different_values_constraint)
|
Convert a string of the form 'X: Y Z; Y: Z' into a dict mapping regions to neighbors. The syntax is a region name followed by a ':' followed by zero or more region names, followed by ';', repeated for each region name. If you say 'X: Y' you don't need 'Y: X'. >>> parse_neighbors('X: Y Z; Y: Z') {'Y': ['X', 'Z'], 'X': ['Y', 'Z'], 'Z': ['X', 'Y']}
|
def parse_neighbors(neighbors, vars=[]):
"""Convert a string of the form 'X: Y Z; Y: Z' into a dict mapping
regions to neighbors. The syntax is a region name followed by a ':'
followed by zero or more region names, followed by ';', repeated for
each region name. If you say 'X: Y' you don't need 'Y: X'.
>>> parse_neighbors('X: Y Z; Y: Z')
{'Y': ['X', 'Z'], 'X': ['Y', 'Z'], 'Z': ['X', 'Y']}
"""
dict = DefaultDict([])
for var in vars:
dict[var] = []
specs = [spec.split(':') for spec in neighbors.split(';')]
for (A, Aneighbors) in specs:
A = A.strip()
dict.setdefault(A, [])
for B in Aneighbors.split():
dict[A].append(B)
dict[B].append(A)
return dict
|
Constraint is satisfied (true) if A, B are really the same variable, or if they are not in the same row, down diagonal, or up diagonal.
|
def queen_constraint(A, a, B, b):
"""Constraint is satisfied (true) if A, B are really the same variable,
or if they are not in the same row, down diagonal, or up diagonal."""
return A == B or (a != b and A + a != B + b and A - a != B - b)
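# A hedged worked example (column/row values and helper name are illustrative):
# queens in adjacent columns clash when their rows differ by exactly one.
def _queen_constraint_demo():
    assert not queen_constraint(0, 0, 1, 1)   # shared down diagonal
    assert queen_constraint(0, 0, 1, 2)       # no shared row or diagonal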
|
Return an instance of the Zebra Puzzle.
|
def Zebra():
"Return an instance of the Zebra Puzzle."
Colors = 'Red Yellow Blue Green Ivory'.split()
Pets = 'Dog Fox Snails Horse Zebra'.split()
Drinks = 'OJ Tea Coffee Milk Water'.split()
Countries = 'Englishman Spaniard Norwegian Ukranian Japanese'.split()
Smokes = 'Kools Chesterfields Winston LuckyStrike Parliaments'.split()
vars = Colors + Pets + Drinks + Countries + Smokes
domains = {}
for var in vars:
domains[var] = range(1, 6)
domains['Norwegian'] = [1]
domains['Milk'] = [3]
neighbors = parse_neighbors("""Englishman: Red;
Spaniard: Dog; Kools: Yellow; Chesterfields: Fox;
Norwegian: Blue; Winston: Snails; LuckyStrike: OJ;
Ukranian: Tea; Japanese: Parliaments; Kools: Horse;
Coffee: Green; Green: Ivory""", vars)
for type in [Colors, Pets, Drinks, Countries, Smokes]:
for A in type:
for B in type:
if A != B:
if B not in neighbors[A]: neighbors[A].append(B)
if A not in neighbors[B]: neighbors[B].append(A)
def zebra_constraint(A, a, B, b, recurse=0):
same = (a == b)
next_to = abs(a - b) == 1
if A == 'Englishman' and B == 'Red': return same
if A == 'Spaniard' and B == 'Dog': return same
if A == 'Chesterfields' and B == 'Fox': return next_to
if A == 'Norwegian' and B == 'Blue': return next_to
if A == 'Kools' and B == 'Yellow': return same
if A == 'Winston' and B == 'Snails': return same
if A == 'LuckyStrike' and B == 'OJ': return same
if A == 'Ukranian' and B == 'Tea': return same
if A == 'Japanese' and B == 'Parliaments': return same
if A == 'Kools' and B == 'Horse': return next_to
if A == 'Coffee' and B == 'Green': return same
if A == 'Green' and B == 'Ivory': return (a - 1) == b
if recurse == 0: return zebra_constraint(B, b, A, a, 1)
if ((A in Colors and B in Colors) or
(A in Pets and B in Pets) or
(A in Drinks and B in Drinks) or
(A in Countries and B in Countries) or
(A in Smokes and B in Smokes)): return not same
        raise ValueError('error')
return CSP(vars, domains, neighbors, zebra_constraint)
|
Add {var: val} to assignment; Discard the old value if any.
|
def assign(self, var, val, assignment):
"Add {var: val} to assignment; Discard the old value if any."
assignment[var] = val
self.nassigns += 1
|
Return the number of conflicts var=val has with other variables.
|
def nconflicts(self, var, val, assignment):
"Return the number of conflicts var=val has with other variables."
# Subclasses may implement this more efficiently
def conflict(var2):
return (var2 in assignment
and not self.constraints(var, val, var2, assignment[var2]))
return count_if(conflict, self.neighbors[var])
|
Return a list of applicable actions: nonconflicting assignments to an unassigned variable.
|
def actions(self, state):
"""Return a list of applicable actions: nonconflicting
assignments to an unassigned variable."""
if len(state) == len(self.vars):
return []
else:
assignment = dict(state)
var = find_if(lambda v: v not in assignment, self.vars)
return [(var, val) for val in self.domains[var]
if self.nconflicts(var, val, assignment) == 0]
|
Make sure we can prune values from domains. (We want to pay for this only if we use it.)
|
def support_pruning(self):
"""Make sure we can prune values from domains. (We want to pay
for this only if we use it.)"""
if self.curr_domains is None:
self.curr_domains = dict((v, list(self.domains[v]))
for v in self.vars)
|