From d5dd00032e3d76655f25adc0ac3d7abe4c1a8864 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Thu, 31 Aug 2023 19:46:25 +0000 Subject: [PATCH 01/15] Main --- Student/Lecture1.lean | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 Student/Lecture1.lean diff --git a/Student/Lecture1.lean b/Student/Lecture1.lean new file mode 100644 index 00000000..c096244a --- /dev/null +++ b/Student/Lecture1.lean @@ -0,0 +1,12 @@ +def id_nat : Nat → Nat + | n => n + +def id_bool : Bool → Bool + | true => true + | false => false + +def id_bool : Bool → Bool + | b => b + +def id_string : String → String + | n => n \ No newline at end of file From a7bcb0948d7743c9b9bfd50e76ab8226d1caf2d4 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Tue, 19 Sep 2023 19:23:48 +0000 Subject: [PATCH 02/15] committing changes --- Instructor/Lectures/lecture_05.lean | 3 +- Student/hw3-student.lean | 232 ++++++++++++++++++++++++ Student/hw3_key.lean | 204 +++++++++++++++++++++ Student/hw4-student.lean | 263 ++++++++++++++++++++++++++++ 4 files changed, 701 insertions(+), 1 deletion(-) create mode 100644 Student/hw3-student.lean create mode 100644 Student/hw3_key.lean create mode 100644 Student/hw4-student.lean diff --git a/Instructor/Lectures/lecture_05.lean b/Instructor/Lectures/lecture_05.lean index d11c32e2..1d88cff4 100644 --- a/Instructor/Lectures/lecture_05.lean +++ b/Instructor/Lectures/lecture_05.lean @@ -191,7 +191,8 @@ otherwise. -- Answer here. -def is_even_len := _ -- Be sure you can solve it +def is_even_len := String → Bool + -- Be sure you can solve it #eval is_even_len "Love" -- Expect true #eval is_even_len "Love!" -- Expect false diff --git a/Student/hw3-student.lean b/Student/hw3-student.lean new file mode 100644 index 00000000..82a5e625 --- /dev/null +++ b/Student/hw3-student.lean @@ -0,0 +1,232 @@ +/-! +# Homework #3 + +Near final DRAFT. + +## Overview and Rules + +The purpose of this homework is to strengthen your +understanding of function composition and of enumerated +and product data types. + +The collaboration rule for this homework is that +you may *not* collaborate. You can ask friends and +colleagues to help you understand material in the +class notes, but you may not discuss any aspect +of this homework itself with anyone other than one +of the instructors or TAs. Why? Because *you* need +to learn this material to pass the exam to come. +-/ + +/-! +## Problem #1 + +Define a funtion of the following polymorphic type: +{α β γ : Type} → (β → γ) → (α → β) → (α → γ). Call it +*funkom*. After the implicit type arguments it should +take two function arguments and return a function as +a result. +-/ + +-- Answer below + +def funkom {α β γ : Type} : (β → γ) → (α → β) → (α → γ) +| g, f => (fun a => g (f a)) + +/-! +## Problem #2 + +Define a function of the following polymorphic type: +{α β : Type} → (a : α) → (b : β) → α × β. Call it mkop. +-/ + +-- Answer below + +def mkop {α β : Type} : (a : α) → (b : β) → α × β +| a,b => (a, b) + +#check (@mkop) + +/-! +## Problem #3 + +Define a function of the following polymorphic type: +{α β : Type} → α × β → α. Call it op_left. +-/ + +-- Answer below +def op_left {α β : Type} : α × β → α +| (a,_) => a + +#check (@op_left) + + +/-! +## Problem #4 + +Define a function of the following polymorphic type: +{α β : Type} → α × β → β. Call it op_right. +-/ + +-- Answer below + +def op_right {α β : Type} : α × β → β +| (_,b) => b + +#check (@op_right) +/-! 
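+A few optional spot checks of *funkom*, *mkop*, *op_left*, and
+*op_right* (a hedged aside; the values below are made up and these
+checks are not required by the homework):
+-/
+
+#eval op_left (mkop 1 "hi")    -- expect 1
+#eval op_right (mkop 1 "hi")   -- expect "hi"
+#eval funkom (fun n : Nat => n + 1) (fun s : String => s.length) "abc"  -- expect 4
+
+/-!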
+## Problem #5 + +Define a data type called *Day*, the values of which +are the names of the seven days of the week: *sunday, +monday,* etc. + +Some days are work days and some days are play +days. Define a data type, *kind*, with two values, +*work* and *play*. + +Now define a function, *day2kind*, that takes a *day* +as an argument and returns the *kind* of day it is as +a result. Specify *day2kind* so that weekdays (monday +through friday) are *work* days and weekend days are +*play* days. + +Next, define a data type, *reward*, with two values, +*money* and *health*. + +Now define a function, *kind2reward*, from *kind* to +*reward* where *reward work* is *money* and *reward play* +is *health*. + +Finally, use your *funkom* function to produce a new +function that takes a day and returns the corresponding +reward. Call it *day2reward*. + +Include test cases using #reduce to show that the reward +from each weekday is *money* and the reward from a weekend +day is *health*. +-/ + +inductive Day : Type +| sunday +| monday +| tuesday +| wednesday +| thursday +| friday +| saturday + +open Day + +inductive kind : Type +| work +| play + +open kind + +def day2kind : Day → kind +| saturday => play +| sunday => play +| _ => work + +inductive reward : Type +| money +| health + +open reward + +def kind2reward : kind → reward +| work => money +| play => health + +def day2reward : Day → reward +| d => funkom kind2reward day2kind d + +#reduce day2reward monday +#reduce day2reward tuesday +#reduce day2reward wednesday +#reduce day2reward thursday +#reduce day2reward friday +#reduce day2reward saturday +#reduce day2reward sunday + + + + +/-! +## Problem #6 + +### A. +Consider the outputs of the following #check commands. +-/ + +#check Nat × Nat × Nat +#check Nat × (Nat × Nat) +#check (Nat × Nat) × Nat + +/-! +Is × left associative or right associative? Briefly explain +how you reached your answer. + +Answer here: Right associative, because the type of the last line is (Nat × Nat) × Nat which means that the +product in the parentheses was evaluated second while in the line above, the parentheses were evaluated first and so it behaves +like how → behaves, which is right associated. + +### B. +Define a function, *triple*, of the following type: +{ α β γ : Type } → α → β → γ → (α × β × γ) +-/ + +-- Here: + +def triple {α β γ : Type} : α → β → γ → (α × β × γ) +| a, b, c => (a, b, c) + +#check (@triple) + +/-! +### C. +Define three functions, call them *first*, *second*, +and *third*, each of which takes any such triple as +an argument and that returns, respectively, its first, +second, or third elements. +-/ + +-- Here: + +def first {α β γ : Type} : α × β × γ → α +| (a, _, _) => a + +def second {α β γ : Type} : α × β × γ → β +| (_, b, _) => b + +def third {α β γ : Type} : α × β × γ → γ +| (_, _, c) => c + +/-! +### D. +Write three test cases using #eval to show that when +you apply each of these "elimination" functions to a +triple (that you can make up) it returns the correct +element of that triple. +-/ + +-- Here: + +#eval first (triple 1 2 3) +#eval second (triple 1 2 3) +#eval third (triple 1 2 3) + +/-! +### E. +Use #check to check the type of a term. that you make +up, of type (Nat × String) × Bool. The challenge here +is to write a term of that type. 
+-/ + +def prod_nat_string_bool: (Nat × String) × Bool := Prod.mk (1, "Hello") true + +#check prod_nat_string_bool + + + diff --git a/Student/hw3_key.lean b/Student/hw3_key.lean new file mode 100644 index 00000000..80287367 --- /dev/null +++ b/Student/hw3_key.lean @@ -0,0 +1,204 @@ +/-! +# Homework #3 + +Near final DRAFT. + +## Overview and Rules + +The purpose of this homework is to strengthen your +understanding of function composition and of enumerated +and product data types. + +The collaboration rule for this homework is that +you may *not* collaborate. You can ask friends and +colleagues to help you understand material in the +class notes, but you may not discuss any aspect +of this homework itself with anyone other than one +of the instructors or TAs. Why? Because *you* need +to learn this material to pass the exam to come. +-/ + +/-! +## Problem #1 + +Define a function of the following polymorphic type: +{α β γ : Type} → (β → γ) → (α → β) → (α → γ). Call it +*funkom*. After the implicit type arguments it should +take two function arguments and return a function as +a result. +-/ + +-- Answer below + +def funkom: {α β γ : Type} → (β → γ) → (α → β) → (α → γ) +| α, β, γ, g, f => + +/-! +We'll implement a function of this type using what +we call top-down type-guided structured programming. +-/ + + + +/-! +Here's the procedure we followed when going over the HW. +- introduce the name of the function with def +- specify the *type* of the function after a : +- use pattern matching bind names to unnamed arguments +- "stub out" the entire return value using _ +- see the *context* for what you have and what you need + - some arbitrary (unspecified) type value, α, β, and γ + - an arbitrary function value, *g : β → γ* + - an arbitrary function value, *f : α → β* + - an arbitrary value, *a* of type *α* +- the challenge: construct a *γ* given what you've assumed you have +- incrementally expand the implementation, from the outside to in + - I need a γ value; I can get it by applying *g* to a β value + - I need a β value; I can get that by applying *f* to an α + - I have an α in +-/ + +/-! +## Problem #2 + +Define a function of the following polymorphic type: +{α β : Type} → (a : α) → (b : β) → α × β. Call it mkop. +-/ + +-- Answer below + +/-! +We recommend the same top-down, type-guided structured +programming approach. Some people would simply call it +top-down refinement, or top-down programming. It's an +essential strategy that every great programmer knows. +-/ + +/-! +## Problem #3 + +Define a function of the following polymorphic type: +{α β : Type} → α × β → α. Call it op_left. +-/ + +-- Answer below + +-- Hint: Use top-down structured programming! + + +/-! +## Problem #4 + +Define a function of the following polymorphic type: +{α β : Type} → α × β → β. Call it op_right. +-/ + +-- Answer below + +-- Hint: Use top-down structured programming + + + +/-! +## Problem #5 + +Define a data type called *Day*, the values of which +are the names of the seven days of the week: *sunday, +monday,* etc. +-/ + +/-! +Some days are work days and some days are play +days. Define a data type, *kind*, with two values, +*work* and *play*. +-/ + +/-! +Now define a function, *day2kind*, that takes a *day* +as an argument and returns the *kind* of day it is as +a result. Specify *day2kind* so that weekdays (monday +through friday) are *work* days and weekend days are +*play* days. +-/ + +/-! +Next, define a data type, *reward*, with two values, +*money* and *health*. +-/ + +/-! 
+Now define a function, *kind2reward*, from *kind* to +*reward* where *reward work* is *money* and *reward play* +is *health*. +-/ + +/-! +Finally, use your *funkom* function to produce a new +function that takes a day and returns the corresponding +reward. Call it *day2reward*. +-/ + +/-! +Include test cases using #reduce to show that the reward +from each weekday is *money* and the reward from a weekend +day is *health*. +-/ + +/-! +## Problem #6 + +### A. +Consider the outputs of the following #check commands. +-/ + +#check Nat × Nat × Nat +#check Nat × (Nat × Nat) +#check (Nat × Nat) × Nat + +/-! +Is × left associative or right associative? Briefly explain +how you reached your answer. + +Answer here: + +### B. +Define a function, *triple*, of the following type: +{ α β γ : Type } → α → β → γ → (α × β × γ) +-/ + +-- Here: + +-- Hints: (1) put in parens for clarity; (2) use TDSP. + +/-! +### C. +Define three functions, call them *first*, *second*, +and *third*, each of which takes any such triple as +an argument and that returns, respectively, its first, +second, or third elements. +-/ + +-- Here: + +-- Ok, this one takes a small leap of imagination + +/-! +### D. +Write three test cases using #eval to show that when +you apply each of these "elimination" functions to a +triple (that you can make up) it returns the correct +element of that triple. +-/ + +-- Here: + +/-! +### E. +Use #check to check the type of a term. that you make +up, of type (Nat × String) × Bool. The challenge here +is to write a term of that type. +-/ + + + + diff --git a/Student/hw4-student.lean b/Student/hw4-student.lean new file mode 100644 index 00000000..4e61f0fb --- /dev/null +++ b/Student/hw4-student.lean @@ -0,0 +1,263 @@ +/-! +# Homework #4 + +The PURPOSE of this homework is to greatly strengthen +your understanding of the construction and use of the +data types we've introduced to far, especially the sum +and product types. You will be asked to solve problems +that in some cases will require a bit of programming +creativity, requiring you to to put together several +of the ideas we've covered so far. + +READ THIS: It is vitally important that you learn how +to solve these problems on your own. You will have to +be able to do this to do well on the first exam, a month +or so away. Therefore, the collaboration rule for this +homework is that you may *not* collaborate. You can ask +friends and colleagues to help you understand the class +material, but you may not discuss this homework itself +with anyone other than one of the instructors or TAs. +-/ + +/-! +## #1: Read All of the Class Notes + +You won't be graded on this part of the assignment, +but it is nevertheless serious and required work on +your part. Read and genuinely understand *all* the +class notes through lecture_08. Everything that we +have covered in class is covered in the notes, and +more. You can work with the examples in the notes in +VSCode by opening the corresponding files. Don't be +afraid to "play around" with the examples in VSCode. +Doing to will really solidify your understanding. +-/ + +/-! +## #2. Is Prod Commutative? + +If you have *bread and cheese* can you always get +yourself *cheese and bread?* Let's convert this +question into one that's both more abstract and +general as well as formal (mathematical). + +If you're given types, α and β, and an arbitrary +ordered pair of type α × β, can you always construct +and return a value of type β × α? 
Prove that the +answer is yes by writing a function that takes any +value of type α × β value and that returns a value +of type β × α. Call your function prod_comm. +-/ + +def prod_comm { α β : Type } : α × β → β × α +| _ => _ + +/-! +Is the transformation from *α × β* to *β × α* +reversible? That is, given types *α* and *β* (in +that order), then if you have any term of type +*β × α*, can you always convert it into a term +of type *α × β*? Prove it by defining a function +of the appropriate type. Call it prod_com_reverse. +-/ + +-- Here: + +/-! +## #3: Associativity of Prod + +Suppose you have *bread and (cheese and jam)*. +Can you have *(bread and cheese) and jam* (just +grouping the *ands* differently)? Let's again turn +this into an abstract, general, and formal question, +using *α, β,* and *γ* as names instead, of *bread, +cheese,* and *jam*. + +Suppose α, β, and γ are arbitrary types. If you're +given an arbitrary *value* of type α × (β × γ), can +you can always produce a value of type (α × β) × γ? + +To show that you can, write a function of type +{ α β γ : Type} → (α × (β × γ)) → ((α × β) × γ). +Call it prod_assoc. We declare the type parameters +before the colon in our skeleton definition so that +you don't have to match on them. Hint: You can use +ordered pair notation to match the input value. +-/ + +-- Here + +def prod_assoc { α β γ : Type} : α × (β × γ) → (α × β) × γ +| _ => _ + +/-! +Prove that the conversion works in the reverse direction +as well, from *(α × β) × γ* to *α × (β × γ)* by defining +a function, *prod_assoc_reverse* accordingly. +-/ + +-- Here: + +/-! +## #4. Is Sum Commutative? + +Suppose you have either bread or cheese. Can you +always have either cheese or bread? In other words +are sums *commutative?* That's the technical word +that we use for any *operator*, such as +, with the +property that *a + b* is equivalent to *b + a*. + +Once again let's formulate the question abstractly, +in a general way, and with mathematical precision. + +If you have either a value of type α or a value of +type β, then do you have either a value of type β +or a value of type α? The answer should be obvious. +To prove it, define a function, that, when applied +to any term of type α ⊕ β, returns a value of type +β ⊕ α. Call it sum_comm. + +Note that in the outline code we provide we use a +syntax that is a bit new. Re-read the material in +the notes if necessary. We declare the type of +*sum_com* then use a := followed by a *lambda +expression* that gives the function definition. +That expression, in turn uses an explcit match +statement. The form you've mostly seen up to now +is really just notational shorthand for this kind +of definition. +-/ + +def sum_comm { α β : Type} : α ⊕ β → β ⊕ α := +fun s => + match s with + | _ => _ + | _ => _ + +/-! +Can you always convert a term of type *β ⊗ α* into +one of type *α ⊕ β*? Prove it by writing a function +that does it. Call is sum_comm_reverse. +-/ + +-- Here: + + +/-! +## #5: Is Sum Associative? + +If you have bread or (cheese or jam), can you always +have (bread or cheese) or jam? In other words, are sum +types *associative*? That's the word we use for an +operator with the property that *a + (b + c)* is +equivalent to *(a + b) + c*. You can *associate* the +arguments differently without really changing the +meaning. + +So, if you have an arbitrary value of type α ⊕ (β ⊕ γ) +can you construct a value of type (α ⊕ β) ⊕ γ? If you +answer yes, prove it by defining a function of type +α ⊕ (β ⊕ γ) → (α ⊕ β) ⊕ γ. Call it sum_assoc. 
+ +Hint: Consider two cases for α ⊕ (β ⊕ γ), and within +the "right" case, consider two further cases. You can +solve this problem with three matching patterns: one +for the first case and two for the second, each of +which starts with a Sum.inr. You will need to use +"nested" expressions involving Sum.inl and Sum.inr +in both matching and to define return result values. +-/ + +def sum_assoc { α β γ : Type} : α ⊕ (β ⊕ γ) → (α ⊕ β) ⊕ γ +| (Sum.inl a) => (Sum.inl _) +| (Sum.inr (Sum.inl b)) => _ +| _ => _ + +/-! +Does this conversion also work in reverse? Prove it +with a function that takes a term of the second sum type +(in the preceding example) as an argument and that returns +a value of the first sum type as a result. +-/ + +-- Here: + +/-! +## #6. Products Distribute Over Sums + +If you have bread and (cheese or jam) do you have +(bread and cheese) or (bread and jam)? We think so. +Before you move on, think about it! + +Define *prod_dist_sum : α × (β ⊕ γ) → (α × β) ⊕ (α × γ).* +In other words, if you have a value that includes (1) a +value of type *α* and (2) either a value of type *β* or +a value of type *γ*, then you can derive a value that is +either an *α* value and a *β* value, or an *α* value and +a *γ* value. + -/ + + def prod_dist_sum {α β γ : Type} : _ + | _ => _ + | _ => _ + +/-! +Does the preceding principle work in reverse? In other +words, if you have *(α × β) ⊕ (α × γ)* can you derive +*α × (β ⊕ γ)*? Concretely, if you have either bread and +cheese or bread and jam. do you have bread and either +cheese or jam? Prove it with a function, that converts +any value of type *(α × β) ⊕ (α × γ)* into one of type +*α × (β ⊕ γ)*. +-/ + +-- Here: + +/-! +In the forward (first) direction we can say that products +distribute over sums, just as, say, *4 * (2 + 3)* is the +same as (4 * 2) + (4 * 3)*. In the reverse direction, we +can say that can *factor out* the common factor, *4*. So +in a sense, we're now doing Algebra 1 but with sandwiches! +-/ + +/-! +## #8. Sum Elimination + +Suppose you're given: (1) types called *rain, sprinkler,* +and *wet*; (2) a value of type *rain ⊕ sprinkler*; and (3), +two functions, of types *rain → wet* and *sprinkler → wet*. +Show that you can construct and return a value of type *wet*. +Do this by defining a function called *its_wet*, that, if +given values of those types as arguments, returns a value of +type *wet*. +-/ + +-- Here + +/-! +Now rewrite your function using the type names, +*α, γ,* and *β* instead of *rain, sprinkler* and +*wet*. Call it *sum_elim*. +-/ + +-- Here: + +/-! +You should now better understand how to program +with arbitrary values of arbitrary sum types. To do +so, you need to be able to derive a result of the +return type, *γ* from *either* of the possible types +in the sum: from a value of either type *α* or *β*. +-/ + +/-! +## Wrap-Up + +The programs (functions) we've asked you to write +for this homework are deeply important, in that +they correspond directly to fundamental principles +of logical reasoning. The until now hidden purpose +of this assignment has been to warm you up to this +profound idea. 
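+For example, *sum_elim* captures exactly the rule of reasoning by
+cases: from *A or B*, together with *A implies C* and *B implies C*,
+you may conclude *C*.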
+-/ From e2f3226b12e8663cd2c6ee59e5f9689b0fac1ed6 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Tue, 19 Sep 2023 19:24:14 +0000 Subject: [PATCH 03/15] committ --- Instructor/Homework/hw3_key.lean | 65 ++--------------------- Student/hw4-student.lean | 91 ++++++++++++++++++++++++++++---- 2 files changed, 84 insertions(+), 72 deletions(-) diff --git a/Instructor/Homework/hw3_key.lean b/Instructor/Homework/hw3_key.lean index 74c8ee4e..e37a90eb 100644 --- a/Instructor/Homework/hw3_key.lean +++ b/Instructor/Homework/hw3_key.lean @@ -30,14 +30,15 @@ a result. -- Answer below -def funkom : {α β γ : Type} → (β → γ) → (α → β) → (α → γ) -| α, β, γ, g, f => fun (a : α) => (g (f a)) + /-! We'll implement a function of this type using what we call top-down type-guided structured programming. -/ + + /-! Here's the procedure we followed when going over the HW. - introduce the name of the function with def @@ -65,9 +66,6 @@ Define a function of the following polymorphic type: -- Answer below -def mkop : {α β : Type} → (a : α) → (b : β) → α × β -| α, β, a, b => (a,b) -- Prod.mk a b - /-! We recommend the same top-down, type-guided structured programming approach. Some people would simply call it @@ -84,9 +82,6 @@ Define a function of the following polymorphic type: -- Answer below -def op_left : {α β : Type} → α × β → α -| α, β, p => Prod.fst p - -- Hint: Use top-down structured programming! @@ -101,8 +96,6 @@ Define a function of the following polymorphic type: -- Hint: Use top-down structured programming -def op_right {α β : Type} : α × β → β -| p => match p with | (_, b) => b /-! @@ -113,31 +106,12 @@ are the names of the seven days of the week: *sunday, monday,* etc. -/ -inductive Day : Type -|sunday -|monday -|tuesday -|wednesday -|thursday -|friday -|saturday - -open Day - -#check sunday - /-! Some days are work days and some days are play days. Define a data type, *kind*, with two values, *work* and *play*. -/ -inductive kind : Type -| work -| play - -open kind - /-! Now define a function, *day2kind*, that takes a *day* as an argument and returns the *kind* of day it is as @@ -146,45 +120,23 @@ through friday) are *work* days and weekend days are *play* days. -/ -def day2kind : Day → kind -| sunday => play -| saturday => play -| _ => work - -#reduce day2kind thursday - /-! Next, define a data type, *reward*, with two values, *money* and *health*. -/ -inductive reward : Type -| money -| health - -open reward - /-! Now define a function, *kind2reward*, from *kind* to *reward* where *reward work* is *money* and *reward play* is *health*. -/ -def kind2reward : kind → reward -| work => money -| play => health - /-! Finally, use your *funkom* function to produce a new function that takes a day and returns the corresponding reward. Call it *day2reward*. -/ -def day2reward := funkom kind2reward day2kind - -#reduce day2reward tuesday -#reduce day2reward saturday - /-! Include test cases using #reduce to show that the reward from each weekday is *money* and the reward from a weekend @@ -202,9 +154,6 @@ Consider the outputs of the following #check commands. #check Nat × (Nat × Nat) #check (Nat × Nat) × Nat -#check (1,(2,3)) -#check ((1,2),3) - /-! Is × left associative or right associative? Briefly explain how you reached your answer. @@ -218,9 +167,6 @@ Define a function, *triple*, of the following type: -- Here: -def triple : { α β γ : Type } → α → β → γ → (α × β × γ) -| α, β, γ, a, b , y => (a,(b,y)) -- Prod.mk a (Prod.mk b y) - -- Hints: (1) put in parens for clarity; (2) use TDSP. /-! 
@@ -231,10 +177,7 @@ an argument and that returns, respectively, its first, second, or third elements. -/ --- Here - -def second { α β γ : Type } : α × β × γ → β -| (_,b,_) => b +-- Here: -- Ok, this one takes a small leap of imagination diff --git a/Student/hw4-student.lean b/Student/hw4-student.lean index 4e61f0fb..6aaf56aa 100644 --- a/Student/hw4-student.lean +++ b/Student/hw4-student.lean @@ -50,7 +50,9 @@ of type β × α. Call your function prod_comm. -/ def prod_comm { α β : Type } : α × β → β × α -| _ => _ +| (a,b) => (b,a) + +-- #eval prod_comm (4, true) /-! Is the transformation from *α × β* to *β × α* @@ -62,6 +64,10 @@ of the appropriate type. Call it prod_com_reverse. -/ -- Here: +def prod_com_reverse {α β : Type} : β × α → α × β +| (a,b) => (b,a) + +--#check prod_com_reverse /-! ## #3: Associativity of Prod @@ -88,7 +94,9 @@ ordered pair notation to match the input value. -- Here def prod_assoc { α β γ : Type} : α × (β × γ) → (α × β) × γ -| _ => _ +| (a,(b,c)) => ((a,b),c) + +--#check prod_assoc (5,(true, "hello")) /-! Prove that the conversion works in the reverse direction @@ -97,6 +105,10 @@ a function, *prod_assoc_reverse* accordingly. -/ -- Here: +def prod_assoc_reverse { α β γ : Type} : (α × β) × γ → α × (β × γ) +| ((a,b),c) => (a,(b,c)) + +--#check prod_assoc_reverse (prod_assoc (5,(true, "hello"))) /-! ## #4. Is Sum Commutative? @@ -131,8 +143,21 @@ of definition. def sum_comm { α β : Type} : α ⊕ β → β ⊕ α := fun s => match s with - | _ => _ - | _ => _ + | (Sum.inl a) => (Sum.inr a) + | (Sum.inr b) => (Sum.inl b) + + +/- Alternate version (shorter) + +def sum_comm' { α β : Type} : α ⊕ β → β ⊕ α + | (Sum.inl a) => (Sum.inr a) + | (Sum.inr b) => (Sum.inl b) + +-/ + + + +-- #check sum_comm (@Sum.inl Nat Bool 1) /-! Can you always convert a term of type *β ⊗ α* into @@ -142,6 +167,13 @@ that does it. Call is sum_comm_reverse. -- Here: +def sum_comm_reverse { α β : Type} :β ⊕ α → α ⊕ β := +fun s => + match s with + | (Sum.inl b) => (Sum.inr b) + | (Sum.inr a) => (Sum.inl a) + +-- #eval sum_comm_reverse (sum_comm (@Sum.inl Nat Bool 1)) /-! ## #5: Is Sum Associative? @@ -169,9 +201,12 @@ in both matching and to define return result values. -/ def sum_assoc { α β γ : Type} : α ⊕ (β ⊕ γ) → (α ⊕ β) ⊕ γ -| (Sum.inl a) => (Sum.inl _) -| (Sum.inr (Sum.inl b)) => _ -| _ => _ +| (Sum.inl a) => (Sum.inl (Sum.inl a)) +| (Sum.inr (Sum.inl b)) => (Sum.inl (Sum.inr b)) +| (Sum.inr (Sum.inr c)) => (Sum.inr c) + + +-- #check sum_assoc (@Sum.inr Nat (Bool ⊕ String) (@Sum.inr Bool String "hello")) /-! Does this conversion also work in reverse? Prove it @@ -181,6 +216,12 @@ a value of the first sum type as a result. -/ -- Here: +def sum_assoc_reverse { α β γ : Type} : (α ⊕ β) ⊕ γ → α ⊕ (β ⊕ γ) +| (Sum.inr c) => (Sum.inr (Sum.inr c)) +| (Sum.inl (Sum.inl a)) => (Sum.inl a) +| (Sum.inl (Sum.inr b)) => (Sum.inr (Sum.inl b)) + +-- #check sum_assoc_reverse (sum_assoc (@Sum.inl Nat (Bool ⊕ String) 1)) /-! ## #6. Products Distribute Over Sums @@ -197,9 +238,11 @@ either an *α* value and a *β* value, or an *α* value and a *γ* value. -/ - def prod_dist_sum {α β γ : Type} : _ - | _ => _ - | _ => _ + def prod_dist_sum {α β γ : Type} : α × (β ⊕ γ) → (α × β) ⊕ (α × γ) + | (a, Sum.inr c) => (Sum.inr (a, c)) + | (a, Sum.inl b) => (Sum.inl (a, b)) + +-- #eval prod_dist_sum ("hello", (@Sum.inl Nat Bool 1)) /-! Does the preceding principle work in reverse? 
In other @@ -212,7 +255,13 @@ any value of type *(α × β) ⊕ (α × γ)* into one of type -/ -- Here: - + +def prod_dist_sum_reverse {α β γ : Type} : (α × β) ⊕ (α × γ) → α × (β ⊕ γ) + | Sum.inr (a,c) => (a, Sum.inr c) + | Sum.inl (a,b) => (a, Sum.inl b) + +-- #eval prod_dist_sum_reverse (prod_dist_sum ("hello", (@Sum.inr Nat Bool true))) + /-! In the forward (first) direction we can say that products distribute over sums, just as, say, *4 * (2 + 3)* is the @@ -235,6 +284,10 @@ type *wet*. -- Here +def its_wet {rain sprinkler wet : Type} : rain ⊕ sprinkler → (rain → wet) → (sprinkler → wet) → wet +| (Sum.inl r), r2w, _ => r2w r +| (Sum.inr s), _, s2w => s2w s + /-! Now rewrite your function using the type names, *α, γ,* and *β* instead of *rain, sprinkler* and @@ -243,6 +296,22 @@ Now rewrite your function using the type names, -- Here: +def sum_elim {α γ β : Type} : α ⊕ β → (α → γ) → (β → γ) → γ +| (Sum.inl a), a2c, _ => a2c a +| (Sum.inr b), _, b2c => b2c b + +/- + +def nat2bool: Nat → Bool +| _ => true + +def string2bool: String → Bool +| _ => false + +#eval sum_elim (@Sum.inr Nat String "hello") nat2bool string2bool + +-/ + /-! You should now better understand how to program with arbitrary values of arbitrary sum types. To do From aa834f09173e8408ade9ac5507a93818bac81f17 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Thu, 21 Sep 2023 20:35:55 +0000 Subject: [PATCH 04/15] ok --- Student/hw5_student.lean | 114 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100644 Student/hw5_student.lean diff --git a/Student/hw5_student.lean b/Student/hw5_student.lean new file mode 100644 index 00000000..ca3af391 --- /dev/null +++ b/Student/hw5_student.lean @@ -0,0 +1,114 @@ +/-! +# Homework 5: Inhabitedness + +The PURPOSE of this homework is to greatly strengthen +your understanding of reasoning with sum and product +types along with properties of being inhabited or not. + +READ THIS: The collaboration rule for this homework is +that you may *not* collaborate. You can ask friends and +colleagues to help you understand the class material, +but you may not discuss any of these homework problems +with anyone other than one of the instructors or TAs. + +Finally, what you're seeing here is the FIRST set of +questions on this homework, giving you an opportunity +to deepen your understanding of the Empty type and its +uses. +-/ + +/-! + +## PART 1 + +Of particular importance in these questions is the +idea that *having* a function value (implementation) +of type *α → Empty* proves that α is uninhabited, in +that if there were a value (a : α) then you'd be able +to derive a value of type Empty, and that simply can't +be done, so there must be no such (a : α). That's the +great idea that we reached at the end of lecture_09. + +More concretely every time you see function type that +looks like (α → Empty) in what follows, you can read +it as saying *there is no value of type α*. Second, if +youwant to *return* a result of type (α → Empty), to +showing that there can be no α value, then you need +to return a *function*; and you will often want to do +so by writing the return value as a lambda expression. +-/ + +/-! +### #1 Not Jam or Not Cheese Implies Not Jam and Cheese + +Suppose you don't have cheese OR you don't have jam. +Then it must be that you don't have (cheese AND jam). +Before you go on, think about why this has to be true. +Here's a proof of it in the form of a function. The +function takes jam and cheese implicitly as types. 
+It takes a value that either indicates there is no +jam, or a value that indicates that there's no cheese, +and you are to construct a value that shows that there +can be no jam and cheese. It works by breaking the first +argument into two cases: either a proof that there is +no jam (there are no values of this type), or a proof +that there is no cheese, and shows *in either case* +that there can be no jam AND cheese. +-/ + +def not_either_not_both { jam cheese } : + ((jam → Empty) ⊕ (cheese → Empty)) → + (jam × cheese → Empty) +| Sum.inl nojam => (fun jamAndCheese => nojam jamAndCheese.1) +| Sum.inr nocheese => (fun jamAndCheese => nocheese jamAndCheese.2) + +/-! +### #2: Not One or Not the Other Implies Not Both +Now prove this principle *in general* by defining a +function, demorgan1, of the following type. It's will +be the same function, just with the names α and β for +the types, rather than the more suggestive but specific +names, *jam* and *cheese*. + +{α β : Type} → (α → Empty ⊕ β → Empty) → (α × β → Empty). +-/ + +def demorgan1 {α β : Type} : ((α → Empty) ⊕ (β → Empty)) → (α × β → Empty) +| (Sum.inl noa) => (fun aab => noa aab.1) +| (Sum.inr nob) => (fun aab => nob aab.2) + +/-! +### #3: Not Either Implies Not One And Not The Other +Now suppose that you don't have either jam and cheese. +Then you don't have jam and you don't have cheese. More +generally, if you don't have an α OR a β, then you can +conclude that you don't have an α Here's a function type that asserts +this fact in a general way. Show it's true in general +by implementing it. An implementation will show that +given *any* types, α and β, +-/ + +def demorgan2 {α β : Type} : (α ⊕ β → Empty) → ((α → Empty) × (β → Empty)) +| noaorb => ((fun a => noaorb (Sum.inl a)), (fun b => noaorb (Sum.inr b))) + + +/-! +### #4: Not One And Not The Other Implies Not One Or The Other +Suppose you know that there is no α AND there is no β. Then +you can deduce that there can be no (α ⊕ β) object. Again +we give you the function type that expresses this idea, +and you must show it's true by implementing the function. +Hint: You might want to use an explicit match expression +in writing your solution. +-/ +def demorgan3 {α β : Type} : ((α → Empty) × (β → Empty)) → ((α ⊕ β) → Empty) +| (a2e,b2e) => (fun asumb => (a2e (asumb))) + + +/-! +## PART 2 + +Coming Soon. +-/ + + From 969cf63bdb126b2dcd5f34f1d947ce4753ce2248 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Thu, 21 Sep 2023 20:36:08 +0000 Subject: [PATCH 05/15] yes --- Student/hw5_student.lean | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Student/hw5_student.lean b/Student/hw5_student.lean index ca3af391..43842599 100644 --- a/Student/hw5_student.lean +++ b/Student/hw5_student.lean @@ -102,8 +102,10 @@ Hint: You might want to use an explicit match expression in writing your solution. -/ def demorgan3 {α β : Type} : ((α → Empty) × (β → Empty)) → ((α ⊕ β) → Empty) -| (a2e,b2e) => (fun asumb => (a2e (asumb))) - +| (a2e,b2e) => fun asumb => + match asumb with + | (Sum.inl a) => a2e a + | (Sum.inr b) => b2e b /-! 
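+An optional usage sketch (the names *no_left* and *no_right* are
+made up for illustration): given evidence that *α ⊕ β* is
+uninhabited, *demorgan2* lets you project out evidence that each
+summand is uninhabited on its own.
+-/
+
+def no_left {α β : Type} : (α ⊕ β → Empty) → (α → Empty) :=
+  fun h => (demorgan2 h).1
+
+def no_right {α β : Type} : (α ⊕ β → Empty) → (β → Empty) :=
+  fun h => (demorgan2 h).2
+
+/-!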
## PART 2 From 1d7447215f339c642a38369d1efe5e262b848bd9 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Tue, 26 Sep 2023 19:32:30 +0000 Subject: [PATCH 06/15] df --- Student/hw4-student.lean | 8 +- Student/hw5_student.lean | 199 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 196 insertions(+), 11 deletions(-) diff --git a/Student/hw4-student.lean b/Student/hw4-student.lean index 6aaf56aa..81bcf115 100644 --- a/Student/hw4-student.lean +++ b/Student/hw4-student.lean @@ -167,11 +167,9 @@ that does it. Call is sum_comm_reverse. -- Here: -def sum_comm_reverse { α β : Type} :β ⊕ α → α ⊕ β := -fun s => - match s with - | (Sum.inl b) => (Sum.inr b) - | (Sum.inr a) => (Sum.inl a) +def sum_comm_reverse { α β : Type} :β ⊕ α → α ⊕ β +| (Sum.inl b) => (Sum.inr b) +| (Sum.inr a) => (Sum.inl a) -- #eval sum_comm_reverse (sum_comm (@Sum.inl Nat Bool 1)) diff --git a/Student/hw5_student.lean b/Student/hw5_student.lean index 43842599..4b98f367 100644 --- a/Student/hw5_student.lean +++ b/Student/hw5_student.lean @@ -56,9 +56,27 @@ that there is no cheese, and shows *in either case* that there can be no jam AND cheese. -/ +/-! +## New Addition: no (α : Type) := α → Empty + +We can make the logical intent of our +types and computations clearer by introducing a +shorthand, *no α* for the type *α → Empty*. Then +in each place where a type like *α → Empty appears +in this homework, replace it with *no α*. Use the +right local names in each instance, of course. +-/ +def no (α : Type) := α → Empty + +/-! +We've now replaced each α → Empty with no α. We +suggest that you go ahead and use *no* wherever +doing so makes the logical meaning clearer. +-/ + def not_either_not_both { jam cheese } : - ((jam → Empty) ⊕ (cheese → Empty)) → - (jam × cheese → Empty) + ((no jam) ⊕ (no cheese)) → + (no (jam × cheese)) | Sum.inl nojam => (fun jamAndCheese => nojam jamAndCheese.1) | Sum.inr nocheese => (fun jamAndCheese => nocheese jamAndCheese.2) @@ -73,7 +91,7 @@ names, *jam* and *cheese*. {α β : Type} → (α → Empty ⊕ β → Empty) → (α × β → Empty). -/ -def demorgan1 {α β : Type} : ((α → Empty) ⊕ (β → Empty)) → (α × β → Empty) +def demorgan1 {α β : Type} : ((no α) ⊕ (no β)) → (no (α × β)) | (Sum.inl noa) => (fun aab => noa aab.1) | (Sum.inr nob) => (fun aab => nob aab.2) @@ -88,7 +106,7 @@ by implementing it. An implementation will show that given *any* types, α and β, -/ -def demorgan2 {α β : Type} : (α ⊕ β → Empty) → ((α → Empty) × (β → Empty)) +def demorgan2 {α β : Type} : (no (α ⊕ β)) → ((no α) × (no β)) | noaorb => ((fun a => noaorb (Sum.inl a)), (fun b => noaorb (Sum.inr b))) @@ -101,7 +119,7 @@ and you must show it's true by implementing the function. Hint: You might want to use an explicit match expression in writing your solution. -/ -def demorgan3 {α β : Type} : ((α → Empty) × (β → Empty)) → ((α ⊕ β) → Empty) +def demorgan3 {α β : Type} : ((no α) × (no β)) → (no (α ⊕ β)) | (a2e,b2e) => fun asumb => match asumb with | (Sum.inl a) => a2e a @@ -110,7 +128,176 @@ def demorgan3 {α β : Type} : ((α → Empty) × (β → Empty)) → ((α ⊕ /-! ## PART 2 -Coming Soon. +The following problems aim to strengthen your +understanding of inductive type definitions and +recusrive functions. +-/ + +-- Here are some named Nat values, for testing +def n0 := Nat.zero +def n1 := Nat.succ n0 +def n2 := Nat.succ n1 +def n3 := Nat.succ n2 +def n4 := Nat.succ n3 +def n5 := Nat.succ n4 + +/-! +### #1. 
Pattern Matching Enables Destructuring + +#1: Defne a function, pred: Nat → Nat, that takes an any +Nat, n, and, if n is zero, returns zero, otherwise analyze +n as (Nat.succ n') and return n'. Yes this question should +be easy. Be sure you understand destructuring and pattern +matching. +-/ + +-- Here +def pred: Nat → Nat +| 0 => 0 +| Nat.succ n' => n' + +-- Test cases +#reduce pred 3 -- expect 2 +#reduce pred 0 -- expect 0 + +/-! +### #2. Big Doll from Smaller One n Times + +Write a function, *mk_doll : Nat → Doll*, that takes +any natural number argument, *n*, and that returns a doll +n shells deep. The verify using #reduce that (mk_doll 3) +returns the same doll as *d3*. +-/ + +-- Answer here + +inductive Doll : Type +| solid +| shell (d : Doll) + +open Doll + +def mk_doll: Nat → Doll +| 0 => solid +| (n+1) => shell (mk_doll n) + +-- test cases +#check mk_doll 3 +#reduce mk_doll 3 + +/-! +### #3. A Boolean Nat Equality Predicate + +Write a function, *nat_eq : Nat → Nat → Bool*, that +takes any two natural numbers and that returns Boolean +*true* if they're equal, and false otherwise. Finish +off the definition by filling the remaining hole (_). +-/ + +def nat_eq : Nat → Nat → Bool +| 0, 0 => true +| 0, n' => false +| n', 0 => false +| (n' + 1), (m' + 1) => nat_eq n' m' + +-- a few tests +#eval nat_eq 0 0 +#eval nat_eq 0 1 +#eval nat_eq 1 0 +#eval nat_eq 1 1 +#eval nat_eq 2 0 +#eval nat_eq 2 1 +#eval nat_eq 2 2 + + +/-! +### #4. Natural Number Less Than Or Equal + +Write a function, *nat_le : Nat → Nat → Bool*, that +takes any two natural numbers and that returns Boolean +*true* if the first value is less than or equal to the +second, and false otherwise. Hint: what are the relevant +cases? Match to destructure them then return the right +result *in each case*. +-/ + +-- Here + +def nat_le: Nat → Nat → Bool +| 0, 0 => true +| 0, n'=> true +| n', 0 => false +| (n' + 1), (m' + 1) => nat_le n' m' + +#eval nat_le 2 3 + +#reduce nat_le 4 5 + +/-! +### #5. Nat Number Addition + +Complete this function definition to implement +a natural number addition function. + -/ + +def add : Nat → Nat → Nat +| m, 0 => m +| m, (Nat.succ n') => 1 + add m n' -- hint: recursion + + +-- Some test cases +#reduce add 0 0 -- expect 0 +#reduce add 5 0 -- expect 5 +#reduce add 0 5 -- expect 5 +#reduce add 5 4 -- expect 9 +#reduce add 4 5 -- expect 9 +#reduce add 5 5 -- expect 10 + + +/-! +### #6. Natural Number Multiplication + +Complete this function definition to implement +a natural number multiplication function. You +can't use Lean's Nat multiplication function. +Your implementation should use productively +the add function you just definied. Wite a few +test cases to show that it appears to be working. + -/ + +def mul : Nat → Nat → Nat +| m, 0 => 0 +| m, (Nat.succ n') => add (m) (mul m n') + +#reduce mul 2 1 + +/-! +### Sum Binary Nat Function Over Range 0 to n +Define a function, sum_f, that takes a function, +f : Nat → Nat and a natural number n, and that +returns the sum of all of the values of (f k) +for k ranging from 0 to n. + +Compute expected results by hand for a few +test cases and write the tests using #reduce. +For example, you might use the squaring function +as an argument, with a nat, n, to obtain the +sum of the squares of all the numbers from 0 +to and including n. 
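+For instance, with the squaring function and *n = 4*, the expected
+result is *0 + 1 + 4 + 9 + 16 = 30*.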
-/ +def double (n : Nat) := 2*n +def square (n: Nat) := n^2 + +def sum_f : (Nat → Nat) → Nat → Nat +| f, 0 => f 0 +| f, n' + 1 => add (f (n'+1)) (sum_f f n') + +#reduce sum_f double 3 +#reduce sum_f double 4 +#reduce sum_f square 4 + + + + From 8cce657f09fb776e195038bd8a43c5e3ad10b534 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Thu, 5 Oct 2023 19:39:37 +0000 Subject: [PATCH 07/15] df --- Instructor/Homework/hw5.lean | 1 + Student/ hw6_student.lean | 275 ++++++++++++++++++++++++++ Student/lecture_12_student.lean | 338 ++++++++++++++++++++++++++++++++ 3 files changed, 614 insertions(+) create mode 100644 Student/ hw6_student.lean create mode 100644 Student/lecture_12_student.lean diff --git a/Instructor/Homework/hw5.lean b/Instructor/Homework/hw5.lean index 81a71bef..89e4ff82 100644 --- a/Instructor/Homework/hw5.lean +++ b/Instructor/Homework/hw5.lean @@ -79,6 +79,7 @@ def not_either_not_both { jam cheese } : | Sum.inl nojam => (fun _ => _) | Sum.inr _ => _ + /-! ### #2: Not One or Not the Other Implies Not Both Now prove this principle *in general* by defining a diff --git a/Student/ hw6_student.lean b/Student/ hw6_student.lean new file mode 100644 index 00000000..06fa6169 --- /dev/null +++ b/Student/ hw6_student.lean @@ -0,0 +1,275 @@ +/-! +# Homework 6 + +The established rules apply. Do this work on your own. + +This homework will test and strengthen your understanding +of inductive data types, including Nat and List α, and the +use of recursive functions to process and construct values +of such types. + +The second part will test and develop your knowledge and +understanding of formal languages, and propositional logic, +(PL) in particular, including the syntax and semantics of +PL, and translations between formal statements in PL and +corresponding concrete English-language examples. +-/ + +/-! +## Part 1: Inductive Types and Recursive Functions +-/ + +/-! +### #1: Iterated Function Application + +Here are two functions, each of which takes a function, +*f*, as an argument, and an argument, *a*, to that function, +and returns the result of applying *f* to *a* one or more +times. The first function just applies *f* to *a* once. The +second function applies *f* to *a* twice. Be sure you fully +understand these definitions before proceeding. +-/ + +def apply {a : α} : (α → α) → α → α +| f, a => f a + +def apply_twice {a : α} : (α → α) → α → α +| f, a => f (f a) + +/-! +Your job now is to define a function, *apply_n*, that takes +a function, *f*, a natural number, *n*, and an argument, +*a*, and that returns the result of applying *f* to *a n* +times. Define the result of applying *f* to *a* zero times +as just *a*. Hint: recursion on *n*. That is, you will have +two cases: where *n* is *0*; and where *n* is greater than +*0*, and can thus be written as *(1 + n')* for some smaller +natural number, *n'*. +-/ + +-- Answer here + +def apply_n {α : Type} : (α → α) → α → Nat → α +| f, a, 0 => a +| f, a, (n' + 1) => f (apply_n f a n') + +-- Test cases: confirm that expectations are correct + +-- apply Nat.succ to zero four times +#eval apply_n Nat.succ 0 4 -- expect 4 + +-- apply "double" to 2 four times +#eval apply_n (λ n => 2*n) 2 4 -- expect 32 + +-- apply "square" to 2 four times +#eval apply_n (λ n => n^2) 2 4 -- expect 65536 + + +/-! +# A Short Introduction to Lists + +The polymorphic data type, List α, is used to +represent lists of values of any type, α. 
The +List type builder provides two constructors: +one to create and empty list of α values, and +one to construct a new non-empty list from a +new element (head, of type α) and a one smaller +list (tail, of type List α). Here's how the List +type builder is defined (simplied just a tad). +-/ + +namespace cs2120 + +inductive List (α : Type): Type +| nil : List α +| cons (h : α) (t : List α) : List α + +end cs2120 + +/-! +Lean defines three useful notations for creating +and destructuring lists. + +- [] means List.nil +- h::t means cons h t +- [1, 2, 3] means the list, 1::[2,3] + - which means 1::2::[3] + - which means 1::2::3::[] + - which means cons 1 (cons 2 (cons 3 nil)) +-/ + +#check ([] : List Nat) +#check 1::[2,3] +#check [1,2,3] + +/-! +You can use these notations when pattern +matching to analyze arguments. Here we show +how this work by defining a function that +takes a list and returns *either* (using a +sum type) *unit* to represent the case where +there is no first element, or the value at +the head of the list. +-/ + +def first_elt : List Nat → Unit ⊕ Nat +| [] => Sum.inl Unit.unit +| h::_ => Sum.inr h + +#reduce first_elt [] -- expect Sum.inl unit +#reduce first_elt [1,2] -- expect 1 (left) + + +/-! +### #2: List length function + +Lists are defined inductively in Lean. A list of +values of some type α is either the empty list of +α values, denoted [], or an α value followed by a +shorter list of α values, denoted *h::t*, where +*h* (the *head* of the list) is a single value of +type α, and *t* is a shorter list of α values. +The base case is of course the empty list. Define +a function called *len* that takes a list of +values of any type, α, and that returns the length +of the list. +-/ + +def len {α : Type} : List α → Nat +| [] => 0 +| h::t => 1 + len t + +#eval @len Nat [] -- expect 0 +#eval len [0,1,2] -- expect 3 +#eval len ["I", "Love", "Logic!"] -- expect 3 + + +/-! +### #3: Reduce a List of Bool to a Bool + +Define a function that takes a list of Boolean +values and that "reduces" it to a single Boolean +value, which it returns, where the return value +is true if all elements are true and otherwise +is false. Call your function *reduce_and*. + +Hint: the answer is the result of applying *and* +to two arguments: (1) the first element, and (2) +the result of recursively reducing the rest of the +list. You will have to figure out what the return +value for the base case of an empty list needs to +be for your function to work in all cases. +-/ + +def reduce_and : List Bool → Bool +| [] => true +| h::t => and h (reduce_and t) + +-- Test cases + +#eval reduce_and [true] -- expect true +#eval reduce_and [false] -- expect false +#eval reduce_and [true, true] -- expect true +#eval reduce_and [false, true] -- expect false + + +/-! +### #4 Negate a List of Booleans + +Define a function, call it (map_not) that takes a +list of Boolean values and returns a list of Boolean +values, where each entry in the returned list is the +negation of the corresonding element in the given +list of Booleans. For example, *map_not [true, false]* +should return [false, true]. +-/ + +def map_not : List Bool → List Bool +| [] => [] +| h::t => (not h)::(map_not t) -- hint: use :: to construct answer + +-- test cases +#eval map_not [] -- exect [] +#eval map_not [true, false] -- expect [false, true] + +/-! 
+### #5 List the First n Natural Numbers + +Define a function called *countdown* that takes a +natural number argument, *n*, and that returns a list +of all the natural numbers from *n* to *0*, inclusive. +-/ + +-- Your answer here +def countdown : Nat → List Nat +| 0 => [0] +| n' + 1 => (n'+1)::(countdown n') + +-- test cases +#eval countdown 0 -- expect [0] +#eval countdown 5 -- expect [5,4,3,2,1,0] + + +/-! +### #6: List concatenation + +Suppose Lean didn't provide the List.append function, +denoted *++*. Write your own list append function. Call +it *concat*. For any type *α*, it takes two arguments of +type *List α* and returns a result of type *List α,* the +result of appending the second list to the first. Hint: +do case analysis on the first argument, and think about +this function as an analog of natural number addition. +-/ + +-- Here + +def concat {α : Type} : List α → List α → List α +| [], m => m +| h::t , n => (h::(concat t n)) + +-- Test cases + +#eval concat [1,2,3] [] -- expect [1,2,3] +#eval concat [] [1,2,3] -- expect [1,2,3] +#eval concat [1,2] [3,4] -- expect [1,2,3,4] + +/-! +### #7: Lift Element to List + +Write a function, *pure'*, that takes a value, *a*, of any +type α, and that returns a value of type List α containing +just that one element. +-/ + +-- Here + +def pure' {α : Type} : α → List α +| a => [a] + +#eval pure' "Hi" -- expect ["Hi"] + +/-! +### Challenge: List Reverse + +Define a function, list_rev, that takes a list of values +of any type and that returns it in reverse order. Hint: you +can't use :: with a single value on the right; it needs a +list on the right. Instead, consider using *concat*. +-/ + +-- Answer here: + +def list_rev {α : Type} : List α → List α +| [] => [] +| h::t => (concat (list_rev t) [h]) + +#eval list_rev [1,2,3] +#eval list_rev [1,2,3,4,5,6,6] +#eval list_rev ["h", "g", "h", "g", "t"] +#eval list_rev [1,2] +/-! +## Part 2: Propositional Logic: Syntax and Semantics + +Forthcoming as an update to this file. +-/ diff --git a/Student/lecture_12_student.lean b/Student/lecture_12_student.lean new file mode 100644 index 00000000..f3daf8eb --- /dev/null +++ b/Student/lecture_12_student.lean @@ -0,0 +1,338 @@ +/-! +# Propositional Logic: Review and Practice +-/ + +/-! +## Specification of Propositional Logic + +We begin by reproducing our formal specification +of the syntax and semantics of propositional logic, +without distracting test cases, implementation +alternatives, or explanatory text. +-/ + +/-! +### Abstract Syntax +-/ + +structure var : Type := +(n: Nat) + +inductive unary_op : Type +| not + +inductive binary_op : Type +| and +| or +| imp +| bi_imp + +inductive Expr : Type +| var_exp (v : var) +| un_exp (op : unary_op) (e : Expr) +| bin_exp (op : binary_op) (e1 e2 : Expr) + +/-! +### Concrete Syntax +-/ + +notation "{"v"}" => Expr.var_exp v +prefix:max "¬" => Expr.un_exp unary_op.not +infixr:35 " ∧ " => Expr.bin_exp binary_op.and +infixr:30 " ∨ " => Expr.bin_exp binary_op.or +infixr:25 " ⇒ " => Expr.bin_exp binary_op.imp +infixr:20 " ⇔ " => Expr.bin_exp binary_op.iff + +/-! 
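+Before moving on, here is a quick, optional check that the concrete
+syntax parses as intended (the names *demo_p* and *demo_q* are made
+up for illustration):
+-/
+
+def demo_p : var := var.mk 10
+def demo_q : var := var.mk 11
+#check ({demo_p} ∧ {demo_q}) ⇒ {demo_p}   -- an Expr built with the notations above
+
+/-!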
+### Semantics +-/ + +def eval_un_op : unary_op → (Bool → Bool) +| unary_op.not => not + +def implies : Bool → Bool → Bool +| true, false => false +| _, _ => true + +def bi_implies : Bool → Bool → Bool +| true, true => true +| false, false => true +| _, _ => false + +def eval_bin_op : binary_op → (Bool → Bool → Bool) +| binary_op.and => and +| binary_op.or => or +| binary_op.imp => implies +| binary_op.bi_imp => bi_implies + +def Interp := var → Bool + +def eval_expr : Expr → Interp → Bool +| (Expr.var_exp v), i => i v +| (Expr.un_exp op e), i => (eval_un_op op) (eval_expr e i) +| (Expr.bin_exp op e1 e2), i => (eval_bin_op op) (eval_expr e1 i) (eval_expr e2 i) + +/-! +## Review and Practice + +### Propositions + +A proposition is an expression that asserts that +some *state of affairs* holds in some world, real +or imaginary. It makes sense to ask whether a given +proposition is true or false in some world. + +Here's an example of a proposition: "The red block +is on top of the blue block." It makes sense to ask, +"Is it true that the red block is on top of the blue +block?" However, to answer this question, we also +have to specify a *world* in which we are to evaluate +it truth or falsity. + +For example, imagine two children, say Bob and Sally, +each playing with blocks. We can ask "Is it true that +the red block is on top of the blue block *in Sally's +world?*" We can ask "Is it true that the red block is +on top of the blue block *in Bob's world?*" And we may +well get different answers. We evaluate the truth of +a proposition in a specified world. + +### Propositional Logic + +There are many different logics. Each provides a +*language* of propositions, different kinds of worlds, +and formal methods for assessing the truth of a given +proposition in a given world. + +Propositional logic is an especially simple logic. +It provides a language of *atomic propositions*, a +way of building larger propositions by combining +smaller ones (using the not (¬), and (∧), or (∨), +implies (⇒), and equivalence (↔) connectives, and +a recursive function for evaluating the truth of an +expression given (a world) a function that assigns +Boolean values to each propositional variable that +might appear in the proposition. + +### Variables Represent Atomic Propositions + +In propositional logic, one represents an atomic +proposition using a variable name. For example, +one could represent the atomic proposition, *The +red block is on top of the blue block* using the +variable, *red_block_on_blue_block*. Similarly, +one could represent the atomic proposition, *The +yellow block is on the red block* using the rather +verbose variable, *yellow_block_on_red_block*. + +### Larger Propositions Are Built Using Connectives + +We could then write the larger proposition, *The red +block is on the blue block AND the yellow block +is on the red block* as *red_block_on_blue_block +∧ yellow_block_on_red_block*. More generally, we +can form larger propositions by applying logical +*connectives* such as ¬, ∧, and ∨, to (the right +number of) smaller propositions, bottoming out at +atomic propositions. + +### Abstracting to Short Variable Names + +Using long and expressive variable names makes +larger propositions hard to write and read. The +usual practice, then is to use single character +variable names to represent atomic propositions. + +Here for example we might just use *r* to represent +the *red on blue* proposition and *y* to represent +the *yellow on red* proposition. 
Now we can write +the concise, formal expression, *r ∧ y*, to stand +for the proposition that *The red block is on the +blue block *and* the yellow block is on the red +block*. In practice one could provide an informal +translation table linking short variable names to +their intended natural language meanings. + +| variable | intended meaning | +|----------|------------------------------| +| r | red block is on blue block | +| y | yellow block is on red block | + +### Abstracting from Real-World Meanings + +The underlying purpose of a logic is to provide a +way to express propositions in such a way that we +can then reason about their truth or falsity using +only the rules of logic, without further reference +to their intended informal meanings. We translate +natural thoughts into mathematical representations +(logic) then use the mathematics to reason further, +and finally we can translate logical conclusions +back into natural world meanings at the end of the +process. + +### Validity and Unsatisfiability + +Furthermore, when studying logic, we are often +interested in whether a given proposition in true +or false *independent of the meanings of its parts*. +For example, in propositional logic, the proposition, +*r ∧ ¬r* cannot be true no matter what natural language +proposition *r* means: as a proposition cannot be true +and false. We call such a proposition *unsatisfiable.* + +Similarly, the proposition, *r ∨ ¬r* is always true +in propositional logic: as a proposition can *only* +be true or false, and in either case one of the two +sub-expressions will be true, so the overall one will +be true as well. We call such a proposition *valid*. + +For numerous reasons, then, we'll usually use single +letters to represent (natural language) propositions, +and moreover, we'll often do so without referring to +any particular natural language translations. That is, +we'll study logic *in the abstract*. When we show that +an abstract proposition is valid, then we can plus in +any informal meanings we want for the variables and we +still still have logically correct statements. + +Consider, for example, the valid abstract proposition, +*A ∧ B ⇒ A.* Now suppose *A* means "the cat is old" and +B means "the dog is a puppy." Then the logical statement +means *if the cat is old AND the dog is a puppy THEN the +cat is old.* Valid propositions thus emerge as *general* +principles for logically sound reasoning, no matter what +the atomic propositional variables are defined to mean. +-/ + +/-! +## HOMEWORK: + +Refer to each of the problems in HW5, Part 1. +For each one, express the proposition that each function +type represents using our formal notation for propositional +logic. We'll take you through this exercise in steps. +-/ + +/-! +### #1. Propositional Variables + +First, define *b, c,* *j,* and *a* as propositional variables +(of type *var*). We'll use *b* for *bread* or *beta*,* *c* for +*cheese,* *j* for *jam,* and *a* for α*. +-/ +def b := var.mk 0 +def c := var.mk 2 +def j := var.mk 1 +def a := var.mk 3 + +-- get the index out of the c structure + +#eval c.n + +/-! +### #2. Atomic Propositions + +Define B, C, J and A as corresponding atomic propositions, +of type *Expr*. +-/ +def B := {b} +def C := {c} +def J := {j} +def A := {a} + +-- ((no jam) ⊕ (no cheese)) → (no (jam × cheese)) + + + + + +/-! +### #3. Compound Propositions + +Now define the variables, e0 through e3, as expressions +in propositional logic using the concrete syntax we've +defined. +-/ + +-- #1. 
((no jam) ⊕ (no cheese)) → (no (jam × cheese)) +def e0 := (¬J ∨ ¬C) ⇒ ¬(J ∧ C) +#eval eval_expr e0 (λ v => false) + +-- YOU DO THE REST + +-- (α → Empty ⊕ β → Empty) → (α × β → Empty) +def e1 := (¬A ∨ ¬B) ⇒ ¬(A ∧ B) +#eval eval_expr e1 (λ v => false) + +-- (no (α ⊕ β)) → ((no α) × (no β)) +def e2 := ¬(A ∨ B) ⇒ ((¬ A) ∧ (¬ B)) +#eval eval_expr e2 (λ v => true) + +-- ((no α) × (no β)) → (no (α ⊕ β)) +def e3 := ((¬ A) ∧ (¬ B)) ⇒ ¬(A ∨ B) +#eval eval_expr e3 (λ v => true) +/-! +### #4. Implement Syntax and Semantics for Implies and Biimplication +Next go back and extend our formalism to support the implies connective. +Do the same for biimplication while you're at it. This is already done +for *implies*. Your job is to do the same for bi-implication, which +Lean does not implement natively. +-/ + +/-! +### #5. Evaluate Propositions in Various Worlds + +Now evaluate each of these expressions under the all_true and all_false +interpretations. These are just two of the possible interpretations so +we won't have complete proofs of validity, but at least we expect them +to evaluate to true under both the all_true and all_false interpretations. +-/ + +#eval eval_expr e0 (λ _ => false) -- expect true +#eval eval_expr e0 (λ _ => true) -- expect true + +-- You do the rest +#eval eval_expr e1 (λ _ => false) +#eval eval_expr e1 (λ _ => true) + +#eval eval_expr e2 (λ _ => false) +#eval eval_expr e2 (λ _ => true) + +#eval eval_expr e3 (λ _ => false) +#eval eval_expr e3 (λ _ => true) +/-! +### #6. Evaluate the Expressions Under Some Other Interpretation + +Other than these two, evaluate the propositions under your new +interpretation, and confirm that they still evaluate to true. +Your interpretation should assign various true and false values +to *j, c, b,* and *a.* An interpretation has to give values to +all (infinitely many) variables. You can do case analysis by +pattern matching on a few specific variables (by index) then +use wildcard matching to handle all remaining cases. +-/ + +-- Answer here + +def tf : var → Bool -- B and C are true, A and J are false +| var.mk 0 => true +| var.mk 2 => true +| _ => false + +def ft : var → Bool -- B and C are false, A and J are true +| var.mk 0 => false +| var.mk 2 => false +| _ => true + +#eval eval_expr e0 tf +#eval eval_expr e0 ft + +#eval eval_expr e1 tf +#eval eval_expr e1 ft + +#eval eval_expr e2 tf +#eval eval_expr e2 ft + +#eval eval_expr e3 tf +#eval eval_expr e3 ft \ No newline at end of file From 66018ec4210e8984df7be66da55fc89b3462fb39 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Tue, 10 Oct 2023 04:12:11 +0000 Subject: [PATCH 08/15] f --- Student/lecture_13_student.lean | 696 ++++++++++++++++++++++++++++++++ 1 file changed, 696 insertions(+) create mode 100644 Student/lecture_13_student.lean diff --git a/Student/lecture_13_student.lean b/Student/lecture_13_student.lean new file mode 100644 index 00000000..8084f858 --- /dev/null +++ b/Student/lecture_13_student.lean @@ -0,0 +1,696 @@ +/-! + +# Propositional Logic: A Satisfiability Solver + +You will recall that we say that an expression (formula) in +propositional logic is *valid* if it evaluates to true under +all possible interpretations (Boolean values for the variables +that appear in it). It is *satisfiable* if there is at least +one intepretation under which it is true. And *unsatisfiable* +if there's not even on interpretation that makes the expression +true. + +We can understand these concepts clearly in terms of truth +tables. 
As we've seen, each of the *input* columns represents +a variable that can appear in the expression being evaluated. +Each row up to but no including the final column represents +one of the possible intepretations of the variables in the +column (a function mapping those variables to Boolean values). +The final column lists the values of the expression for each +of the intepretations. + +As an example, consider the propositional logic expression, +*X*. Here's the truth table for it. There's one input column, +for the one variable. There are two interpretations (rows), +that we can write as *{X ↦ T}* and *{X ↦ F}*. Finally, the +output column gives the truth value of the expression we're +evaluating, *X*, under each interpretation. + +| v₀ | {v₀} | +|----|------| +| T | T | +| F | F | + +From such a truth table it's trivial to determine whether a +formula is valid, satisfiable, or unsatisfiable. If all output +values are true, the formula is valid. It is also said to be +a *tautology*. If at least one output is true, the formula +is *satisfiable*, and the interpretations that make it true +are said to be *models* of the formula, or *solutions*. And +if all the outputs are false, the expression is *unsatisfiable* +and so has no models/solutions. From the truth table here, we +can easily see that *X* is not valid, but it is satisfiable, +with *{X ↦ T}* as a model. + +Exercise: Is the propositional logic expression, *X ∧ ¬X* +valid, satisfiable, or unsatisfiable? What are its models, +if any? + +| v₀ | {v₀} ∧ ¬{v₀} | +|----|--------------| +| T | _ | +| F | _ | + +Exercise: Answer the same two questions for *X ∨ ¬X*. To +derive your answers, fill in and analyze the truth tables +for the two expressions. + + +| v₀ | {v₀} ∨ ¬{v₀} | +|----|--------------| +| T | _ | +| F | _ | + + + +## Chapter Plan + +In the rest of this chapter we'll see how we can perhaps +automate the generation of truth tables for any expressions, +and thus have what we need to automatically determine its +validity, satisfiability, or unsatisfiability, and models +if any. + +The key element will be a function that, when given the number, +*v,* of variables (columns) in a truth table, computes a list of +all *2^v* interpretation functions. By evaluating the expression +under each interpretation we'll get the list of output values +in the truth table. We then just analyze that list to determine +whether all, some, or none of the output values is true. The +models of the expression, if any, are the interpretations in +the rows that have true output values. + +The rest of this chapter is in several sections. First we +include our specification of the syntax and semantics of +propositional logic, without commentary. You can skip over +this section unless you need to review the definitions. + +Next, we define a function that when given the number of +variables, *v*, in an expression, computes the input side +of a truth table as a list of *2^v* rows, Each row is a +distinct list of *v* Boolean values, as would appear in +a paper-and-pencil truth table, and represents one of the +*2^v* possible interpretations of the variables that might +appear in the expression. The *i'th* entry in each row is +the value assigned by that row/interpretation to the *i'th* +variable (column), with *i* ranging from *0* to *v-1*. + +Here's an example of the input side of a truth table for +an expression with two variables. Be sure you can explain +precisely what function, from variables to Boolean values, +each row represents. 
+ +| | v₀ | v₁ | +|----|----|----| +| i₀ | F | F | +| i₁ | F | T | +| i₂ | T | F | +| i₃ | T | T | + +We can write the interpretation functions, corresponding +to the rows, as follows. + +- i₀ = {v₀ ↦ F, v₁ ↦ F} +- i₁ = {v₀ ↦ F, v₁ ↦ T} +- i₂ = {v₀ ↦ T, v₁ ↦ F} +- i₃ = {v₀ ↦ T, v₁ ↦ T} + +Next, the trickiest part, we define a function that takes +as its input a row of Boolean values, *{b₀, ..., bᵥ₋₁}* and +that returns the corresponding interpretation *function*, of +type *Interp* (i.e., *var → Bool*). We can then use such a +function as an argument to our semantic function, *eval_expr* +to compute the truth value of a given expression under that +particular interpretation. + +Each such interpretation function behaves as follows. For *i* +in the range of *0* to *v-1*, given the *i'th* variable, *vᵢ* +*(mk_var i)* as input, it returns *bᵢ*, the Boolean value +in the *i'th* position in the row of Boolean values, as its +output. Otherwise, for variables with *i* indices outside +the range of interest, it just returns a default value, here +*false*. + +Next, we define a function that takes a list of all *2^v* +rows of a truth table, containing Boolean values, and convert +it into a list of interpretation functions, of type *Interp*. + +Finally, from there, we can easily implement the functions that +we really want: take any propositional logic expression and tell +us if it's valid, satisfiable, or unsatisfiable. We will also +want to be able to get a list of models (interpretations) for +any given expression. +-/ + +/-! +## Propositional Logic Syntax and Semantics + +Here we just repeat our specification of the syntax and semantics +of propositional logic. By know you are expected to understand the +concrete syntax of propositional logic, as defined here, and also +the purpose of the eval_expr semantic evaluator, that takes any +expression in this syntax along with an interpretation and returns +the truth value of the expression under the given interpretation of +its atomic propositional variables. +-/ + +structure var : Type := (n: Nat) +inductive unary_op : Type | not +inductive binary_op : Type +| and +| or +| imp +inductive Expr : Type +| var_exp (v : var) +| un_exp (op : unary_op) (e : Expr) +| bin_exp (op : binary_op) (e1 e2 : Expr) +notation "{"v"}" => Expr.var_exp v +prefix:max "¬" => Expr.un_exp unary_op.not +infixr:35 " ∧ " => Expr.bin_exp binary_op.and +infixr:30 " ∨ " => Expr.bin_exp binary_op.or +infixr:25 " ⇒ " => Expr.bin_exp binary_op.imp +infixr:20 " ⇔ " => Expr.bin_exp binary_op.iff +def eval_un_op : unary_op → (Bool → Bool) +| unary_op.not => not +def implies : Bool → Bool → Bool +| true, false => false +| _, _ => true +def eval_bin_op : binary_op → (Bool → Bool → Bool) +| binary_op.and => and +| binary_op.or => or +| binary_op.imp => implies +def Interp := var → Bool +def eval_expr : Expr → Interp → Bool +| (Expr.var_exp v), i => i v +| (Expr.un_exp op e), i => (eval_un_op op) (eval_expr e i) +| (Expr.bin_exp op e1 e2), i => (eval_bin_op op) (eval_expr e1 i) (eval_expr e2 i) + + +/-! +## Generating Truth Table Rows + +Our first function will take as its input the number of +variables (columns), *v*, for which the input side of a +truth table is to be generated. We will represent this +value as list of *2^v* rows, each of which is a list list +of *v* Boolean values. Here, for example, is a table that +represents the input side of a truth table with three +variables. 
+ +| | v₀ | v₁ | v₃ | +|---|------|------|------| +| 0 | F | F | F | +| 1 | F | F | T | +| 2 | F | T | F | +| 3 | F | T | T | +| 4 | T | F | F | +| 5 | T | F | T | +| 6 | T | T | F | +| 7 | T | T | T | + +### Key Insight + +Let's see what pattern emerges if we replace each true +and false value in this table with a corresponding binary +digit (bit), *0* for *false* and *1* for *true*. + + +| | v₀ | v₁ | v₃ | +|------|------|------|------| +| 0 | 0 | 0 | 0 | +| 1 | 0 | 0 | 1 | +| 2 | 0 | 1 | 0 | +| 3 | 0 | 1 | 1 | +| 4 | 1 | 0 | 0 | +| 5 | 1 | 0 | 1 | +| 6 | 1 | 1 | 0 | +| 7 | 1 | 1 | 1 | + +Do you see it? What's the relationship between +the row index and the sequence of binary digits +in each row? Think about it before reading on. +The answer: each row of binary digits represents +the corresponding row index in binary notation. +In other words, the rows of a truth table really +correspond directly to binary representations of +corresponding row *indices*, ranging from *0* to +*2^v - 1*. The upshot is that we can compute the +*r'th* row of the input side of a truth table +by doing nothing more than representing the row +*index* as a binary numeral, padded with zeros on +the left so that it's at least *v* digits wide, +and then replacing the binary digits with the +corresponding Boolean truth values. + +### Algorithm Design + +These insights unlock an algorithm design strategy. +First, define a function that, when given a number +of variables, *v*, and a row index, *r*, returns the +interpretation that associates the variables in the +columns to the Boolean represented by the bit values +in the binary representation of *r*. + +Once we have this function, it'll be straightforward +to take a number, *v*, of variables, *v*, and return +a list of the *2^v* rows for the input side of a truth +table. +-/ + +/-! +### Row Index to List of Binary Digits + +We'll start by defining a function that converts a +given natural number into its binary representation +as a *list* of binary digits. We'll just the natural +numbers, *0* and *1* to represent binary digits. + +The key observations here are (1) the rightmost bit +of a binary representation of a number is *0* if the +number is even and *1* if it's odd; (2) the rest of +the bits are shifted one place to the right, dropping +the rightmost bit, by division by 2. So what we do, +while bits remain is to repeatedly (1) compute the +rightmost bit, (2) shift the rest of the bits right. + +To illustrate, consider row number, *5*. The binary +representation of *5* is *101*. From *right* to *left* +that's 1*2^0 + 0*2^1 + 1*2^2 = 1 + 0 + 4 = 5.* The +number is odd so the rightmost bit is *1*, which is +the remainder, 5 % 2 = 1*. Now recurse on *5* shifted +right by one bit, i.e., on *5/2* using natural number +division. *5/2* is *2*, so the nexxt bit is *2 % 2 = 0.* +Recursing on *2 / 2 = 1,* we find the next bit to be +*1 % 2 = 1.* Finally recurse on *1 / 2 = 0*. Zero is +one of the two base cases, so we're done. The result +is the sequence of bits *[1, 0, 1]*. Exercise: Do it +yourself for *6*. What's the second base case? + +Here's code. We abstract the computations of the right +bit and shift right operations as named functions. The +recursion, sadly, is not structural. +-/ + +def right_bit (n : Nat) := n%2 + +def shift_right (n : Nat) := n/2 + +def nat_to_bin : Nat → List Nat +| 0 => [0] +| 1 => [1] +| n' + 2 => + have : (shift_right (n' + 2)) < (n' + 2) := sorry + nat_to_bin (shift_right (n' + 2)) ++ [right_bit (n' + 2)] + +/-! 
+Okay, you're wondering, what's that weird expression, +*have : (shift_right (n' + 2)) < (n' + 2) := sorry?* +To make a long story short, the recursion here is not +structural. (Why?) That means that Lean won't be able +to prove to itself that the argument to the recursion is +always decreasing, which is needed to prove that the +recursion always terminates. To avoid Lean giving an +error, we have to give Lean an explicit proof that the +argument decreases on each recursive call. The mystery +code tells Lean that we have such a proof. The *sorry* +says, *but we're not going to give it now; just trust +us.* That's good enough for Lean not to complain. For +now, that's what you need to know. +-/ + +#eval nat_to_bin 0 -- expect [0] +#eval nat_to_bin 1 -- expect [1] +#eval nat_to_bin 3 -- expect [1,1] +#eval nat_to_bin 5 -- expect [1,0,1] +#eval nat_to_bin 6 -- expect [1,1,0] + +/-! +### Left Pad with Zeros + +We represent *F* (false) values in our truth tables +as zeros. One remaining issue is that we have to make +each output list of binary digits *v* digits long, to +include *F* values (zeros) on the left. That is, we +need to left-pad our lists with zeros so that each +list is at least *v* digits wide. + +To do so, we'll iteratively prepend zeros to a given +list a number of times equal to *v* minus the length +of a given list. In Lean *v - l* is zero in all cases +where *l ≥ v* (these are natural numbers), so there is +nothing left to do if the input list is long enough. + +It's a pretty easy recursion. We take this opportunity +to show how you can do a little top-down programming +by writing a top-level program that uses one that is +then provided as a sort of private subroutine. Learn +how to write code like this. + +Finally, we note that it's very common to implement a +function as a non-recursive top-level program that in +turn calls a recursive subroutine, as illustrated here. +-/ + +def zero_pad : Nat → List Nat → List Nat + | v, l => zero_pad_recursive (v - (l.length)) l +where zero_pad_recursive : Nat → List Nat → List Nat + | 0, l => l + | v'+1, l => zero_pad_recursive v' (0::l) + +#eval zero_pad 3 [0] +#eval zero_pad 3 [1] +#eval zero_pad 3 [1,1] +#eval zero_pad 3 [0,1,1] +#eval zero_pad 3 [1,0,1] +#eval zero_pad 5 [1,0,1] + +/-! +We can now write a function that will produce the +required list of binary digits for the (input part +of the) n'th row of a truth table with v variables +(columns). +-/ + +def mk_bit_row : (row: Nat) → (cols : Nat) → List Nat +| r, c => zero_pad c (nat_to_bin r) + +#eval mk_bit_row 5 6 -- expect [0, 0, 0, 1, 0, 1] + +/-! +### List of Bits to List of Bools + +Next we need a function to convert a list of bits +(Nats) to a list of corresponding Bools. We will +convert Nat zero to false, and any other Nat to +true. +-/ + +-- Convert nat to bool where 0 ↦ false, ¬0 ↦ true +def bit_to_bool : Nat → Bool +| 0 => false +| _ => true + +#eval bit_to_bool 0 -- expect false +#eval bit_to_bool 1 -- expect true + +/-! +With this element conversion function we can now define +our list conversion function. There are two cases. First,' +given an empty list of Nat we return an empty list of Bool. +Second, we have a bit (Nat) at the head of a non-empty list, +in which case we return a list with that Nat converted to a +Bool at the head, and the conversion of t, the rest of the +list of Nats, into a list of Bools recursively. 
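+
+For example, under this conversion the bit list *[1, 0, 1]* becomes the
+Bool list *[true, false, true]*.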
+-/ + +def bit_list_to_bool_list : List Nat → List Bool +| [] => [] +| h::t => (bit_to_bool h) :: (bit_list_to_bool_list t) + +-- expect [false, false, false, true, false, true] +#eval bit_list_to_bool_list [0, 0, 0, 1, 0, 1] + +/-! +### Make the *r'th* Row of a Truth Table with *v* Variables +Now we can easily define a function that when given a truth +table row number and the number of variables (columns) +Given row and columns return list of Bools +-/ + +def mk_row_bools : (row : Nat) → (vars : Nat) → List Bool +| r, v => bit_list_to_bool_list (mk_bit_row r v) + + -- expect [false, false, false, true, false, true] +#eval mk_row_bools 5 6 + +/-! +## List Bool → Interp + +We now devise an algorithm to convert a list of Booleans, +[b₀, ..., bᵥ₋₁] into an interpretation. Denote *(mk_var i)*, +the *i'th* variable, as *vᵢ*. The idea then is to convert +the Boolean list into an interpretation function with the +following behavior: *{ v₀ ↦ b₀, ..., vᵥ₋₁, vᵢ → false for +i ≥ v}*. + +So how do we turn a list of values into a function from +variables to Boolean values? In short, we start with some +interpretation, given a variable and a value for it, we +return a new function that's exactly the same as the given +one *except* when the variable argument is the same as the +variable for which we want to return a new value. In that +case, the new function returns the new value when it is +applied to the variable whose value is being overridden. +The top-level algorithm will then iteratively override the +value of each variable according to the values in a given +row of a truth table. + +The hardest-to-understand function is *override*. Given an +interpretation, a variable whose value is to be overridden, +and a new value for that variable, we return a function that +when given any variable does a case analysis: if the variable +is *other than* the one being override, we just use the given +interpretation to compute and return a result, otherwise the +new function returns the specified new value. +-/ + +def override : Interp → var → Bool → Interp +| old_interp, var, new_val => + (λ v => if (v.n == var.n) -- when applied to var + then new_val -- return new value + else old_interp v) -- else retur old value + +def v₀ := var.mk 0 +def v₁ := var.mk 1 +def v₂ := var.mk 2 + +-- Demonstration + +def all_false : Interp := λ _ => false + +#eval all_false v₀ -- expect false +#eval all_false v₁ -- expect false +#eval all_false v₂ -- expect false + + +-- interp for [false, true, false], i.e., [0, 1, 0] +def interp2 := override all_false v₁ true + +#eval interp2 v₀ -- expect false +#eval interp2 v₁ -- expect true +#eval interp2 v₂ -- expect false + + +-- interp for [false, true, true], i.e., [0, 1, 1] +def interp3 := override interp2 v₂ true + +#eval interp3 v₀ -- expect false +#eval interp3 v₁ -- expect true +#eval interp3 v₂ -- expect true + +/-! +To turn a list of Booleans into an interpretation, +we thus start with some base interpretation, such as +*all_false*, then iterate through the entries in the +list, repeatedly overriding the most recently computed +interpretation with the so-called *maplet, { vᵢ ↦ bᵢ }*. +The end result will be the interpretation { v₀ ↦ b₀, ..., +vᵥ₋₁ ↦ bᵥ₋₁, ...}, with all variables after *vᵥ₋₁* being +mapped to the value given by the starting interpretation +(in practice, *all_false,* for us). + +Note: We introduce a new Lean programming mechanism: the +ability to define a function in terms of a "sub-routine" +that is subsequently defined in a *where* block. 
It is +common to define functions this way when the top-level +function is non-recursive and takes or computes some +additional data that it then passes on to a recursive +function that does most of the work. +-/ + +def bools_to_interp : List Bool → Interp + | l => bools_to_interp_helper l.length l +where bools_to_interp_helper : (vars : Nat) → (vals : List Bool) → Interp + | _, [] => all_false + | vars, h::t => + let len := (h::t).length + override (bools_to_interp_helper vars t) (var.mk (vars - len)) h + +-- Demonstration +def interp3' := bools_to_interp [false, true, true] + +#eval interp3' v₀ -- expect false +#eval interp3' v₁ -- expect true +#eval interp3' v₂ -- expect true + + +/-! +## From Number of Variables to List of Interpretations + +Building in steps, we next define a function that takes a +number of variables and a row index and that returns the +corresponding interpretation function. It uses *mk_row_bools* +to create the right row of Boolean values then applies the +preceding *bools_to_interp* function to it to return the +corresponding interpretation function. +-/ +def mk_interp_vars_row : (vars: Nat) → (row: Nat) → Interp +| v, r => bools_to_interp (mk_row_bools r v) + +def interp3'' := mk_interp_vars_row 3 3 -- vars=3, row=3 + +-- Demonstration +#eval interp3'' v₀ -- expect false +#eval interp3'' v₁ -- expect true +#eval interp3'' v₂ -- expect true + + +/-! +Finally, now, given nothing but a number of variables, we can +iteratively generate a list of all 2^v interpretations. We use +the same style of function definition above, where the top-level +program computes *2^v* from *v* and then passes *2^v* (the number +of interpretations/rows to generate, along with *v*, the number +of variables, to a recursive function that does most of the work. +-/ +def mk_interps (vars : Nat) : List Interp := + mk_interps_helper (2^vars) vars +where mk_interps_helper : (rows : Nat) → (vars : Nat) → List Interp + | 0, _ => [] + | (n' + 1), v => (mk_interp_vars_row v n')::mk_interps_helper n' v + +/- +Generate list of 8 interpretations for three variables +-/ +def interps3 := mk_interps 3 + +#reduce interps3.length -- expect 8 + +/-! +## From List Interp and Expr to List Output Bool Values +Now how about a function that takes a list of interpretations and +an expresssion and that produces a list of output values? +-/ + +def eval_expr_interps : List Interp → Expr → List Bool +| [], _ => [] +--| h::t, e => (eval_expr e h)::eval_expr_interps t e +| h::t, e => eval_expr_interps t e ++ [eval_expr e h] + +/-! +The change in the preceding algorithm puts the list of output +values in the right order with respect to our *enumeration* of +interpretations. +-/ + +-- Demonstration ] +#reduce eval_expr_interps (mk_interps 2) ({v₀} ∧ {v₁}) -- [F,F,F,T] +#reduce eval_expr_interps (mk_interps 2) ({v₀} ∨ {v₁}) -- [F,T,T,T] + +/-! + +## From Expr to Number of Variables (Highest Variable Index) + +But our interface isn't yet ideal. We're providing an expression as +an argument, and from it we should be able to figure out how many +variables are involved. In other words, we shouldn't have to provide +a list of interpretations as a separate (and here the first) argument. +The observation that leads to a solution is that we can analyze any +expression to determine the highest index of any variable appearing +in it. If we add 1 to that index, we'll have the number of variables +in the expression and thus the number of columns in the truth table. 
+We can then use mk_interps with that number as an argument to create +the list of interpretations, corresponding to truth table rows, that +ne need to pass to eval_expr_interps to get the list of outputs values. +-/ + +def highest_variable_index : Expr → Nat +| Expr.var_exp (var.mk i) => i +| Expr.un_exp _ e => highest_variable_index e +| Expr.bin_exp _ e1 e2 => max (highest_variable_index e1) (highest_variable_index e2) + +#eval highest_variable_index {v₀} +#eval highest_variable_index ({v₀} ∧ {v₂}) + +/-! +## Major Result: Expr → List Bool, One For Each Interpretation + +Here's a really important function. Given an expression in propositional +logic (using our syntax) it returns the list of outputs values under each +of the possible interpretations of the variables (thus atomic expressions) +in the given expression. +-/ + +def truth_table_outputs : Expr → List Bool +| e => eval_expr_interps (mk_interps (highest_variable_index e + 1)) e + +/-! +Demonstration/Tests: Confirm that actual results are as expected by +writing out the truth tables on paper. Note that in the second case, +with the highest variable index being 2 (Z is var.mk 2), we have *3* +variables/columns, thus 8 rows, and thus a list of 8 output values. +-/ + +/-! +Let's give nicer names to three atomic propositions (i.e., variable +expressions). +-/ +def X := {v₀} +def Y := {v₁} +def Z := {v₂} + + +/-! +Now we can produce lists of outputs under all interpretations of variables +from index 0 to the highest index of any variable appearing in the given +expression. Confirm that the results are expected by writing out the +truth tables on paper, computing the expected outputs, and checking them +against what we compute here. +-/ +#reduce truth_table_outputs (X ∧ Y) +#reduce truth_table_outputs (X ∨ Z) + +-- Write the truth tables on paper then check here +#reduce truth_table_outputs ((X ∧ Y) ∨ (X ∧ Z)) +#reduce truth_table_outputs ((X ∨ Y) ∧ (X ∨ Z)) + +-- Study expression and predict outputs before looking. +-- What names would you give to these particular propositions? +#reduce truth_table_outputs ((¬(X ∧ Y) ⇒ (¬X ∨ ¬Y))) +#reduce truth_table_outputs (((¬X ∨ ¬Y) ⇒ ¬(X ∧ Y))) +#reduce truth_table_outputs ((¬(X ∨ Y ) ⇒ (¬X ∧ ¬ Y))) +#reduce truth_table_outputs (((¬X ∧ ¬ Y) ⇒ ¬(X ∨ Y ))) + + +/-! +## HOMEWORK PART 1: + +Write three functions +- sat : Expr → Bool +- unsat: Expr → Bool +- valid: Expr → Bool + +Given any expression, *e*, in propositional logic, the first returns true +if *e* is sastisfiable, otherwise false. The second returns true if *e* is +unsatisfiable, otherwise false. The third returns true if *e* is valid, and +otherwise returns false. You can write helper functions if/as needed. Write +short comments to explain what each of your functions does. Write a few test +cases to demonstrate your results. 
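+
+One possible approach, sketched below as a hint rather than a required
+design, is to compute *truth_table_outputs e* and then reduce the resulting
+*List Bool* to a single Bool: an *or*-reduction answers satisfiability, an
+*and*-reduction answers validity, and unsatisfiability is just the negation
+of satisfiability. The helper names *any_true* and *all_true* below are
+illustrative, not part of the assignment.
+-/
+
+-- Hedged sketch (one possible design, not the required one)
+-- true iff at least one element of the list is true (∨-reduction)
+def any_true : List Bool → Bool
+| [] => false
+| b::rest => b || any_true rest
+
+-- true iff every element of the list is true (∧-reduction)
+def all_true : List Bool → Bool
+| [] => true
+| b::rest => b && all_true rest
+
+-- An expression e would then be satisfiable exactly when any_true of its
+-- truth table outputs is true, and valid exactly when all_true is true.
+#eval any_true (truth_table_outputs (X ∨ ¬X))  -- expect true
+#eval all_true (truth_table_outputs (X ∧ ¬X))  -- expect false
+
+/-!
+The same fold-style reduction appears elsewhere in the course files as the
+*reduce_or* and *reduce_and* helpers.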
+-/ + +-- Here + +-- A few tests +#eval is_valid (X) -- expect false +#eval is_sat (X) -- exect true +#eval is_sat (X ∧ ¬X) -- expect false +#eval is_unsat (X ∧ ¬X) -- expect true +#eval is_valid (X ∨ ¬X) -- expect true +#eval is_valid ((¬(X ∧ Y) ⇒ (¬X ∨ ¬Y))) -- expect true +#eval is_valid (¬(X ∨ Y) ⇒ (¬X ∧ ¬Y)) -- expect true +#eval is_valid ((X ∨ Y) ⇒ (X → ¬Y)) -- expect false + +-- Test cases + + + From bb70e46934fead3c38652448d3e474047f25fb06 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Tue, 10 Oct 2023 19:33:39 +0000 Subject: [PATCH 09/15] k --- Instructor/Homework/hw7_part2.lean | 23 ++++-- Student/hw7_part2_student.lean | 124 +++++++++++++++++++++++++++++ Student/lecture_13_student.lean | 50 +++++++++++- 3 files changed, 190 insertions(+), 7 deletions(-) create mode 100644 Student/hw7_part2_student.lean diff --git a/Instructor/Homework/hw7_part2.lean b/Instructor/Homework/hw7_part2.lean index 984730b0..79b9041e 100644 --- a/Instructor/Homework/hw7_part2.lean +++ b/Instructor/Homework/hw7_part2.lean @@ -15,6 +15,8 @@ otherwise. -- Define your function here +def pythag : Nat → Nat → Nat → Bool +| a, b, c => if (a^2 + b^2 = c^2) then true else false -- The following test cases should then pass #eval pythag 3 4 5 -- expect true @@ -31,7 +33,9 @@ inclusive. -- Define your function here - +def sum_cubes : Nat → Nat +| 0 => 0 +| n' + 1 => (n'+1)^3 + sum_cubes n' -- test case: sum_cubes 4 = 1 + 8 + 27 + 64 = 100 @@ -61,13 +65,20 @@ Lean prover to work out a solution for each case. def prod_ors_to_or_prods {α β γ δ: Type} : (α ⊕ β) × (γ ⊕ δ) → α × γ ⊕ α × δ ⊕ β × γ ⊕ β × δ -| _ => _ -| _ => _ -| _ => _ -| _ => _ +| (Sum.inl a, Sum.inl c) => Sum.inl (a,c) +| (Sum.inl a, Sum.inr d) => Sum.inr (Sum.inl (a,d)) +| (Sum.inr b, Sum.inl c) => Sum.inr (Sum.inr (Sum.inl (b,c))) +| (Sum.inr b, Sum.inr d) => Sum.inr (Sum.inr (Sum.inr (b,d))) -- Write the second function here from scratch +def or_prods_to_prod_ors {α β γ δ: Type} : + α × γ ⊕ α × δ ⊕ β × γ ⊕ β × δ → (α ⊕ β) × (γ ⊕ δ) +| Sum.inl (a,c) => (Sum.inl a, Sum.inl c) +| Sum.inr (Sum.inl (a,d)) => (Sum.inl a, Sum.inr d) +| Sum.inr (Sum.inr (Sum.inl (b,c))) => (Sum.inr b, Sum.inl c) +| Sum.inr (Sum.inr (Sum.inr (b,d))) => (Sum.inr b, Sum.inr d) + /-! ## #4 Propositional Logic Syntax and Semantics @@ -92,6 +103,8 @@ of propositional logic. Just write the expression here using the notation we've defined. -/ +-- def e := (A ∨ O) ∧ (C ∨ B) ⇔ ((A ∧ C) ∨ (A ∧ B) ∨ (O ∧ C) ∨ (O ∧ B)) + /-! ## #5 Propositional Logic Validity At the end of your updated Homework #7 file, use our diff --git a/Student/hw7_part2_student.lean b/Student/hw7_part2_student.lean new file mode 100644 index 00000000..0d5d6102 --- /dev/null +++ b/Student/hw7_part2_student.lean @@ -0,0 +1,124 @@ +/-! +# Exam 1 + +DO NOT CHEAT. +-/ + +/-! +## #1 Easy Functions [15 points] + +Define a function, *pythag*, that takes three natural +numbers, call them *a, b,* and *c*, and that returns +*true* if *a^2 + b^2 = c^2* and that returns *false* +otherwise. +-/ + +-- Define your function here + +def pythag : Nat → Nat → Nat → Bool +| a, b, c => if (a^2 + b^2 = c^2) then true else false + +-- The following test cases should then pass +#eval pythag 3 4 5 -- expect true +#eval pythag 6 7 8 -- expect false + +/-! +## #2 Recursive Functions + +Define a function, sum_cubes, that takes any natural +number, *n*, as an argument, and that retrns the sum +of the cubes of the natural numbers from *1* up to *n* +inclusive. 
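+
+(As a sanity check, a classical identity gives 1^3 + 2^3 + ... + n^3 =
+(n*(n+1)/2)^2, so for n = 4 the expected value is (4*5/2)^2 = 10^2 = 100,
+which matches the test case below.)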
+-/ + +-- Define your function here + +def sum_cubes : Nat → Nat +| 0 => 0 +| n' + 1 => (n'+1)^3 + sum_cubes n' + + +-- test case: sum_cubes 4 = 1 + 8 + 27 + 64 = 100 +#eval sum_cubes 4 -- expect 100 + + +/-! +## #3 Product and Sum Types + +Define two functions, called *prod_ors_to_or_prods*, +and *or_prods_to_prod_ors* that shows that a product +of sums be converted into a sum of products in a way +that the result can then be converted back into the +original product of sums. + +As a concrete example, you might want to show that if +you have an apple or an orange and you have a cup or +a bowl, then you have an apple and a cup or an apple +and a bowl or an orange and a cup or an orange and a +bowl. + +Hints: 1. Be sure you understand the reasoning before +you try to define your functions. 2. Use four cases. 3. +Use type-guided, top-down programming, assisted by the +Lean prover to work out a solution for each case. +-/ + +def prod_ors_to_or_prods {α β γ δ: Type} : + (α ⊕ β) × (γ ⊕ δ) → α × γ ⊕ α × δ ⊕ β × γ ⊕ β × δ +| (Sum.inl a, Sum.inl c) => Sum.inl (a,c) +| (Sum.inl a, Sum.inr d) => Sum.inr (Sum.inl (a,d)) +| (Sum.inr b, Sum.inl c) => Sum.inr (Sum.inr (Sum.inl (b,c))) +| (Sum.inr b, Sum.inr d) => Sum.inr (Sum.inr (Sum.inr (b,d))) + +#reduce prod_ors_to_or_prods ((Sum.inr 3),(Sum.inr true)) + +-- Write the second function here from scratch + +def or_prods_to_prod_ors {α β γ δ: Type} : + α × γ ⊕ α × δ ⊕ β × γ ⊕ β × δ → (α ⊕ β) × (γ ⊕ δ) +| Sum.inl (a,c) => (Sum.inl a, Sum.inl c) +| Sum.inr (Sum.inl (a,d)) => (Sum.inl a, Sum.inr d) +| Sum.inr (Sum.inr (Sum.inl (b,c))) => (Sum.inr b, Sum.inl c) +| Sum.inr (Sum.inr (Sum.inr (b,d))) => (Sum.inr b, Sum.inr d) + +#reduce or_prods_to_prod_ors (prod_ors_to_or_prods ((Sum.inr 3),(Sum.inl true))) + +/-! +## #4 Propositional Logic Syntax and Semantics + +Extend your Homework #7 solution to implement the +propositional logic *iff/equivalence* (↔) operator. +Note that Lean does not natively define the *iff* +Boolean operator. +-/ + +/-! +Using our syntax for propositional logic, and the +variable names *A, O, C,* and *B*, respectively for +the propositions *I have an apple, I have an orange, +I have a cup,* and *I have a bowl* write a proposition +that having an orange or an apple and a bowl or a cup +is equivalent to having an apple and a bowl or an +apple and a cup or an orange and a bowl or an orange +and a cup. + +Note: There's no need here to use our implementation +of propositional logic. Just write the expression +here using the notation we've defined. +-/ + +def A := true +def O := true +def C := true +def B := true + +def e := (A ∨ O) ∧ (C ∨ B) ↔ ((A ∧ C) ∨ (A ∧ B) ∨ (O ∧ C) ∨ (O ∧ B)) + + +/-! +## #5 Propositional Logic Validity +At the end of your updated Homework #7 file, use our +validity checking function to check your expression +for validity, in the expectation that the checker will +determine that the expression is in fact valid. 
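+
+Below is a hedged sketch of what that check might look like. It assumes
+that the propositional-logic definitions from Student/lecture_13_student.lean
+(the *var* and *Expr* types, the {·} variable notation, the ∧, ∨ and ⇔
+notations, and the *is_valid* checker) are in scope here, which they are
+not by default. The primed names are used only to avoid clashing with the
+Bool-valued *A, O, C, B,* and *e* defined above.
+-/
+
+-- Hedged sketch, assuming var, Expr, its notations, and is_valid are in scope
+def a' := var.mk 0  -- apple
+def o' := var.mk 1  -- orange
+def c' := var.mk 2  -- cup
+def b' := var.mk 3  -- bowl
+
+def A' := {a'}
+def O' := {o'}
+def C' := {c'}
+def B' := {b'}
+
+def e' := (A' ∨ O') ∧ (C' ∨ B') ⇔ ((A' ∧ C') ∨ (A' ∧ B') ∨ (O' ∧ C') ∨ (O' ∧ B'))
+
+#eval is_valid e'  -- expect true
+
+/-!
+The updated lecture_13_student.lean file runs the same check with the
+unprimed names *a, o, c, b,* and *e*.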
+-/ \ No newline at end of file diff --git a/Student/lecture_13_student.lean b/Student/lecture_13_student.lean index 8084f858..64bc6f7d 100644 --- a/Student/lecture_13_student.lean +++ b/Student/lecture_13_student.lean @@ -160,6 +160,7 @@ inductive binary_op : Type | and | or | imp +| bi_imp inductive Expr : Type | var_exp (v : var) | un_exp (op : unary_op) (e : Expr) @@ -169,16 +170,21 @@ prefix:max "¬" => Expr.un_exp unary_op.not infixr:35 " ∧ " => Expr.bin_exp binary_op.and infixr:30 " ∨ " => Expr.bin_exp binary_op.or infixr:25 " ⇒ " => Expr.bin_exp binary_op.imp -infixr:20 " ⇔ " => Expr.bin_exp binary_op.iff +infixr:20 " ⇔ " => Expr.bin_exp binary_op.bi_imp def eval_un_op : unary_op → (Bool → Bool) | unary_op.not => not def implies : Bool → Bool → Bool | true, false => false | _, _ => true +def bi_implies : Bool → Bool → Bool +| true, true => true +| false, false => true +| _, _ => false def eval_bin_op : binary_op → (Bool → Bool → Bool) | binary_op.and => and | binary_op.or => or | binary_op.imp => implies +| binary_op.bi_imp => bi_implies def Interp := var → Bool def eval_expr : Expr → Interp → Bool | (Expr.var_exp v), i => i v @@ -680,6 +686,33 @@ cases to demonstrate your results. -- Here +def is_sat : Expr → Bool + | e => sat_evaluator (truth_table_outputs e) +where sat_evaluator : List Bool → Bool + | [] => false + | true::_ => true + | false::t => sat_evaluator t + +-- other method -> h::t => if h then true else sat_evaluator t + +def is_valid : Expr → Bool + | e => val_evaluator (truth_table_outputs e) +where val_evaluator : List Bool → Bool + | [] => true + | true::t => val_evaluator t + | false::_ => false + +-- other method -> h::t => if (not h) then false else val_evaluator t + +def is_unsat : Expr → Bool + | e => unsat_evaluator (truth_table_outputs e) +where unsat_evaluator : List Bool → Bool + | [] => true + | false::t => unsat_evaluator t + | true::_ => false + +-- other method -> h::t => if h then false else unsat_evaluator t + -- A few tests #eval is_valid (X) -- expect false #eval is_sat (X) -- exect true @@ -688,9 +721,22 @@ cases to demonstrate your results. 
#eval is_valid (X ∨ ¬X) -- expect true #eval is_valid ((¬(X ∧ Y) ⇒ (¬X ∨ ¬Y))) -- expect true #eval is_valid (¬(X ∨ Y) ⇒ (¬X ∧ ¬Y)) -- expect true -#eval is_valid ((X ∨ Y) ⇒ (X → ¬Y)) -- expect false +#eval is_valid ((X ∨ Y) ⇒ (X ⇒ ¬Y)) -- expect false -- Test cases +def a := var.mk 0 +def o := var.mk 1 +def c := var.mk 2 +def b := var.mk 3 + +def A := {a} +def O := {o} +def C := {c} +def B := {b} +def e := (A ∨ O) ∧ (C ∨ B) ⇔ ((A ∧ C) ∨ (A ∧ B) ∨ (O ∧ C) ∨ (O ∧ B)) +#eval is_valid e +#eval is_sat e +#eval is_unsat e \ No newline at end of file From 2dec89c657de9a6af9db919f89272ebe75d8620b Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Mon, 16 Oct 2023 19:44:56 +0000 Subject: [PATCH 10/15] sdf --- Instructor/Exams/exam_1.lean | 40 ++- Instructor/Lectures/lecture_13.lean | 113 ++------ Student/RK - exam_1 _Student.lean | 386 ++++++++++++++++++++++++++++ 3 files changed, 437 insertions(+), 102 deletions(-) create mode 100644 Student/RK - exam_1 _Student.lean diff --git a/Instructor/Exams/exam_1.lean b/Instructor/Exams/exam_1.lean index a7b699db..75015b59 100644 --- a/Instructor/Exams/exam_1.lean +++ b/Instructor/Exams/exam_1.lean @@ -28,7 +28,8 @@ inductive binary_op : Type -- expressions (abstract syntax) inductive Expr : Type --- Extra credit answers here +| top_exp +| bot_exp | var_exp (v : var) | un_exp (op : unary_op) (e : Expr) | bin_exp (op : binary_op) (e1 e2 : Expr) @@ -74,7 +75,8 @@ def Interp := var → Bool -- The meanings of expressions "under" given interpretations def eval_expr : Expr → Interp → Bool --- Extra credit answers here +| Expr.top_exp, _ => true +| Expr.bot_exp, _ => false | (Expr.var_exp v), i => i v | (Expr.un_exp op e), i => (eval_un_op op) (eval_expr e i) | (Expr.bin_exp op e1 e2), i => (eval_bin_op op) (eval_expr e1 i) (eval_expr e2 i) @@ -155,7 +157,8 @@ where mk_interps_helper : (rows : Nat) → (vars : Nat) → List Interp -- Count the number of variables in a given expression def max_variable_index : Expr → Nat -| -- Extra credit answers here +| Expr.top_exp => 1 +| Expr.bot_exp => 0 | Expr.var_exp (var.mk i) => i | Expr.un_exp _ e => max_variable_index e | Expr.bin_exp _ e1 e2 => max (max_variable_index e1) (max_variable_index e2) @@ -213,7 +216,7 @@ type α. -- Your answer here def and_elimination {α β : Type} : α × β → α -| (_, _) => _ +| (a, _) => a /-! ### b. Funny transitivity [15 points] @@ -230,7 +233,7 @@ in this case. -- Your answer here def funny_transitivity {α β γ : Type} : (α → β) × (β → γ) → (α → γ) -| _ => _ +| (a2b, b2y) => (fun a => b2y (a2b a)) /-! @@ -244,7 +247,7 @@ assumed value (a : α) one can always derive a value of -- Your answer here def ex_empty {α β : Type} : (α → Empty) → α → β -| _, _ => _ +| a2e, a => nomatch (a2e a) /-! ## #2 Data Types @@ -259,6 +262,17 @@ and the values of type Cheese are cheddar and brie. -- Your answers here +inductive Bread : Type +| white +| wheat + +inductive Spread : Type +| jam +| pesto + +inductive Cheese : Type +| cheddar +| brie /-! ### b. An interesting inductive type [15 points] @@ -273,6 +287,10 @@ the Sandwich type. -/ -- Your answer here +-- (s: (Cheese ⊕ Spread)) +structure Sandwich : Type := (c : Bread) (s: (Cheese ⊕ Spread)) + +def v := Sandwich.mk Bread.white (Sum.inr Spread.jam) /-! ### c. Now make yourself a Sandwich [15 points] @@ -285,7 +303,7 @@ bread and jam as a spread. -- Your answer here -def jam_sandwich : Sandwich := _ +def jam_sandwich : Sandwich := Sandwich.mk Bread.wheat (Sum.inr Spread.jam) /-! 
@@ -317,7 +335,9 @@ as its arguments (1) type parameters (make them implicit), -- Your answer here - +def map {α β : Type} : (α → β) → List α → List β +| _, [] => [] +| a2b, h::t => (a2b h) :: (map a2b t) -- test case: use map instead of bit_list_to_bool_list -- expect [true, false, true, false, true] @@ -344,7 +364,7 @@ Your job is to extend our syntax and semantics to include ⊤ and ⊥ as valid expressions. You will have to carry out the following tasks. -- add true_exp and false_exp as constructors in Expr +- add top_exp and bottom_exp as constructors in Expr - note that we've already added concrete notation definitions - add rules for evaluating these expressions to eval_expr - add rules for these expressions to max_variable_index @@ -363,5 +383,5 @@ Recall that a model is a binding of values to variables that makes a proposition true. What value must X have to make (X ⇒ ⊥) true? --- Answer: {X is _____ } is a model of (X ⇒ ⊥) +-- Answer: {X is false} is a model of (X ⇒ ⊥) -/ diff --git a/Instructor/Lectures/lecture_13.lean b/Instructor/Lectures/lecture_13.lean index f927964d..8084f858 100644 --- a/Instructor/Lectures/lecture_13.lean +++ b/Instructor/Lectures/lecture_13.lean @@ -160,7 +160,6 @@ inductive binary_op : Type | and | or | imp -| iff inductive Expr : Type | var_exp (v : var) | un_exp (op : unary_op) (e : Expr) @@ -176,15 +175,10 @@ def eval_un_op : unary_op → (Bool → Bool) def implies : Bool → Bool → Bool | true, false => false | _, _ => true -def iff : Bool → Bool → Bool -| true, true => true -| false, false => true -| _, _ => false def eval_bin_op : binary_op → (Bool → Bool → Bool) | binary_op.and => and | binary_op.or => or | binary_op.imp => implies -| binary_op.iff => iff def Interp := var → Bool def eval_expr : Expr → Interp → Bool | (Expr.var_exp v), i => i v @@ -531,7 +525,7 @@ def interp3' := bools_to_interp [false, true, true] /-! -## From Number of Variables and Row Index to Interpretation +## From Number of Variables to List of Interpretations Building in steps, we next define a function that takes a number of variables and a row index and that returns the @@ -552,8 +546,6 @@ def interp3'' := mk_interp_vars_row 3 3 -- vars=3, row=3 /-! -## From Number of Variables to List of Interpretations - Finally, now, given nothing but a number of variables, we can iteratively generate a list of all 2^v interpretations. We use the same style of function definition above, where the top-level @@ -567,24 +559,6 @@ where mk_interps_helper : (rows : Nat) → (vars : Nat) → List Interp | 0, _ => [] | (n' + 1), v => (mk_interp_vars_row v n')::mk_interps_helper n' v - -/-! -## (e : Expression) → Nat, The Number of Variables In e --/ - --- Analyze and understand how this function works! -def max_variable_index : Expr → Nat -| Expr.var_exp (var.mk i) => i -| Expr.un_exp _ e => max_variable_index e -| Expr.bin_exp _ e1 e2 => max (max_variable_index e1) (max_variable_index e2) - -#eval max_variable_index {v₀} -#eval max_variable_index ({v₀} ∧ {v₂}) - --- Given expression, return number of variables it assumes -def num_vars : Expr → Nat := λ e => max_variable_index e + 1 - - /- Generate list of 8 interpretations for three variables -/ @@ -593,7 +567,7 @@ def interps3 := mk_interps 3 #reduce interps3.length -- expect 8 /-! -## From List Interp and Expr to List of Bool Outputs +## From List Interp and Expr to List Output Bool Values Now how about a function that takes a list of interpretations and an expresssion and that produces a list of output values? 
-/ @@ -604,24 +578,25 @@ def eval_expr_interps : List Interp → Expr → List Bool | h::t, e => eval_expr_interps t e ++ [eval_expr e h] /-! -The change in the preceding algorithm made after class puts the -list of output values in order with respect to our *enumeration* -of interpretations. +The change in the preceding algorithm puts the list of output +values in the right order with respect to our *enumeration* of +interpretations. -/ --- Test/Demonstration cases +-- Demonstration ] #reduce eval_expr_interps (mk_interps 2) ({v₀} ∧ {v₁}) -- [F,F,F,T] #reduce eval_expr_interps (mk_interps 2) ({v₀} ∨ {v₁}) -- [F,T,T,T] /-! -## From Expr to max Variable Index + +## From Expr to Number of Variables (Highest Variable Index) But our interface isn't yet ideal. We're providing an expression as an argument, and from it we should be able to figure out how many variables are involved. In other words, we shouldn't have to provide a list of interpretations as a separate (and here the first) argument. The observation that leads to a solution is that we can analyze any -expression to determine the max index of any variable appearing +expression to determine the highest index of any variable appearing in it. If we add 1 to that index, we'll have the number of variables in the expression and thus the number of columns in the truth table. We can then use mk_interps with that number as an argument to create @@ -629,8 +604,16 @@ the list of interpretations, corresponding to truth table rows, that ne need to pass to eval_expr_interps to get the list of outputs values. -/ +def highest_variable_index : Expr → Nat +| Expr.var_exp (var.mk i) => i +| Expr.un_exp _ e => highest_variable_index e +| Expr.bin_exp _ e1 e2 => max (highest_variable_index e1) (highest_variable_index e2) + +#eval highest_variable_index {v₀} +#eval highest_variable_index ({v₀} ∧ {v₂}) + /-! -## Expr → List Bool: One Value For Each Interpretation +## Major Result: Expr → List Bool, One For Each Interpretation Here's a really important function. Given an expression in propositional logic (using our syntax) it returns the list of outputs values under each @@ -639,12 +622,12 @@ in the given expression. -/ def truth_table_outputs : Expr → List Bool -| e => eval_expr_interps (mk_interps (num_vars e)) e +| e => eval_expr_interps (mk_interps (highest_variable_index e + 1)) e /-! Demonstration/Tests: Confirm that actual results are as expected by writing out the truth tables on paper. Note that in the second case, -with the max variable index being 2 (Z is var.mk 2), we have *3* +with the highest variable index being 2 (Z is var.mk 2), we have *3* variables/columns, thus 8 rows, and thus a list of 8 output values. -/ @@ -659,7 +642,7 @@ def Z := {v₂} /-! Now we can produce lists of outputs under all interpretations of variables -from index 0 to the max index of any variable appearing in the given +from index 0 to the highest index of any variable appearing in the given expression. Confirm that the results are expected by writing out the truth tables on paper, computing the expected outputs, and checking them against what we compute here. @@ -709,59 +692,5 @@ cases to demonstrate your results. -- Test cases -/-! -## A SAT Solver - -A SAT solver takes an expression, e, and returns a value of -the sum type, *SomeOrNone*, an instance of which which holds -either *some model,* if there is at least one, or *nothing*. -We use a sum type, Interp ⊕ Unit: *(Sum.inl m)* returns the -model, *m*, while *Sum.inr Unit.unit* signals that there is -no model to return. 
--/ - -def SomeModelOrNone := Interp ⊕ Unit -- This is a *type* - - -/- -Here's the function. Note thus use of several "let bindings" -in this code. They bind names, as shorthands, to given terms, -so a final return value can be expressed more succinctly and -clearly. This is a common style of coding in most functional -programming languages. Here we bind names to two terms, then -the expression, *find_model interps e*, defines the return -value. --/ -def get_model_fun : Expr → SomeModelOrNone -| e => - let num_vars := num_vars e - let interps := (mk_interps num_vars) - find_model interps e -where find_model : List Interp → Expr → SomeModelOrNone -| [], _ => Sum.inr Unit.unit -| h::t, e => if (eval_expr e h) then Sum.inl h else find_model t e - --- Tests -#reduce get_model_fun (X) -- expect Sum.inl _ (a function) -#reduce get_model_fun (X ∧ ¬X) -- expect Sum.inr Unit.unit - --- List of Booleans for first *num_vars* variables under given Interp -def interp_to_bools : Interp → (num_vars : Nat) → List Bool -| _, 0 => [] -| i, (n' + 1) => interp_to_bools i n' ++ [(i (var.mk n'))] -/-! -Given some model, return list of Boolean values of first *num_vars* -variables, or in the case of no model, just return an empty list. --/ -def some_model_or_none_to_bools : SomeModelOrNone → (num_vars : Nat) → List Bool -| Sum.inl i, n => interp_to_bools i n -| Sum.inr _, _ => [] - - --- Test cases -#reduce some_model_or_none_to_bools (get_model_fun (X ∧ ¬Y)) 2 -#reduce some_model_or_none_to_bools (get_model_fun (X ∧ ¬X)) 2 -#reduce some_model_or_none_to_bools (get_model_fun (¬X ∨ ¬Y)) 2 --- list of all models, then convert to list of lists of bools? \ No newline at end of file diff --git a/Student/RK - exam_1 _Student.lean b/Student/RK - exam_1 _Student.lean new file mode 100644 index 00000000..89c1c4a0 --- /dev/null +++ b/Student/RK - exam_1 _Student.lean @@ -0,0 +1,386 @@ +/-! +# UVa CS2120-002 F23 Midterm Exam + +The first section of this exam just repeats our definition +of propositional logic syntax and semantics. Skip ahead to +the second section to find the exam. + +## Propositional Logic: Syntax, Sematics, Satisfiability + +This section of the exam simply includes our formal +definition of the syntax and semantics of propositional +logic and of functions that determine whether a given +expression is valid, satisfiable, or unsatisfiable. + +### Syntax +-/ + +-- variables +structure var : Type := (n: Nat) + +-- connectives/operators +inductive unary_op : Type | not +inductive binary_op : Type +| and +| or +| imp +| iff + +-- expressions (abstract syntax) +inductive Expr : Type +| top_exp +| bot_exp +| var_exp (v : var) +| un_exp (op : unary_op) (e : Expr) +| bin_exp (op : binary_op) (e1 e2 : Expr) + +-- concrete syntax +notation "{"v"}" => Expr.var_exp v +prefix:max "¬" => Expr.un_exp unary_op.not +infixr:35 " ∧ " => Expr.bin_exp binary_op.and +infixr:30 " ∨ " => Expr.bin_exp binary_op.or +infixr:25 " ⇒ " => Expr.bin_exp binary_op.imp +infixr:20 " ⇔ " => Expr.bin_exp binary_op.iff +notation " ⊤ " => Expr.top_exp +notation " ⊥ " => Expr.bot_exp + + +/-! 
+### Semantics +-/ + +-- meanings of unary operators +def eval_un_op : unary_op → (Bool → Bool) +| unary_op.not => not + +-- missing binary Boolean operators +def implies : Bool → Bool → Bool +| true, false => false +| _, _ => true + +def iff : Bool → Bool → Bool +| true, true => true +| false, false => true +| _, _ => false + +-- meanings of binary operators +def eval_bin_op : binary_op → (Bool → Bool → Bool) +| binary_op.and => and +| binary_op.or => or +| binary_op.imp => implies +| binary_op.iff => iff + +-- The interpretation type +def Interp := var → Bool + +-- The meanings of expressions "under" given interpretations +def eval_expr : Expr → Interp → Bool +| Expr.top_exp, _ => true +| Expr.bot_exp, _ => false +| (Expr.var_exp v), i => i v +| (Expr.un_exp op e), i => (eval_un_op op) (eval_expr e i) +| (Expr.bin_exp op e1 e2), i => (eval_bin_op op) (eval_expr e1 i) (eval_expr e2 i) + +/-! +### Satisfiability + +We built a satisfiability checker for propositional logic, +in several pieces. This subsection includes all definitions. + +#### Truth Table Input Rows +-/ + +-- Nat to Binary +-- You don't need to worry about the "have" part +def right_bit (n : Nat) := n%2 +def shift_right (n : Nat) := n/2 +def nat_to_bin : Nat → List Nat +| 0 => [0] +| 1 => [1] +| n' + 2 => + have : (shift_right (n' + 2)) < (n' + 2) := sorry + nat_to_bin (shift_right (n' + 2)) ++ [right_bit (n' + 2)] + +-- Left pad with zeros +def zero_pad : Nat → List Nat → List Nat + | v, l => zero_pad_recursive (v - (l.length)) l +where zero_pad_recursive : Nat → List Nat → List Nat + | 0, l => l + | v'+1, l => zero_pad_recursive v' (0::l) + +-- Make row of bits at index "row" padded out to "cols" wide +def mk_bit_row : (row: Nat) → (cols : Nat) → List Nat +| r, c => zero_pad c (nat_to_bin r) + +-- Convert list of bits to list of bools +def bit_to_bool : Nat → Bool +| 0 => false +| _ => true + +def bit_list_to_bool_list : List Nat → List Bool +| [] => [] +| h::t => (bit_to_bool h) :: (bit_list_to_bool_list t) + +-- Make row'th row of truth table with vars variables +def mk_row_bools : (row : Nat) → (vars : Nat) → List Bool +| r, v => bit_list_to_bool_list (mk_bit_row r v) + +/-! 
+#### Interpretations +-/ + +-- Convert list of bools to interpretation +def override : Interp → var → Bool → Interp +| old_interp, var, new_val => + (λ v => if (v.n == var.n) -- when applied to var + then new_val -- return new value + else old_interp v) -- else retur old value + +def bools_to_interp : List Bool → Interp + | l => bools_to_interp_helper l.length l +where bools_to_interp_helper : (vars : Nat) → (vals : List Bool) → Interp + | _, [] => (λ _ => false) + | vars, h::t => + let len := (h::t).length + override (bools_to_interp_helper vars t) (var.mk (vars - len)) h + +-- Make an interpretation for given row with "vars" variables +def mk_interp_vars_row : (vars: Nat) → (row: Nat) → Interp +| v, r => bools_to_interp (mk_row_bools r v) + +-- Given number of variables, return list of interpretations +def mk_interps (vars : Nat) : List Interp := + mk_interps_helper (2^vars) vars +where mk_interps_helper : (rows : Nat) → (vars : Nat) → List Interp + | 0, _ => [] + | (n' + 1), v => (mk_interp_vars_row v n')::mk_interps_helper n' v + +-- Count the number of variables in a given expression +def max_variable_index : Expr → Nat +| Expr.top_exp => 1 +| Expr.bot_exp => 0 +| Expr.var_exp (var.mk i) => i +| Expr.un_exp _ e => max_variable_index e +| Expr.bin_exp _ e1 e2 => max (max_variable_index e1) (max_variable_index e2) + +def num_vars : Expr → Nat := λ e => max_variable_index e + 1 + +/-! +#### Truth Table Output Column +-/ +def eval_expr_interps : List Interp → Expr → List Bool +| [], _ => [] +| h::t, e => eval_expr_interps t e ++ [eval_expr e h] + +-- Given expression, return truth table outputs by ascending row index +def truth_table_outputs : Expr → List Bool +| e => eval_expr_interps (mk_interps (num_vars e)) e + +/-! +### Satisfiability Checkers +-/ + +-- functions to check if bool list has any, resp. all, values true +def reduce_or : List Bool → Bool +| [] => false +| h::t => or h (reduce_or t) + +def reduce_and : List Bool → Bool +| [] => true +| h::t => and h (reduce_and t) + +-- Three main functions: test given expression for satsfiability properties +def is_sat : Expr → Bool := λ e : Expr => reduce_or (truth_table_outputs e) +def is_valid : Expr → Bool := λ e : Expr => reduce_and (truth_table_outputs e) +def is_unsat : Expr → Bool := λ e : Expr => not (is_sat e) + + +/-! +**************** +**************** +EXAM STARTS HERE +**************** +**************** +-/ + +/-! +## #1 Proofs as Programs + +### a. And elimination [15 points] + +Prove, by completing the following function definition, that +from a value of type α × β one can always derive a value of +type α. +-/ + +-- Your answer here + +def and_elimination {α β : Type} : α × β → α +| (a, _) => a + +/-! +### b. Funny transitivity [15 points] + +Prove, by completing the following function definition, that +(α → β) × (β → γ) → (α → γ). In other words, if you have a +pair of functions, one converting α to β and one converting +β to γ, then you can always construct a function from α to γ. +Hint: Use type-guided top-down programming, and remember how +to express a *function* value: that's what you have to return +in this case. +-/ + +-- Your answer here + +def funny_transitivity {α β γ : Type} : (α → β) × (β → γ) → (α → γ) +| (a2b, b2y) => (fun a => b2y (a2b a)) + + +/-! +### c. Ex empty quodlibet [15 points] + +Prove that if a type, α, is uninhabited then from an +assumed value (a : α) one can always derive a value of +*any* type, β. 
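+
+One way to read this: the first argument has type α → Empty, so applying it
+to the assumed value *a* yields a term of the uninhabited type *Empty*, and
+a case analysis on that term has no cases at all; that is exactly what
+*nomatch* expresses in the answer below.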
+-/ + +-- Your answer here + +def ex_empty {α β : Type} : (α → Empty) → α → β +| a2e, a => nomatch (a2e a) + +/-! +## #2 Data Types + +### a. Enumerated Types [10 points] + +Define three enumerated types, called Bread, Spread, +and Cheese, where the values of type Bread are white and +wheat; the values of type Spread are jam and pesto; +and the values of type Cheese are cheddar and brie. +-/ + +-- Your answers here + +inductive Bread : Type +| white +| wheat + +inductive Spread : Type +| jam +| pesto + +inductive Cheese : Type +| cheddar +| brie + +/-! +### b. An interesting inductive type [15 points] + +Define a data type called Sandwich, with one constructor +called mk, taking as its arguments a choice of bread and +*either (but not both)* a choice of Cheese or a choice of +Spread. Hint: Remember how to define a type that carries +a value of either one type or another. Extra credit [2pts] +for using *structure* instead of *inductive* to declare +the Sandwich type. +-/ + +-- Your answer here +structure Sandwich : Type := (c : Bread) (c2: (Cheese ⊕ Spread)) + + + +/-! +### c. Now make yourself a Sandwich [15 points] + +Define jam_sandwhich to be a Sandwhich made with +wheat bread and jam. You have to use Sandwich.mk +to create a term representing a sandwich with wheat +bread and jam as a spread. +-/ + +-- Your answer here + +def jam_sandwich : Sandwich := Sandwich.mk Bread.wheat (Sum.inr Spread.jam) + + +/-! +### #3 Recursive Data and Functions [15 points] + +In our implementation of propositional logic, we defined a +function, *bit_list_to_bool_list*, to convert a list of Nat +to a corresponding list of Bool. Here's the definition (with +a tick mark on the name). +-/ + +def bit_list_to_bool_list' : List Nat → List Bool +| [] => [] +| h::t => (bit_to_bool h) :: (bit_list_to_bool_list t) + +-- expect [true, false, true, false, true] +#eval bit_list_to_bool_list [1, 0, 3, 0, 1] + +/-! +Your job is to generalize this solution by defining a new +function, called *map*. Generalize the types, Nat and Bool, +to arbitrary types, α and β. Generalize *bit_to_bool* to +be any function for converting an individual α value into +a corresponding β value. Your map function will thus take +as its arguments (1) type parameters (make them implicit), +(2) a function for converting elements, and (3) a List of +α values, and will return a correspond List of β values. +-/ + +-- Your answer here + +def map {α β : Type} : (α → β) → List α → List β +| _, [] => [] +| a2b, h::t => (a2b h) :: (map a2b t) + +-- test case: use map instead of bit_list_to_bool_list +-- expect [true, false, true, false, true] +#eval map bit_to_bool [1, 0, 1, 0, 1] + + +/-! +## #4 Propositional Logic [10 pts Extra Credit, for A+] + +Propositional logic as we've formulate it has variable +expressions (atomic expressions), and larger expressions +built by applying connectives (∧, ∨, ¬, ⇒, ⇔) to smaller +expressions. + +Some formalizations of propositional logic also include +the *constant* expressions *True* and *False*. In concrete +syntax, they are sometimes written as ⊤ (pronounced *top*) +and ⊥ (bottom). Semantically ⊤ evaluates to Boolean true +and ⊥ evaluates to Boolean false. + +### a. Extend Syntax and Semantics + +Your job is to extend our syntax and semantics to include +⊤ and ⊥ as valid expressions. You will have to carry out +the following tasks. 
+ +- add top_exp and bottom_exp as constructors in Expr +- note that we've already added concrete notation definitions +- add rules for evaluating these expressions to eval_expr +- add rules for these expressions to max_variable_index + +When you're done, the following logic should evaluate +without error. +-/ + +def X := {var.mk 0} +#eval is_sat (X ⇒ ⊥) -- expect true + +/-! +### b. Give a model for (X ⇒ ⊥) + +Recall that a model is a binding of values to variables +that makes a proposition true. What value must X have to +make (X ⇒ ⊥) true? + +-- Answer: {X is false} is a model of (X ⇒ ⊥) +-/ From 18313a7fcb5e15d5de23a04a1d6b0281b3868981 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Sun, 5 Nov 2023 04:24:43 +0000 Subject: [PATCH 11/15] j --- Instructor/Lectures/lecture_14.lean | 128 ++++++++++++++-------------- Student/lecture_17.lean | 98 +++++++++++++++++++++ 2 files changed, 162 insertions(+), 64 deletions(-) create mode 100644 Student/lecture_17.lean diff --git a/Instructor/Lectures/lecture_14.lean b/Instructor/Lectures/lecture_14.lean index 40868edc..84fbeb9f 100644 --- a/Instructor/Lectures/lecture_14.lean +++ b/Instructor/Lectures/lecture_14.lean @@ -1,23 +1,23 @@ /-! # Model Finders and Counterexample Generators -The main topic of this chapter is model and +The main topic of this chapter is model and counterexample generation: given a proposition in propositional logic, find models if there are any, and similarly find counterexamples if there -are any. +are any. We'll begin by generalizing some patterns we've been seeing in functions that handle lists. The first section introduces and illustrates the use -of List map, foldr, and filter functions. +of List map, foldr, and filter functions. -Second, we'll see that with these functions in -hand and a better understanding of recursion, we +Second, we'll see that with these functions in +hand and a better understanding of recursion, we can improve our propositional logic satisfiability checking functions. -Finally, we will introduce the concept of a model +Finally, we will introduce the concept of a model finder for expressions in propositional logic, also known as a *SAT solver*, and see how that idea can also provide a way to generate counterexamples to @@ -30,7 +30,7 @@ propositions that are not always true. The *List.map* function, converts a list of α terms, into a list of corresponding β values by applying a given function, *f : α → β*, to each α in turn. E.g., -*map (λ (s : String) => s.length) ["Hello", "Lean"]* +*map (λ (s : String) => s.length) ["Hello", "Lean"]* returns *[5, 4]*. Here's the type of List.map in the Lean libraries. @@ -54,11 +54,11 @@ takes any number of arguments, in a list. As an example, our to just one, indicating whether the list has at least one true value, is simply an n-ary extension of *or*. Applying such an n-ary operation on no arguments (an empty list) simply returns -the identity element (base case value). +the identity element (base case value). -/ -#check @List.foldr +#check @List.foldr #eval List.foldr Nat.add 0 [1,2,3,4,5] -- expect 15 #eval List.foldr Nat.mul 0 [1,2,3,4,5] -- expect 120, oops! @@ -70,7 +70,7 @@ the identity element (base case value). -/ /-! -The *List.filter* function takes a list, *l* of α values, and an α → Bool +The *List.filter* function takes a list, *l* of α values, and an α → Bool predicate function that indicates whether a given α value has a particular property, and returns the sublist of α values in l that have property, *p*. 
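For instance (an aside added here for illustration, not in the
original notes), one can keep just the even numbers in a small
list of naturals:
-/

#eval List.filter (λ n => n % 2 == 0) [0, 1, 2, 3, 4, 5]  -- expect [0, 2, 4]

/-!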
-/ @@ -103,18 +103,18 @@ inductive binary_op : Type | iff inductive Expr : Type -| true_exp -| false_exp +| true_exp +| false_exp | var_exp (v : var) | un_exp (op : unary_op) (e : Expr) | bin_exp (op : binary_op) (e1 e2 : Expr) notation "{"v"}" => Expr.var_exp v -prefix:max "¬" => Expr.un_exp unary_op.not -infixr:35 " ∧ " => Expr.bin_exp binary_op.and -infixr:30 " ∨ " => Expr.bin_exp binary_op.or +prefix:max "¬" => Expr.un_exp unary_op.not +infixr:35 " ∧ " => Expr.bin_exp binary_op.and +infixr:30 " ∨ " => Expr.bin_exp binary_op.or infixr:25 " ⇒ " => Expr.bin_exp binary_op.imp -infixr:20 " ⇔ " => Expr.bin_exp binary_op.iff +infixr:20 " ⇔ " => Expr.bin_exp binary_op.iff notation " ⊤ " => Expr.top_exp notation " ⊥ " => Expr.bot_exp @@ -140,10 +140,10 @@ def eval_bin_op : binary_op → (Bool → Bool → Bool) | binary_op.imp => implies | binary_op.iff => iff -def Interp := var → Bool +def Interp := var → Bool -- main semantic evaluation function -def eval_expr : Expr → Interp → Bool +def eval_expr : Expr → Interp → Bool | Expr.true_exp, _ => true | Expr.false_exp, _ => false | (Expr.var_exp v), i => i v @@ -155,27 +155,27 @@ def eval_expr : Expr → Interp → Bool /-! ## Satisfiability Properties -Next we present an improved version of or code for checking of -expressions for validity, satisfiability, and unsatisfiability. +Next we present an improved version of or code for checking of +expressions for validity, satisfiability, and unsatisfiability. One significant enhancement, suggested by Mikhail, is replacement -of our rather ponderous approach to generating the input sides of +of our rather ponderous approach to generating the input sides of truth tables with a single recursive function. We also use our new map, filter, and reduce functions to replace numerous specialized -instances. +instances. -### Truth Table Input Rows +### Truth Table Input Rows We had previousl developed an explanatory but ponderous approach to generating a list of of all lists of boolean input rows. The idea was to treat the each (input) row as a binary expansion of the row index -(a lit of bit), convert bits to bools, and add padding on the left. +(a lit of bit), convert bits to bools, and add padding on the left. Mikhail noticed that we could replace it all with a single recursive -function. +function. Exercise: Study this function definition until you understand fully how it works. Along the way, use it to generate a few outputs then -inspect them to be sure you know what the function does. Figure out +inspect them to be sure you know what the function does. Figure out the recursion works to the point you're confident you could write the code yourself. To test yourself, erase the implementation then write it again. @@ -184,7 +184,7 @@ it again. -- Mikhail def make_bool_lists: Nat → List (List Bool) | 0 => [[]] -| n' + 1 => (List.map (fun L => false::L) (make_bool_lists n')) ++ +| n' + 1 => (List.map (fun L => false::L) (make_bool_lists n')) ++ (List.map (fun L => true::L) (make_bool_lists n')) -- REVIEW @@ -194,7 +194,7 @@ def make_bool_lists: Nat → List (List Bool) #eval make_bool_lists 3 /-! -#### Bool List to/from Interpretation Function +#### Bool List to/from Interpretation Function Given a list of *n* Boolean values, [b₀, ..., bₙ₋₁], we have to be able to turn it into an *interpretation* function, so that we @@ -204,14 +204,14 @@ each vᵢ means (var.mk i). 
Our approach will be to start with a given interpretation (such as the *all false* interpretation) and then for each *bᵢ* in the -list of Booleans, we will iteratively *override* the function so +list of Booleans, we will iteratively *override* the function so that when it's used to evaluate the value of *vᵢ* it will return *bᵢ*. -/ -- Function override def override : Interp → var → Bool → Interp -| old_interp, var, new_val => +| old_interp, var, new_val => (λ v => if (v.n == var.n) -- when applied to var then new_val -- return new value else old_interp v) -- else retur old value @@ -225,7 +225,7 @@ where bools_to_interp_helper : (vars : Nat) → (vals : List Bool) → Interp | vars, h::t => let len := (h::t).length -- override recursively computed interp mapping variable to head bool - override (bools_to_interp_helper vars t) (var.mk (vars - len)) h + override (bools_to_interp_helper vars t) (var.mk (vars - len)) h /-! To think about: smells like some kind of fold. Iteratively combine @@ -249,14 +249,14 @@ def interp_to_list_bool : (num_vars : Nat) → Interp → List Bool | (n' + 1) , i => interp_to_list_bool n' i ++ [(i (var.mk n'))] -- From number of variables, list of interpretations, to list of Bool lists -def interps_to_list_bool_lists : Nat → List Interp → List (List Bool) +def interps_to_list_bool_lists : Nat → List Interp → List (List Bool) | vars, is => List.map (interp_to_list_bool vars) is /-! #### Maximum Variable Index in Expression We will consider the number of variables to include in a truth -table for a given expression to be the one plus the zero-based +table for a given expression to be the one plus the zero-based index of the highest-indexed variable in any given expression. For example, if an expression uses only *v₉* explicitly we will consider it to use all ten variables, *v₀* to *v₉* inclusive. @@ -267,7 +267,7 @@ def max_variable_index : Expr → Nat | Expr.false_exp => 0 | Expr.var_exp (var.mk i) => i | Expr.un_exp _ e => max_variable_index e - | Expr.bin_exp _ e1 e2 => max (max_variable_index e1) (max_variable_index e2) + | Expr.bin_exp _ e1 e2 => max (max_variable_index e1) (max_variable_index e2) /-! @@ -278,20 +278,20 @@ of the highest-indexed variable in the expression, plus one to account for the usual zero-based indexing. -/ -def num_vars : Expr → Nat := λ e => max_variable_index e + 1 +def num_vars : Expr → Nat := λ e => max_variable_index e + 1 /-! #### From Expression to List of Interpretations Given an expression, we compute the number, *n*, of variables it uses then we generate a list of all *2^n* interpretation functions -for it. Note that we just eliminate a whole raft of ponderous code -with a single clever recursive function, thanks to Mikhail. - +for it. Note that we just eliminate a whole raft of ponderous code +with a single clever recursive function, thanks to Mikhail. + -/ -- Number of variables to interpretations list using Mikhail's code def mk_interps_vars : Nat → List Interp -| n => List.map bool_list_to_interp (make_bool_lists n) +| n => List.map bool_list_to_interp (make_bool_lists n) -- From expression to a list of interpretations for it def mk_interps_expr : Expr → List Interp @@ -305,8 +305,8 @@ with a single line of code using List.map. The resulting list of Boolean values should reflect the values of the given expression under each interpretation in the list of interpretations. You will use map to convert a list of interpretations (for e) into a list of -Boolean values. --/ +Boolean values. 
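One possible shape for such a definition is sketched just below.
The sketch is illustrative only and uses a hypothetical name,
truth_table_outputs_sketch, so as not to collide with the
definitions that follow.
-/

-- Sketch (an assumption-laden illustration, not necessarily the
-- intended answer): map evaluation of e over the interpretations for e.
def truth_table_outputs_sketch (e : Expr) : List Bool :=
  List.map (fun i => eval_expr e i) (mk_interps_expr e)

/-!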
+-/ -- The column of truth table outputs for e def truth_table_outputs' : Expr → List Bool @@ -341,7 +341,7 @@ def reduce_and := List.foldr and true Finally we can define the API we want to provide for checking arbitrary propositional logic expressions for their satisfiability -properties: for being satisfiable, valid, or unsatisfiable. +properties: for being satisfiable, valid, or unsatisfiable. -/ def is_sat (e : Expr) : Bool := reduce_or (truth_table_outputs e) @@ -354,11 +354,11 @@ def is_unsat (e : Expr) : Bool := not (is_sat e) We now turn to the third and last major topic in this chapter. Given a propositional logic expression, *e*, a model finder finds -a model of *e* if there is one. It returns either a model of e if -there is one or a signal that there isn't one. +a model of *e* if there is one. It returns either a model of e if +there is one or a signal that there isn't one. To return either a model if there is one or a signal that there -isn't one, we could use a sum type: either a model on the left or +isn't one, we could use a sum type: either a model on the left or Unit.unit on the right to signal that there is no model. -/ @@ -366,7 +366,7 @@ def SomeInterpOrNone := Interp ⊕ Unit -- NB: this is a *type* /-! A better solution is to use the standard polymorphic Option type. -Its two constructors are *some α* and *none*. The first is used +Its two constructors are *some α* and *none*. The first is used to construct an option carrying a value, (a : α). The second is used (in lieu of *Sum.inr Unit.unit*) to indicate that there's no value to provide. @@ -381,7 +381,7 @@ def o2 := @Option.none Bool -- need to make type argument explicit /-! Here's the main API for our model finder. Given an expression, *e*, -return *some m*, *m* a model of *e* if there is one, or *none* if not. +return *some m*, *m* a model of *e* if there is one, or *none* if not. -/ #check @Option @@ -395,7 +395,7 @@ where find_model_helper : List Interp → Expr → Option Interp -- REVIEW --- Utility: convert a "Option model" into a list of Bools, empty for none +-- Utility: convert a "Option model" into a list of Bools, empty for none def some_model_or_none_to_bools : SomeInterpOrNone → (num_vars : Nat) → List Bool | Sum.inl i, n => interp_to_list_bool n i | Sum.inr _, _ => [] @@ -413,13 +413,13 @@ by generating an exhaustive list of all interpretations then filtering them to save those that make *e* true. -/ -def find_models (e : Expr) := - List.filter -- filter on +def find_models (e : Expr) := + List.filter -- filter on (λ i => eval_expr e i) -- i makes e true (mk_interps_expr e) -- over all interps --- Render models of e : Expr as List of Bool Lists (num_vars e long) +-- Render models of e : Expr as List of Bool Lists (num_vars e long) def find_models_bool : Expr → List (List Bool) | e => interps_to_list_bool_lists (num_vars e) (find_models e) @@ -437,14 +437,14 @@ def count_models := List.length ∘ find_models /-! ### Counter-Example Generator -More interesting, and oft used, is *counter-example finding*. When -we say we want to *disprove* a proposition, mean is that we want to +More interesting, and oft used, is *counter-example finding*. When +we say we want to *disprove* a proposition, mean is that we want to show that it's not *valid*: that there's at least one interpretation that makes the proposition is false. If that is so, then it makes the negation of the proposition true. 
Counterexamples, if there are any, are models of the negation of a propositio; and we now know how to find such models using our model finder. Defining a counter-example -finder is thus trivial. +finder is thus trivial. -/ def find_counterexamples (e : Expr) := find_models (¬e) @@ -466,13 +466,13 @@ def Z := {var.mk 2} #eval List.foldr and true (truth_table_outputs (X ∧ Y)) /-! -Is it true that if X being true makes Y true, then does X being +Is it true that if X being true makes Y true, then does X being false make Y false? -/ #check ((X ⇒ Y) ⇒ (¬X ⇒ ¬Y)) -#eval is_valid ((X ⇒ Y) ⇒ (¬X ⇒ ¬Y)) -#reduce find_counterexamples_bool ((X ⇒ Y) ⇒ (¬X ⇒ ¬Y)) +#eval is_valid ((X ⇒ Y) ⇒ (¬X ⇒ ¬Y)) +#reduce find_counterexamples_bool ((X ⇒ Y) ⇒ (¬X ⇒ ¬Y)) #reduce (implies (implies false true) (implies true false)) /-! @@ -481,8 +481,8 @@ then does Y being false imply X is false? -/ #check ((X ⇒ Y) ⇒ (¬Y ⇒ ¬X)) -#eval is_valid ((X ⇒ Y) ⇒ (¬Y ⇒ ¬X)) -#reduce find_counterexamples_bool ((X ⇒ Y) ⇒ (¬Y ⇒ ¬X)) +#eval is_valid ((X ⇒ Y) ⇒ (¬Y ⇒ ¬X)) +#reduce find_counterexamples_bool ((X ⇒ Y) ⇒ (¬Y ⇒ ¬X)) /-! @@ -509,13 +509,13 @@ Search for models (returns list of functions) -/-! +/-! Search for models (returns list of list of bools) --/ +-/ #eval find_models_bool (X ∧ ¬ X) -- [] #eval find_models_bool (X ∨ ¬ X) -- [[false], [true] -#eval find_models_bool (X ∧ Y) -- [[true, true]] +#eval find_models_bool (X ∧ Y) -- [[true, true]] #eval find_models_bool (¬(X ∧ Y) ⇒ ¬X ∨ ¬Y) -- all four interps #eval find_models_bool ((X ⇒ Y) ⇒ (Y ⇒ Z) ⇒ (X ⇒ Z)) -- all eight interps @@ -528,5 +528,5 @@ Forthcoming: - Expand make_bool_lists applied to values 0-3. - Validate a list of standard inference rules. - Find the Fallacies, Explain Counterexamples. -- Replace ponderous function definition using map. --/ \ No newline at end of file +- Replace ponderous function definition using map. +-/ diff --git a/Student/lecture_17.lean b/Student/lecture_17.lean new file mode 100644 index 00000000..3ae264ef --- /dev/null +++ b/Student/lecture_17.lean @@ -0,0 +1,98 @@ +#check Empty + +-- inductive Empty : Type + +inductive Chaos : Type + +def from_empty (e : Empty) : Chaos := nomatch e + +#check False + +-- inductive False : Prop + +def from_false {P : Prop} (p : False) : P := False.elim p + +def from_false_true_is_false (p : False) : True = False := False.elim p + +/-! +Unit ==> True +-/ + +#check Unit +-- inductive PUnit : Sort u where +-- | unit : PUnit + +#check True +-- inductive True : Prop where +-- | intro : True + +#check True.intro + +-- no elimination rule + +def proof_of_true : True := True.intro + +def false_implies_true : False → True := λ f => False.elim f + +/-! +Prod => And +-/ + +#check Prod + +#check And + +inductive Birds_chirping : Prop +| yep +| boo + +inductive Sky_Blue : Prop +| yep + +#check (And Birds_chirping Sky_Blue) + +theorem both_and : And Birds_chirping Sky_Blue := And.intro Birds_chirping.boo Sky_Blue.yep + +/-! 
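An aside added here for illustration (not from the lecture): the
two And elimination rules recover each side of the proof just built.
-/

example : Birds_chirping := both_and.left
example : Sky_Blue := both_and.right

/-!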
+Proof Irrelevance +-/ + +namespace cs2120f23 + +inductive Bool : Type +| true +| false + +theorem proof_equal : Birds_chirping.boo = Birds_chirping.yep := by trivial + +-- In Prop all proofs are equivalent +-- Choice of values in Prop can't be influence computations + +#check Sum +#check Or + +theorem one_or_other : Or Birds_chirping Sky_Blue := Or.inl Birds_chirping.yep +theorem one_or_other' : Or Birds_chirping Sky_Blue := Or.inr Sky_Blue.yep + +example : Or Birds_chirping (0=1) := Or.inl Birds_chirping.yep +example : (0=1) ∨ (1=2) := _ + +theorem or_comm {P Q : Prop} : P ∨ Q → Q ∨ P := +λ d => + match d with + | Or.inl p => Or.inr p + | Or.inr q => Or.inl q + +/-! +Not (no) +-/ + +def no (α : Type) := α → Empty + +#check Not + +example : no Chaos := λ c => nomatch c + +inductive Raining : Prop + +example : ¬ Raining := λ (r : Raining) => nomatch r From 5c0ec00bf8500076e71f3b725b67d76b32b834b8 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Thu, 9 Nov 2023 21:00:05 +0000 Subject: [PATCH 12/15] f --- Student/hw9.lean | 221 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) create mode 100644 Student/hw9.lean diff --git a/Student/hw9.lean b/Student/hw9.lean new file mode 100644 index 00000000..d3c9bf56 --- /dev/null +++ b/Student/hw9.lean @@ -0,0 +1,221 @@ +/-! +# Examples and Excluded Middle +-/ + +/-! + +## Examples + +We begin with examples to review and reinforce lessons so far. + +### ¬False is Valid (True Under Any Interpretation) + +This example will teach and reinforce the idea that, to prove +¬P, for any proposition P, you show that an assumed proof of +P leads to a contradiction (a proof of False). Formally, you +show P → False (by implementing a function of this type), from +which it follows that P is uninhabited (there is assuredly no +proof). +-/ +example : ¬False := +λ f => False.elim f + + +/-! +### The *No Contradiction* Rule is Valid + +This example teaches and reinforces the idea that a proof, +np, of a proposition, ¬P, works like a function, namely one +that takes a proof of P as an assumed argument, and that in +this context constructs and returns a proof of False. The +upshot is that if you have in your context both a proof, p, +of some proposition, P, and a proof, np, of ¬P, then you +have both a function, np: P → false, and an argument, p : P, +from which you can derive a proof of False by applying np +to p: (np p). +-/ +example (P : Prop) : ¬(P ∧ ¬P) := +λ (⟨ p, np ⟩) => + np p + + +/-! +### Transitivity of Implication is Valid +Prove the transitivity of implication. Note carefully +the relationship of this proof to function composition. +-/ +example (P Q R : Prop) : (P → Q) → (Q → R) → (P → R) := +fun pq qr => + fun p => + qr (pq p) + +-- Here it is in Type, as function composition from HW#3 +example (α β γ : Type) : (α → β) → (β → γ) → (α → γ) := +fun ab bc => + fun a => + bc (ab a) + +/-! +### Or Distributes Over And + +This example emphasizes how one reasons when given an +assumption that some disjunction is true: it's by case +analysis. There are two different ways in which that +assumed disjunction could be true, and you need to show +that the conclusion follows *in either case*. So first +you assume the first case holds, and show the conclusion +follows, then you assume the second case and show that +the conclusion still follows. Therefore the conclusion +follows in either case. Of course in general you might +have to do additional complex reasoning for each case. 
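As a warm-up (an aside added here for illustration, not part of
the original file), here is the same case-analysis pattern applied
to the simplest possible disjunction:
-/

example (P : Prop) : P ∨ P → P
| Or.inl p => p
| Or.inr p => p

/-!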
+-/ +example (P Q R : Prop) : P ∨ (Q ∧ R) → (P ∨ Q) ∧ (P ∨ R) +| Or.inl p => ⟨ Or.inl p, Or.inl p ⟩ +| Or.inr ⟨ q, r ⟩ => ⟨ Or.inr q, Or.inr r⟩ + +/-! +### At least one of DeMorgan's Laws is Valid + +This example reinforces the idea that if you have to +return a proof of a negation, ¬X, what you really have +to return is a function of type X → False. Note that +the return values here are lambda expressions. +-/ +example (A B : Prop) : ¬A ∨ ¬B → ¬(A ∧ B) +| Or.inl na => + λ ⟨ a, _ ⟩ => na a +| Or.inr nb => + λ ⟨ _, b ⟩ => nb b + +/-! +### Not All Classical Reasoning is Constructively Valid + +Now here's a proposition that seems that it ought to be +true. It is *classically* true. You can check it using our +validity checker, with A and B as propositional variables. +But we struggled to prove it *constructively* in class, and +there's a good reason for that: it's impossible to construct +a proof of either ¬A or ¬B from a proof of ¬(A ∧ B). This one +of the four DeMorgan's laws is clasically valid but it is +*not valid* in constructive logic. No wonder we had trouble +proving it! +-/ + +example (A B : Prop) : ¬(A ∧ B) → (¬A ∨ ¬B) +| nab => _ -- nowhere to go, we're stuck + +/-! +So are we stuck in Lean with a logic that's actually weaker +that propositional or first-order predicate logic--capable of +proving fewer theorems? What constructive lacks relative to +classical logics is *the law of the excluded middle* an axiom. + +-/ + +/-! +## The Law of the Excluded Middle as an Independent Axiom +-/ + +/-! +In classical logics, a proposition is either true or false. In +constructive logic, a proposition, P, is true if there's a proof +of it, or it's false if there's a proof of ¬P, but its validity +is indeterminate if there isn't a proof either way. Consequently, +the proposition, P ∨ ¬P, is not necessarily valid. We might not +have either a proof of P or of ¬P, in which case we can't prove +the disjunction. +-/ + +example : X ∨ ¬X := _ -- no proof; not *constructively valid* + +/-! +A key difference between constructive and classical logic is that +the latter takes the *law of the excluded middle (em)* as an axiom. +It lets you to assume that for any proposition, P, *P ∨ ¬P* is true, +and you can therefore do a case analysis, with one case where P is +true (and in Lean has a proof) and one case where ¬P is true (and +has a proof). + +Taking the law of the excluded middle turns constructive logic back +into a classical logic. There is no longer a middle, indeterminate, +state to consider. For each proposition, there are only two possible +states: that it's true or that its negation is true. Acccepting this +assumption as an axiom makes it possible once again to prove that +our variant of DeMorgan's law is valid. So let's dive in and see how +this works. +-/ + +/-! +First, here's the formal statement of the law of the excluded middle. +It says that if you give and proposition, P, it'll return a proof/value +of P ∨ ¬P. +-/ +axiom em (P : Prop): P ∨ ¬ P + +/-! +The axiom keywork instructs Lean to accept a definition +without a proof, or implementation. What we have here is +the definition of *em* (for *excluded middle*) as being a +kind of *proof generator:* you feed it any proposition, +*P*, and it returns a *proof* of the proposition, *P ∨ ¬P*. + +Now here's the crucial trick: Once you have such a proof, +you can do *case analysis* on it. In the first case (Or.inl p), +you'll have a proof *p : P*. In the second case, you'll have a +proof, np, of ¬P. 
One has thus excluded the possibility of a +"middle" case, where one doesn't have a proof either way. +-/ + +/-! +Now, of course, proving a proposition, X ∨ ¬X, is trivial, +no matter what proposition X represents. The validity of +*X ∨ ¬X* follows from a simple application of em to X. +-/ + + +def P : Prop := _ +#check em P + +/-! +So we've now seen that if given any proposition, P, and +the axiom of the excluded middle, you can always obtain +a proof of P ∨ ¬P for free. The trick now, again, is to do +case analysis on that proof, There are just two cases: +Or.inl with a proof (p : P) or Or.inr with a proof(np : ¬P). +The "middle" case, where we don't don't a proof either +way, no longer needs to be considered. +-/ +example : X ∨ ¬X := em X + +/-! +### Use *em* For Classical Reasoning + +Accepting the law of the excluded middle as an axiom puts +us back in classical reasoning space. Using it, you can get +yourself proofs of both A ∨ ¬A and B ∨ ¬B, and now, to prove +the validity of the DeMorgan's law variant that had us hung up, +it's just a matter of showing that the proposition is true in each +of the four resulting cases. + +HOMEWORK: Complete this proof. +-/ + +example (A B : Prop) : ¬(A ∧ B) -> ¬A ∨ ¬B := +λ nab => +let proof_of_aornota := em A +let proof_of_bornotb := em B +match proof_of_aornota with + | Or.inl a => match proof_of_bornotb with + | Or.inl b => Or.inr (λ b => nab (And.intro a b)) + | Or.inr nb => Or.inr nb + | Or.inr na => Or.inl na + +example (A B : Prop) : ¬(A ∧ B) -> ¬A ∨ ¬B := +λ nab => +let proof_of_aornota := em A +let proof_of_bornotb := em B +match proof_of_aornota with + | Or.inl a => match proof_of_bornotb with + | Or.inl b => False.elim (nab (And.intro a b)) + | Or.inr nb => Or.inr nb + | Or.inr na => Or.inl na From f45a1b50a1f03d12d8ff59efff4a1f6e98f45500 Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Tue, 14 Nov 2023 20:43:37 +0000 Subject: [PATCH 13/15] f --- Instructor/Lectures/lecture_18.lean | 3 + Student/lecture_18.lean | 202 ++++++++++++++++++++++++++++ 2 files changed, 205 insertions(+) create mode 100644 Student/lecture_18.lean diff --git a/Instructor/Lectures/lecture_18.lean b/Instructor/Lectures/lecture_18.lean index e7df2fe7..72e991ed 100644 --- a/Instructor/Lectures/lecture_18.lean +++ b/Instructor/Lectures/lecture_18.lean @@ -166,6 +166,9 @@ def pythagorean_triple : Nat → Nat → Nat → Prop - Write an expression for the set of all even length strings -/ + + + /-! ## Quantifiers diff --git a/Student/lecture_18.lean b/Student/lecture_18.lean new file mode 100644 index 00000000..e7df2fe7 --- /dev/null +++ b/Student/lecture_18.lean @@ -0,0 +1,202 @@ +import Mathlib.Init.Set + +/-! +# Predicates + +You've seen that in predicate logic, a proposition is a +declarative statement that asserts that some state of affairs +holds in some domain of discourse. For example, *the natural +number, four, is an even* is a proposition. We've seen one +way to formalize such a proposition using the mod operator. +-/ + +#check 4 % 2 = 0 + +/-! +## Families of Propositions + +Indeed, there is an infinite family of propositions, all just +like this on~`e except for the particular number we plug in instead +of four. As another example, *the natural number, five, is even* +is also a proposition. And there's one such proposition for each +and every natural number. + +We can write this family of propositions by *abstracting* the value, +four, to a variable: e.g., *the natural number, n, is even*, where +*n* can be any natural number. Now we have a predicate. 
Applying it +to a specific number then returns a proposition about that number. + +We could say that applying the predicate, *the natural number, n, +is even*, to the specific number, four, returns the *proposition*, +*the natural number, four, is even.* + +In Lean and related logics, we represent a predicate as a +*function:* from one or more parameter values to propositions. +Here's our simple example reformulated. +-/ + +def is_even : Nat → Prop := λ n => n % 2 = 0 +#check is_even 4 +#reduce is_even 4 +#check is_even 5 + +/-! +You can see that is_even is a predicate by checking its type. +Indeed, it's a function from a natural number to a proposition +*about* that number: namely that the given number mod two is +zero. The type of our predicate is thus *Nat → Prop*. +-/ +#check (is_even) -- Nat → Prop + +/-! +## Applying a Predicate to Arguments Yields a Proposition + +Given a predicate we derive a proposition by *applying* it to one +or more arguments of the specified types. The *is_even* predicate +is appliable to a natural number as an argument. Here are two examples +applying the is_even predicate. +-/ + +#check is_even 4 +#check is_even 5 +/-! +Note that Lean reduced n%2 in each case to 0 or 1, leaving us +with simpler propositions involving just 0 and 1. +-/ + +/-! +## To Satisfy a Predicate +We will say that specific parameter values *satisfy* a predicate +if they yield a proposition that is true. In a sense, a proposition +thus specifies a *property* (such as that of being even) that a value +might or might not have. For example, four has the property of being +even but five doesn't. + +## Predicates Specify Properties + +In this way a predicate picks out the subset of parameter values with +a specified *property*. As an example, we can make a list of natural +numbers from 0 to 5, apply *is_even* to each, determine which resulting +propositions are true, and thus *pick out* the natural numbers with the +property of being even. +-/ + +#reduce is_even 0 -- ✓ +#reduce is_even 1 -- × +#reduce is_even 2 -- ✓ +#reduce is_even 3 -- × +#reduce is_even 4 -- ✓ +#reduce is_even 5 -- × + +/-! +Indeed, as we'll see in more depth shortly, we can understand +the *set* of all objects having a particular property as those +objects that satisfy the predicate that specifies the property. +In Lean, we can specify the set of even numbers as follows, and +*evens* here becomes nothing but a shorthand for is_even. We'll +see more of this top when we get to lectures on set theory. +-/ + +def evens : Set Nat := { n | is_even n } +#reduce evens 4 +#reduce evens 5 + + +/-! +## Predicates of Multiple Arguments + +Predicates can take any number of arguments. Here are some examples. +-/ + +/-! +### Ordered pairs of numbers and their squares +-/ + +def square_pair : Nat × Nat → Prop +| (n1, n2) => n2 = n1^2 + +#reduce square_pair (1, 1) -- ✓ +#reduce square_pair (2, 4) -- ✓ +#reduce square_pair (3, 9) -- ✓ +#reduce square_pair (5, 20) -- × + +def square_pairs : Set (Nat × Nat) := { p : Nat × Nat | square_pair p } +#reduce square_pairs (3, 9) +#reduce square_pairs (3, 10) +#reduce (3, 9) ∈ square_pairs + +/-! +Here it is again but with two arguments rather than one pair. +This material is new relative to that presented in class. Take +an extra few minutes to study the precise differences in syntax +and sense between these two examples. In one, separate arguments +are packed into pairs, whereas in the second, they're not. They're +disaggregated. 
+-/ +def square_pair' : Nat → Nat → Prop +| n1, n2 => n2 = n1^2 + +#reduce square_pair' 1 1 -- ✓ +#reduce square_pair' 2 4 -- ✓ +#reduce square_pair' 3 9 -- ✓ +#reduce square_pair' 5 20 -- × + +def square_pairs' : Set (Nat × Nat) := { p : Nat × Nat | square_pair' p.1 p.2 } +#reduce square_pairs +#reduce (3, 9) ∈ square_pairs + +/-! +When we specify multi-argument predicates our practice +is to present the arguments one by one in disaggregated +form. Among other things we can then more easily partially +evaluate the function on any one of its actual parameters. +-/ + +/-! +### Pythagorean triples +-/ + +def pythagorean_triple : Nat → Nat → Nat → Prop +| h, x, y => h^2 = x^2 + y^2 + +/-! +### Exercises + +- Write a predicate for the property of being an even-length string +- Write an expression for the set of all even length strings +-/ + +/-! +## Quantifiers + +Quantifiers are part of the syntax of predicate logic. They allow one +to assert that every object (∀) of some type has some property, or that +there exists (∃) (there is) at least one object of a given type with a +specified property. The syntax of such propositions is as follows: + +- ∀ (x : T), P x +- ∃ (x : T), P x + +### Universal Quantification + +The first proposition can be read as asserting that every value *x* of +type *T* satisfies predicate *P*. Another way to say this is that for +every *x* there is a proof of the proposition, *P x*. Another way to +say that is that there's a function that when given any *x* returns a +proof of *P x*. Indeed, that's how we prove such a proposition: show +that if given any *x* you can produce and return a proof of *P x*. +-/ + +-- Here + + + + +/- +(6) Use "example" in Lean again to state and prove that +⟨ "I love Logic!", 1 ⟩ ∉ str_eq_lens. That's shorthand +notation for ¬("I love Logic!", 1⟩ ∈ str_eq_lens. And you +know what that means. +-/ + +-- Here From d9b5093b86d42bbadc139f1a06f86ec244e7c13b Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Tue, 21 Nov 2023 18:00:17 +0000 Subject: [PATCH 14/15] k --- Instructor/Lectures/lecture_18.lean | 2 + Student/lecture_18.lean | 69 +++++++++++++++++++++++++++++ Student/lecture_19.lean | 56 +++++++++++++++++++++++ 3 files changed, 127 insertions(+) create mode 100644 Student/lecture_19.lean diff --git a/Instructor/Lectures/lecture_18.lean b/Instructor/Lectures/lecture_18.lean index 72e991ed..b56ac0a2 100644 --- a/Instructor/Lectures/lecture_18.lean +++ b/Instructor/Lectures/lecture_18.lean @@ -159,6 +159,8 @@ evaluate the function on any one of its actual parameters. def pythagorean_triple : Nat → Nat → Nat → Prop | h, x, y => h^2 = x^2 + y^2 + + /-! ### Exercises diff --git a/Student/lecture_18.lean b/Student/lecture_18.lean index e7df2fe7..25b9c345 100644 --- a/Student/lecture_18.lean +++ b/Student/lecture_18.lean @@ -166,6 +166,75 @@ def pythagorean_triple : Nat → Nat → Nat → Prop - Write an expression for the set of all even length strings -/ +/- +### Homework +-/ + +/- +(1) Define a predicate, ev_len_str, expressing the property of a string of being of an even-length. +-/ + +-- Here + +def ev_len_str : String → Prop +| s => s.length %2 =0 + + +/- +(2) Use #check to typecheck an expression for the set of all +even length strings. +-/ + +-- Here + +#check {s: String | ev_len_str s} + + +/- +(3) Define a predicate, str_eq_len, applicable to any +String value, s, and to any Nat value, n, that is satisfied +just in those cases where s.length equals n. 
+-/ + +-- Here + +def str_eq_len : String → Nat → Prop +| s, n => s.length = n + +/- +(4) Define str_eq_lens : set String × Nat, to be the *set* +of all ordered pairs, p = ⟨ s, n ⟩, such that n = s.length. +-/ + +-- Here + +def str_eq_lens : Set (String × Nat) := { p | str_eq_len p.1 p.2 } + + + +/- +(5) Use "example" in Lean to state and prove the proposition +that ⟨ "I love Logic!", 13 ⟩ ∈ str_eq_lens. +-/ + +-- Here + +example : ⟨"I love Logic!",13 ⟩ ∈ str_eq_lens := rfl + + + +/- +(6) Use "example" in Lean again to state and prove that +⟨ "I love Logic!", 1 ⟩ ∉ str_eq_lens. That's shorthand +notation for ¬("I love Logic!", 1⟩ ∈ str_eq_lens. And you +know what that means. +-/ + +-- Here + +example : ⟨"I love Logic!",1⟩ ∉ str_eq_lens := +λ (t : ⟨"I love Logic!",1⟩ ∈ str_eq_lens) => nomatch t + /-! ## Quantifiers diff --git a/Student/lecture_19.lean b/Student/lecture_19.lean new file mode 100644 index 00000000..3214cf87 --- /dev/null +++ b/Student/lecture_19.lean @@ -0,0 +1,56 @@ +/-! +# Quantifiers + +Quantifiers are part of the syntax of predicate logic. They allow one +to assert that every object (∀) of some type has some property, or that +there exists (∃) (there is) at least one object of a given type with a +specified property. The syntax of such propositions is as follows: + +- ∀ (x : T), P x +- ∃ (x : T), P x + +## Universal Quantification + +The first proposition can be read as asserting that every value *x* of +type *T* satisfies predicate *P*. Another way to say this is that for +every *x* there is a proof of the proposition, *P x*. Another way to +say that is that there's a function that when given any *x* returns a +proof of *P x*. Indeed, that's how we prove such a proposition: show +that if given any *x* you can produce and return a proof of *P x*. +-/ + +example : ∀ (n : ℕ), True := fun n => True.intro + +#check ∀ (n : Nat), True + +-- dependent pair, the second value depends on the first value + +example : ∃ (n:Nat), True := ⟨ 3, True.intro ⟩ + +example : ∃ (n:Nat), n%2=0 := ⟨ 4, rfl ⟩ + + +/- +a proof of a forall proposition, is really just a proof that for any arbitrarily +selected element of the quantified type, there is a proof of the predicate applied to that object +-/ + +def zornz'' (n : Nat) : n = 0 ∨ n ≠ 0 := +match n with + | 0 => Or.inl rfl -- proves an equality + | n' + 1 => Or.inr (fun _ => nomatch n') + +def zornz' : (n : Nat) → n = 0 ∨ n ≠ 0 + | 0 => Or.inl rfl -- proves an equality + | n' + 1 => Or.inr (fun _ => nomatch n') + +def zornz : ∀ (n : Nat), n = 0 ∨ n ≠ 0 + | 0 => Or.inl rfl -- proves an equality + | n' + 1 => Or.inr (fun _ => nomatch n') + + +/-! +### ∃ (there exists) +-/ + +def sl5 : ∃ (s : String), s.length = 5 := ⟨"Hello", rfl ⟩ From 2dab361f530728ba455a7437af39000bdd8ec57b Mon Sep 17 00:00:00 2001 From: rahilkarkar Date: Fri, 24 Nov 2023 18:56:16 +0000 Subject: [PATCH 15/15] f --- Student/hw10.lean | 330 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 330 insertions(+) create mode 100644 Student/hw10.lean diff --git a/Student/hw10.lean b/Student/hw10.lean new file mode 100644 index 00000000..4ded74b6 --- /dev/null +++ b/Student/hw10.lean @@ -0,0 +1,330 @@ +/-! +# Quantifiers: Existential Quantification (∃) + +We now turn to the second of the two quantifiers in +predicate logic: the existential operator, *∃.* It is +used to write propositions of the form, *∃ (x : T), P x*. +This proposition is read as asserting that *there is some +(at least one) value of type, T, that satisfies P*. 
As +an example, we repeat our definition of the *is_even* +predicate, and then write a proposition asserts that +there is (there exists) *some* even natural number. +-/ + +-- Predicate: defines property of *being even* +def is_even : Nat → Prop := λ n => n % 2 = 0 + +-- Proposition: there exists an even number +#check ∃ (n : Nat), is_even n + +/-! +## Introduction + +In the *constructive* logic of Lean, a proof of +a proposition, *∃ (x : T), P x*, has two parts. It's +a kind of ordered pair. The first element is a specific +value, *w : T*, that satisfies *P*. The second element +is a proof that *w* satisfies *P* (a proof of *P w*). +That we have an object *w* along with a proof of *P w* +shows that there does exist some object with property +*P* (namely *w*). + +This introduction rule in Lean is called *Exists.intro.* +It takes two arguments: (1) a value, *w : T*, and a proof +of *P w*. Here's a simple example showing that there exists +an even number, with *4* as a witness. +-/ + +example : exists (n : Nat), is_even n := Exists.intro 4 rfl + +/-! +The witness is 4 and the proof (computed by rfl) is a +proof of 4 % 2 = 0, which is to say, of 0 = 0. Try *5* +instead of *4* to see what happens. +-/ + +/-! +Lean provides ⟨ _, _ ⟩ as a notation for Exists.intro. +-/ + +example : exists (n : Nat), is_even n := ⟨ 4, rfl ⟩ + +/-! +Another example: Suppose we have a proof that Iris is +a blue dog. Can we prove that there exists a blue dog? +-/ + +namespace bluedog +variable + (Dog : Type) -- There are dogs + (Iris : Dog) -- Iris is one + (Blue : Dog → Prop) -- The property of being blue + (iris_is_blue : Blue Iris) -- Proof that Iris is blue + +-- A proof that there exists a blue dog +example : ∃ (d : Dog), Blue d := ⟨ Iris, iris_is_blue ⟩ +end bluedog + +/-! +## An Aside on Constructive Logic + +The term *constructive* here means that to prove that +something with a particular property exists, you have +to actually have such an object (along with a proof). +Mathematicians generally do *not* require constructive +proofs. In other words, mathematicians are often happy +to show that something must exist even if they can't +construct an actual example. + +We call proofs of this kind non-constructive. We saw +a similar issue arise with proofs of disjunctions. In +particular, we saw that a *constructive* proof of a +disjunction, *X ∨ ¬X,* requires either a proof of *X* +or a proof of *¬X*. Accepting the law of the excluded +middle as an axiom permits non-constructive reasoning +by accepting that *X ∨ ¬X* is true without the need +to construct a proof of either case. + +What one gains by accepting non-constructive reasoning +is the ability to prove more theorems. For example, we +can prove all four of DeMorgan's laws if we accept the +law of the excluded middle, but only three of them if +not. + +So what does a non-constructive proof of existence look +like? Here's a good example. Suppose you have an infinite +sequence of non-empty sets, *{ s₀, s₁, ...}. Does there +*exist* a set containing one element from each of the sets? + +It might seem obvious that there is such a set; and in +many cases, such a set can be *constructed*. For example, +suppose we have an infinite sequence of sets of natural +numbers (e.g., { {1, 2}, {3, 4, 5}, ... }). The key fact +here is that every such set has a smallest value. We can +use this fact to define a *choice function* that, when +given any such set, returns its smallest value. 
We can then use this choice function to define a set containing
one element from each of the sets, namely the smallest one.

There is no such choice function for sets of real numbers,
however. Certainly not every such set has a smallest value:
just consider the set {1, 1/2, 1/4, 1/8, ...}. It does not
contain a smallest number, because no matter what non-zero
number you pick (say 1/8) you can always divide it by 2 to
get an even smaller one. Given such sets, there's no choice
function that can reliably return a value from each set.

As it turns out, whether or not you accept that there exists
a set of elements, one from each of an infinity of sets, is
your decision. If you want to operate assuming that there is
such a set, then you accept what mathematicians call the
*axiom of choice*. It's another axiom you can add to the
constructive logic of Lean without causing any kind of
contradictions to arise.

The axiom of choice is clearly non-constructive: it gives
you proofs of the existence of such sets for free. Most
working mathematicians today freely accept the axiom of
choice, and so they accept non-constructive reasoning.

Is there a downside to such non-constructive reasoning?
Constructive mathematicians argue yes, that it leads to
the ability to prove highly counter-intuitive results.
One of these is called the *Banach-Tarski* paradox: a
proof (using the axiom of choice) that there is a way to
cut up and reassemble a sphere that doubles its volume!
[Wikipedia article here.](https://en.wikipedia.org/wiki/Banach%E2%80%93Tarski_paradox)
-/

/-!
As with excluded middle, you can easily add the axiom
of choice to your Lean environment to enable classical
(non-constructive) reasoning in Lean. We will not look
further into this possibility in this class.
-/

/-!
## Elimination Rule for ∃

Now suppose you have a proof of a proposition, *∃ (x : T),
P x*. That is, suppose you have *pf : ∃ (x : T), P x.* How
can you *use* such a proof?

Here's the key idea: if you know that *∃ (x : T), P x*,
then you can deduce two facts: (1) there is some object,
call it *(w : T)*, for which (2) there is a proof, *pw*,
that *w* satisfies *P* (a proof of *P w*). The elimination
rule gives us these objects to work with.

Recall that the introduction rule takes a specific value,
*w*, and a proof, *pf : P w*, that that value has property
*P*. Elimination destructures such a proof. What it gives
you back, however, is not the *specific* witness used to
create the proof, but rather an arbitrary value, *w : T*,
along with a proof of *P w*. For this reason, you will see
that proofs of existence are called *information hiding*
objects. A specific witness is no longer available from a
proof of existence.

The easiest way to apply elimination is by pattern matching,
as in the following example. It shows that if there exists a
number satisfying both *True* and *is_even*, then there's a
natural number that's even. Note that what matching gives you
is not the specific value used to form the proof, but an
*arbitrary* value, *w*, and a proof, *pf : P w*. That is what
you have to work with after applying the elimination rule.
-/

/-!
### Examples

Here's an example. We want to show that if we have a proof,
*pf*, that there's a natural number, *n*, that satisfies
*True* and *is_even*, then there's a natural number, *f*,
that satisfies just *is_even*.
+-/ + +def ex1 : (∃ (n : Nat), True ∧ is_even n) → (∃ (f : Nat), is_even f) +| ⟨ w, pf_w ⟩ => Exists.intro w pf_w.right + +/-! +To show this we destructure *pf* as *⟨ w, pf_w ⟩*. This +gives us a witness, *w : Nat* (whose value we do not know), +along with a proof, *pf_w*, that *w* (whatever value it is) +satifies both *True* and *is_even.* Surely then *w* satisfies +*is_even* by itself. That's the insight. + +We can thus form the desired proof by applying Exists.intro +to *w* and a proof that *w* satisfies *is_even.* Here *w* +is the witness (value unknown) obtained by destructuring the +assumed proof of the premise. We know it's and so will be able +to use it as a witness in a proof that there is an even number. +Now *pf_w* is then an assumed proof that *w* satisfies both +*True* and *is_even*. From this proof we can derive a proof +that *w* satisfies *is_even* (by and elimination right). To +prove there exists an even number, then, we just apply +Exists.intro to *w* and to *pf_w.right*. (You can use *.2* +instead of *.right* in this expression). + +In English we might say this. Prove that if there's a number +that is True and even then there's a number that's even. + +Proof: Assume there's a number that is True and even. We +can then deduce that there is number, *w*, for which there +is a proof, *pf* that *w* is *True* and *w* is *even*. +From that proof, *pf,* by *and elimination right,* we can +deduce there's a proof, *pf_w_even*, that *w* is even. +So we now have a witness, *w*, and a proof that *w* is +even, so we can form a proof that there exists a number +that's even: ⟨ w, pf_w_even ⟩. +-/ + +/-! +## Worked Exercise + +Formalize and prove the proposition that if there's +someone everyone loves, then everyone loves someone. + +An informal, English language proof is a good way to +start. + +Proof. Assume there exists someone, let's call them *Beau*, +whom every person, *p*, loves. What we need to show is that +everyone loves someone. To prove this generaliation, we'll +assume that *p* is an *arbitrary* person and will show that +there is someone *p* loves. But everyone loves *beau* so, +by universal specialization, *p* loves Beau. Because *p* is +arbitrary, this shows (by *forall introduction*) that every +person loves someone (namely *beau*). +-/ + +namespace cs2120f23 +variable + (Person : Type) + (Loves : Person → Person → Prop) + +example : + -- if there's someone everyone loves + (∃ (beau : Person), ∀ (p : Person), Loves p beau) → + -- then everyone loves someone + (∀ (p : Person), ∃ (q : Person), Loves p q) + +-- call the person everyone loves beau +-- call the proof everyone loves beau everyone_loves_beau +| ⟨ beau, everyone_loves_beau ⟩ => +-- prove everyone loves someone by ∀ introduction +-- assume you're given an arbitrary person, p + fun (p : Person) => +-- then show that there exists someone p loves +-- with beau as a witness +-- and a proof p loves beau (by universal specialization) + ⟨beau, (everyone_loves_beau p)⟩ +end cs2120f23 + +/-! +Here's the same logical story presented in a +more abstract form, using *T* instead of *Person* +and *R : T → T → Prop* to represent the binary +relation (previously *Loves*) on objects of type +*T*. +-/ + +variable + (T : Type) + (R : T → T → Prop) + +-- Here +example : (∃ (p : T), (∀ (t : T), R t p)) → + (∀ (p : T), (∃ (t : T), R p t)) +| ⟨ w, pf_w ⟩ => (fun (p : T) => ⟨ w, pf_w p ⟩) + +/-! 
+In mathematical English: Given a binary relation, +*R*, on objects of type *T*, if there's some *p* +such that forall *t*, *R t p* (every *t* is related +to *p* by *R*), then for every *p* there is some *t* +such that *R p t* (every *p* is related to some *t*). +In particular, every *p* is related to *w*, the person +*everyone* loves. So everyone loves someone. +-/ + +/-! +## Homework +-/ + +variable + (Dog : Type) + (Cat : Type) + (dogLikesCat : Dog → Cat → Prop) + (dogLikesDog : Dog → Dog → Prop) + (catLikesCat : Cat → Cat → Prop) + (c : Cat) + (d : Cat) + (pf_c : ∀ (c d : Cat), catLikesCat c d) + +-- 1. Every dog likes some cat. + +#check ∀ (d : Dog), ∃ (c : Cat), dogLikesCat d c + +-- 2. If any dog, d, likes any dog, g, and that dog, g, likes any dog, w, then d likes w. + +#check ∀ (d w g : Dog), dogLikesDog d g → dogLikesDog g w → dogLikesDog d w + +-- 3. If any cat, c, likes any cat, d, then d also likes c. + +#check ∀ (c d : Cat), catLikesCat c d → catLikesCat d c + +-- 4. Every cat, c, likes itself. + +#check ∀ (c : Cat), catLikesCat c c +-- or +#check ∃ (c : Cat), catLikesCat c c + +-- 5. If every cat likes every other cat, and c and d are cats, then c likes d. + +#check ∀ (c d : Cat), catLikesCat c d + +-- Finally, give a formal proof in Lean of the proposition in problem #5. + +#check pf_c c d
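
-- A hedged addendum (added for illustration, not part of the original
-- submission): the same fact can be stated and proved directly, by
-- specializing the assumed proof pf_c to the particular cats c and d.
example : catLikesCat c d := pf_c c d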