1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
/*!
 * Concurrency-enabled mechanisms for sharing mutable and/or immutable state
 * between tasks.
 *
 * # Example
 *
 * In this example, a large vector of floats is shared between several tasks.
 * With simple pipes, without Arc, a copy would have to be made for each task.
 *
 * ```rust
 * use sync::Arc;
 * use std::{rand, slice};
 *
 * let numbers = slice::from_fn(100, |i| (i as f32) * rand::random());
 * let shared_numbers = Arc::new(numbers);
 *
 * for _ in range(0, 10) {
 *     let (tx, rx) = channel();
 *     tx.send(shared_numbers.clone());
 *
 *     spawn(proc() {
 *         let shared_numbers = rx.recv();
 *         let local_numbers = shared_numbers.get();
 *
 *         // Work with the local numbers
 *     });
 * }
 * ```
 */
46 #[allow(missing_doc, dead_code)];
50 use sync::{Mutex, RWLock};
53 use std::kinds::marker;
54 use std::sync::arc::UnsafeArc;
58 use std::kinds::Share;
60 /// As sync::condvar, a mechanism for unlock-and-descheduling and
61 /// signaling, for use with the Arc types.
62 pub struct ArcCondvar<'a> {
64 priv failed: &'a bool,
65 priv cond: &'a sync::Condvar<'a>
68 impl<'a> ArcCondvar<'a> {
69 /// Atomically exit the associated Arc and block until a signal is sent.
71 pub fn wait(&self) { self.wait_on(0) }
74 * Atomically exit the associated Arc and block on a specified condvar
75 * until a signal is sent on that same condvar (as sync::cond.wait_on).
77 * wait() is equivalent to wait_on(0).
80 pub fn wait_on(&self, condvar_id: uint) {
81 assert!(!*self.failed);
82 self.cond.wait_on(condvar_id);
83 // This is why we need to wrap sync::condvar.
84 check_poison(self.is_mutex, *self.failed);
87 /// Wake up a blocked task. Returns false if there was no blocked task.
89 pub fn signal(&self) -> bool { self.signal_on(0) }
92 * Wake up a blocked task on a specified condvar (as
93 * sync::cond.signal_on). Returns false if there was no blocked task.
96 pub fn signal_on(&self, condvar_id: uint) -> bool {
97 assert!(!*self.failed);
98 self.cond.signal_on(condvar_id)
101 /// Wake up all blocked tasks. Returns the number of tasks woken.
103 pub fn broadcast(&self) -> uint { self.broadcast_on(0) }
106 * Wake up all blocked tasks on a specified condvar (as
107 * sync::cond.broadcast_on). Returns the number of tasks woken.
110 pub fn broadcast_on(&self, condvar_id: uint) -> uint {
111 assert!(!*self.failed);
112 self.cond.broadcast_on(condvar_id)
116 /****************************************************************************
118 ****************************************************************************/
120 /// An atomically reference counted wrapper for shared immutable state.
121 pub struct Arc<T> { priv x: UnsafeArc<T> }
125 * Access the underlying data in an atomically reference counted
128 impl<T: Share + Send> Arc<T> {
129 /// Create an atomically reference counted wrapper.
131 pub fn new(data: T) -> Arc<T> {
132 Arc { x: UnsafeArc::new(data) }
136 pub fn get<'a>(&'a self) -> &'a T {
137 unsafe { &*self.x.get_immut() }
141 impl<T: Share + Send> Clone for Arc<T> {
143 * Duplicate an atomically reference counted wrapper.
145 * The resulting two `arc` objects will point to the same underlying data
146 * object. However, one of the `arc` objects can be sent to another task,
147 * allowing them to share the underlying data.
150 fn clone(&self) -> Arc<T> {
151 Arc { x: self.x.clone() }
155 /****************************************************************************
156 * Mutex protected Arc (unsafe)
157 ****************************************************************************/
160 struct MutexArcInner<T> { lock: Mutex, failed: bool, data: T }
162 /// An Arc with mutable data protected by a blocking mutex.
163 pub struct MutexArc<T> {
164 priv x: UnsafeArc<MutexArcInner<T>>,
165 priv marker: marker::NoFreeze,
168 impl<T:Send> Clone for MutexArc<T> {
169 /// Duplicate a mutex-protected Arc. See arc::clone for more details.
171 fn clone(&self) -> MutexArc<T> {
172 // NB: Cloning the underlying mutex is not necessary. Its reference
173 // count would be exactly the same as the shared state's.
174 MutexArc { x: self.x.clone(),
175 marker: marker::NoFreeze, }
179 impl<T:Send> MutexArc<T> {
180 /// Create a mutex-protected Arc with the supplied data.
181 pub fn new(user_data: T) -> MutexArc<T> {
182 MutexArc::new_with_condvars(user_data, 1)
186 * Create a mutex-protected Arc with the supplied data and a specified number
187 * of condvars (as sync::Mutex::new_with_condvars).
189 pub fn new_with_condvars(user_data: T, num_condvars: uint) -> MutexArc<T> {
190 let data = MutexArcInner {
191 lock: Mutex::new_with_condvars(num_condvars),
192 failed: false, data: user_data
194 MutexArc { x: UnsafeArc::new(data),
195 marker: marker::NoFreeze, }
199 * Access the underlying mutable data with mutual exclusion from other
200 * tasks. The argument closure will be run with the mutex locked; all
201 * other tasks wishing to access the data will block until the closure
204 * If you wish to nest MutexArcs, one strategy for ensuring safety at
205 * runtime is to add a "nesting level counter" inside the stored data, and
206 * when traversing the arcs, assert that they monotonically decrease.
210 * Failing while inside the Arc will unlock the Arc while unwinding, so
211 * that other tasks won't block forever. It will also poison the Arc:
212 * any tasks that subsequently try to access it (including those already
213 * blocked on the mutex) will also fail immediately.
216 pub fn access<U>(&self, blk: |x: &mut T| -> U) -> U {
217 let state = self.x.get();
219 // Borrowck would complain about this if the code were
220 // not already unsafe. See borrow_rwlock, far below.
221 (&(*state).lock).lock(|| {
222 check_poison(true, (*state).failed);
223 let _z = PoisonOnFail::new(&mut (*state).failed);
224 blk(&mut (*state).data)
229 /// As access(), but with a condvar, as sync::mutex.lock_cond().
231 pub fn access_cond<U>(&self, blk: |x: &mut T, c: &ArcCondvar| -> U) -> U {
232 let state = self.x.get();
234 (&(*state).lock).lock_cond(|cond| {
235 check_poison(true, (*state).failed);
236 let _z = PoisonOnFail::new(&mut (*state).failed);
237 blk(&mut (*state).data,
238 &ArcCondvar {is_mutex: true,
239 failed: &(*state).failed,
246 // Common code for {mutex.access,rwlock.write}{,_cond}.
249 fn check_poison(is_mutex: bool, failed: bool) {
252 fail!("Poisoned MutexArc - another task failed inside!");
254 fail!("Poisoned rw_arc - another task failed inside!");
260 struct PoisonOnFail {
265 impl Drop for PoisonOnFail {
268 /* assert!(!*self.failed);
269 -- might be false in case of cond.wait() */
270 if !self.failed && task::failing() {
278 fn new<'a>(flag: &'a mut bool) -> PoisonOnFail {
281 failed: task::failing()
286 /****************************************************************************
287 * R/W lock protected Arc
288 ****************************************************************************/
291 struct RWArcInner<T> { lock: RWLock, failed: bool, data: T }
293 * A dual-mode Arc protected by a reader-writer lock. The data can be accessed
294 * mutably or immutably, and immutably-accessing tasks may run concurrently.
296 * Unlike mutex_arcs, rw_arcs are safe, because they cannot be nested.
298 pub struct RWArc<T> {
299 priv x: UnsafeArc<RWArcInner<T>>,
300 priv marker: marker::NoFreeze,
301 priv marker1: marker::NoShare,
304 impl<T: Share + Send> Clone for RWArc<T> {
305 /// Duplicate a rwlock-protected Arc. See arc::clone for more details.
307 fn clone(&self) -> RWArc<T> {
308 RWArc { x: self.x.clone(),
309 marker: marker::NoFreeze,
310 marker1: marker::NoShare, }
315 impl<T: Share + Send> RWArc<T> {
316 /// Create a reader/writer Arc with the supplied data.
317 pub fn new(user_data: T) -> RWArc<T> {
318 RWArc::new_with_condvars(user_data, 1)
322 * Create a reader/writer Arc with the supplied data and a specified number
323 * of condvars (as sync::RWLock::new_with_condvars).
325 pub fn new_with_condvars(user_data: T, num_condvars: uint) -> RWArc<T> {
326 let data = RWArcInner {
327 lock: RWLock::new_with_condvars(num_condvars),
328 failed: false, data: user_data
330 RWArc { x: UnsafeArc::new(data),
331 marker: marker::NoFreeze,
332 marker1: marker::NoShare, }
336 * Access the underlying data mutably. Locks the rwlock in write mode;
337 * other readers and writers will block.
341 * Failing while inside the Arc will unlock the Arc while unwinding, so
342 * that other tasks won't block forever. As MutexArc.access, it will also
343 * poison the Arc, so subsequent readers and writers will both also fail.
346 pub fn write<U>(&self, blk: |x: &mut T| -> U) -> U {
348 let state = self.x.get();
349 (*borrow_rwlock(state)).write(|| {
350 check_poison(false, (*state).failed);
351 let _z = PoisonOnFail::new(&mut (*state).failed);
352 blk(&mut (*state).data)
357 /// As write(), but with a condvar, as sync::rwlock.write_cond().
359 pub fn write_cond<U>(&self,
360 blk: |x: &mut T, c: &ArcCondvar| -> U)
363 let state = self.x.get();
364 (*borrow_rwlock(state)).write_cond(|cond| {
365 check_poison(false, (*state).failed);
366 let _z = PoisonOnFail::new(&mut (*state).failed);
367 blk(&mut (*state).data,
368 &ArcCondvar {is_mutex: false,
369 failed: &(*state).failed,
376 * Access the underlying data immutably. May run concurrently with other
381 * Failing will unlock the Arc while unwinding. However, unlike all other
382 * access modes, this will not poison the Arc.
384 pub fn read<U>(&self, blk: |x: &T| -> U) -> U {
386 let state = self.x.get();
387 (*state).lock.read(|| {
388 check_poison(false, (*state).failed);
395 * As write(), but with the ability to atomically 'downgrade' the lock.
396 * See sync::rwlock.write_downgrade(). The RWWriteMode token must be used
397 * to obtain the &mut T, and can be transformed into a RWReadMode token by
398 * calling downgrade(), after which a &T can be obtained instead.
405 * let arc = RWArc::new(1);
406 * arc.write_downgrade(|mut write_token| {
407 * write_token.write_cond(|state, condvar| {
408 * // ... exclusive access with mutable state ...
410 * let read_token = arc.downgrade(write_token);
411 * read_token.read(|state| {
412 * // ... shared access with immutable state ...
417 pub fn write_downgrade<U>(&self, blk: |v: RWWriteMode<T>| -> U) -> U {
419 let state = self.x.get();
420 (*borrow_rwlock(state)).write_downgrade(|write_mode| {
421 check_poison(false, (*state).failed);
423 data: &mut (*state).data,
425 poison: PoisonOnFail::new(&mut (*state).failed)
431 /// To be called inside of the write_downgrade block.
432 pub fn downgrade<'a>(&self, token: RWWriteMode<'a, T>)
433 -> RWReadMode<'a, T> {
435 // The rwlock should assert that the token belongs to us for us.
436 let state = self.x.get();
443 let new_token = (*state).lock.downgrade(t);
444 // Whatever region the input reference had, it will be safe to use
445 // the same region for the output reference. (The only 'unsafe' part
446 // of this cast is removing the mutability.)
448 // Downgrade ensured the token belonged to us. Just a sanity check.
449 assert!((&(*state).data as *T as uint) == (new_data as *mut T as uint));
459 // Borrowck rightly complains about immutably aliasing the rwlock in order to
460 // lock it. This wraps the unsafety, with the justification that the 'lock'
461 // field is never overwritten; only 'failed' and 'data'.
463 fn borrow_rwlock<T: Share + Send>(state: *mut RWArcInner<T>) -> *RWLock {
464 unsafe { cast::transmute(&(*state).lock) }
467 /// The "write permission" token used for RWArc.write_downgrade().
468 pub struct RWWriteMode<'a, T> {
469 priv data: &'a mut T,
470 priv token: sync::RWLockWriteMode<'a>,
471 priv poison: PoisonOnFail,
474 /// The "read permission" token used for RWArc.write_downgrade().
475 pub struct RWReadMode<'a, T> {
477 priv token: sync::RWLockReadMode<'a>,
480 impl<'a, T: Share + Send> RWWriteMode<'a, T> {
481 /// Access the pre-downgrade RWArc in write mode.
482 pub fn write<U>(&mut self, blk: |x: &mut T| -> U) -> U {
489 token.write(|| blk(data))
494 /// Access the pre-downgrade RWArc in write mode with a condvar.
495 pub fn write_cond<U>(&mut self,
496 blk: |x: &mut T, c: &ArcCondvar| -> U)
504 token.write_cond(|cond| {
506 let cvar = ArcCondvar {
508 failed: &*poison.flag,
519 impl<'a, T: Share + Send> RWReadMode<'a, T> {
520 /// Access the post-downgrade rwlock in read mode.
521 pub fn read<U>(&self, blk: |x: &T| -> U) -> U {
527 token.read(|| blk(data))
533 /****************************************************************************
535 ****************************************************************************/
537 pub struct CowArc<T> { priv x: UnsafeArc<T> }
539 /// A Copy-on-write Arc functions the same way as an `arc` except it allows
540 /// mutation of the contents if there is only a single reference to
541 /// the data. If there are multiple references the data is automatically
542 /// cloned and the task modifies the cloned data in place of the shared data.
543 impl<T: Clone + Send + Share> CowArc<T> {
544 /// Create a copy-on-write atomically reference counted wrapper
546 pub fn new(data: T) -> CowArc<T> {
547 CowArc { x: UnsafeArc::new(data) }
551 pub fn get<'a>(&'a self) -> &'a T {
552 unsafe { &*self.x.get_immut() }
555 /// get a mutable reference to the contents. If there are more then one
556 /// reference to the contents of the `CowArc` will be cloned
557 /// and this reference updated to point to the cloned data.
559 pub fn get_mut<'a>(&'a mut self) -> &'a mut T {
560 if !self.x.is_owned() {
561 *self = CowArc::new(self.get().clone())
563 unsafe { &mut *self.x.get() }
567 impl<T: Clone + Send + Share> Clone for CowArc<T> {
568 /// Duplicate a Copy-on-write Arc. See arc::clone for more details.
569 fn clone(&self) -> CowArc<T> {
570 CowArc { x: self.x.clone() }
576 /****************************************************************************
578 ****************************************************************************/
// NOTE(review): everything below is the interior of the `#[cfg(test)] mod
// tests` block, but the text is an extraction-garbled excerpt — original
// line numbers are baked into each line and many lines (task spawns,
// closure bodies, closing braces) are missing entirely. Left byte-identical
// rather than guessed at; recover from upstream rust-lang/rust
// src/libsync/arc.rs before editing. Comments below only label the test
// groups, grounded in the visible function names and assertions.
583 use super::{Arc, RWArc, MutexArc, CowArc};
// Plain Arc: clone across a channel and read the shared vector.
588 fn manually_share_arc() {
589 let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
590 let arc_v = Arc::new(v);
592 let (tx, rx) = channel();
595 let arc_v: Arc<~[int]> = rx.recv();
597 let v = arc_v.get().clone();
601 tx.send(arc_v.clone());
603 assert_eq!(arc_v.get()[2], 3);
604 assert_eq!(arc_v.get()[4], 5);
606 info!("{:?}", arc_v);
// MutexArc: condvar handshake between parent and child tasks.
610 fn test_mutex_arc_condvar() {
611 let arc = ~MutexArc::new(false);
612 let arc2 = ~arc.clone();
613 let (tx, rx) = channel();
615 // wait until parent gets in
617 arc2.access_cond(|state, cond| {
623 arc.access_cond(|state, cond| {
// Poisoning tests: a task failing inside the lock must poison the Arc.
632 #[test] #[should_fail]
633 fn test_arc_condvar_poison() {
634 let arc = ~MutexArc::new(1);
635 let arc2 = ~arc.clone();
636 let (tx, rx) = channel();
640 arc2.access_cond(|one, cond| {
642 // Parent should fail when it wakes up.
647 arc.access_cond(|one, cond| {
655 #[test] #[should_fail]
656 fn test_mutex_arc_poison() {
657 let arc = ~MutexArc::new(1);
658 let arc2 = ~arc.clone();
659 let _ = task::try(proc() {
670 fn test_mutex_arc_nested() {
671 // Tests nested mutexes and access
672 // to underlaying data.
673 let arc = ~MutexArc::new(1);
674 let arc2 = ~MutexArc::new(*arc);
676 (*arc2).access(|mutex| {
677 (*mutex).access(|one| {
// Unwinding through a Drop impl that re-enters the MutexArc.
685 fn test_mutex_arc_access_in_unwind() {
686 let arc = MutexArc::new(1i);
687 let arc2 = arc.clone();
688 let _ = task::try::<()>(proc() {
692 impl Drop for Unwinder {
694 self.i.access(|num| *num += 1);
697 let _u = Unwinder { i: arc2 };
700 assert_eq!(2, arc.access(|n| *n));
// RWArc poison matrix: writers poison (wr/ww/dw) ...
703 #[test] #[should_fail]
704 fn test_rw_arc_poison_wr() {
705 let arc = RWArc::new(1);
706 let arc2 = arc.clone();
707 let _ = task::try(proc() {
717 #[test] #[should_fail]
718 fn test_rw_arc_poison_ww() {
719 let arc = RWArc::new(1);
720 let arc2 = arc.clone();
721 let _ = task::try(proc() {
730 #[test] #[should_fail]
731 fn test_rw_arc_poison_dw() {
732 let arc = RWArc::new(1);
733 let arc2 = arc.clone();
734 let _ = task::try(proc() {
735 arc2.write_downgrade(|mut write_mode| {
736 write_mode.write(|one| {
// ... while failing readers do not poison (rr/rw/dr).
746 fn test_rw_arc_no_poison_rr() {
747 let arc = RWArc::new(1);
748 let arc2 = arc.clone();
749 let _ = task::try(proc() {
759 fn test_rw_arc_no_poison_rw() {
760 let arc = RWArc::new(1);
761 let arc2 = arc.clone();
762 let _ = task::try(proc() {
772 fn test_rw_arc_no_poison_dr() {
773 let arc = RWArc::new(1);
774 let arc2 = arc.clone();
775 let _ = task::try(proc() {
776 arc2.write_downgrade(|write_mode| {
777 let read_mode = arc2.downgrade(write_mode);
778 read_mode.read(|one| {
// Stress test: one writer incrementing while readers observe.
789 let arc = RWArc::new(0);
790 let arc2 = arc.clone();
791 let (tx, rx) = channel();
795 for _ in range(0, 10) {
805 // Readers try to catch the writer in the act
806 let mut children = ~[];
807 for _ in range(0, 5) {
808 let arc3 = arc.clone();
809 let mut builder = task::task();
810 children.push(builder.future_result());
811 builder.spawn(proc() {
818 // Wait for children to pass their asserts
819 for r in children.mut_iter() {
823 // Wait for writer to finish
826 assert_eq!(*num, 10);
// Unwinding through a Drop impl that re-enters the RWArc.
831 fn test_rw_arc_access_in_unwind() {
832 let arc = RWArc::new(1i);
833 let arc2 = arc.clone();
834 let _ = task::try::<()>(proc() {
838 impl Drop for Unwinder {
840 self.i.write(|num| *num += 1);
843 let _u = Unwinder { i: arc2 };
846 assert_eq!(2, arc.read(|n| *n));
// Downgrade choreography between a downgrader, a writer, and readers.
850 fn test_rw_downgrade() {
851 // (1) A downgrader gets in write mode and does cond.wait.
852 // (2) A writer gets in write mode, sets state to 42, and does signal.
853 // (3) Downgrader wakes, sets state to 31337.
854 // (4) tells writer and all other readers to contend as it downgrades.
855 // (5) Writer attempts to set state back to 42, while downgraded task
856 // and all reader tasks assert that it's 31337.
857 let arc = RWArc::new(0);
860 let mut reader_convos = ~[];
861 for _ in range(0, 10) {
862 let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
863 reader_convos.push((tx1, rx2));
864 let arcn = arc.clone();
866 rx1.recv(); // wait for downgrader to give go-ahead
868 assert_eq!(*state, 31337);
875 let arc2 = arc.clone();
876 let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
879 arc2.write_cond(|state, cond| {
880 assert_eq!(*state, 0);
886 // This shouldn't happen until after the downgrade read
887 // section, and all other readers, finish.
888 assert_eq!(*state, 31337);
895 arc.write_downgrade(|mut write_mode| {
896 write_mode.write_cond(|state, cond| {
897 tx1.send(()); // send to another writer who will wake us up
901 assert_eq!(*state, 42);
903 // send to other readers
904 for &(ref mut rc, _) in reader_convos.mut_iter() {
908 let read_mode = arc.downgrade(write_mode);
909 read_mode.read(|state| {
910 // complete handshake with other readers
911 for &(_, ref mut rp) in reader_convos.mut_iter() {
914 tx1.send(()); // tell writer to try again
915 assert_eq!(*state, 31337);
919 rx2.recv(); // complete handshake with writer
// Race check for the condvar-reacquire vs reader-cloud handoff path.
922 fn test_rw_write_cond_downgrade_read_race_helper() {
923 // Tests that when a downgrader hands off the "reader cloud" lock
924 // because of a contending reader, a writer can't race to get it
925 // instead, which would result in readers_and_writers. This tests
926 // the sync module rather than this one, but it's here because an
927 // rwarc gives us extra shared state to help check for the race.
928 // If you want to see this test fail, go to sync.rs and replace the
929 // line in RWLock::write_cond() that looks like:
930 // "blk(&ArcCondvar { order: opt_lock, ..*cond })"
931 // with just "blk(cond)".
932 let x = RWArc::new(true);
933 let (tx, rx) = channel();
938 xw.write_cond(|state, c| {
939 tx.send(()); // tell downgrader it's ok to go
941 // The core of the test is here: the condvar reacquire path
942 // must involve order_lock, so that it cannot race with a reader
943 // trying to receive the "reader cloud lock hand-off".
948 rx.recv(); // wait for writer to get in
950 x.write_downgrade(|mut write_mode| {
951 write_mode.write_cond(|state, c| {
953 // make writer contend in the cond-reacquire path
956 // make a reader task to trigger the "reader cloud lock" handoff
958 let (tx, rx) = channel();
961 xr.read(|_state| { })
963 rx.recv(); // wait for reader task to exist
965 let read_mode = x.downgrade(write_mode);
966 read_mode.read(|state| {
967 // if writer mistakenly got in, make sure it mutates state
968 // before we assert on it
969 for _ in range(0, 5) { task::deschedule(); }
970 // make sure writer didn't get in.
976 fn test_rw_write_cond_downgrade_read_race() {
977 // Ideally the above test case would have deschedule statements in it that
978 // helped to expose the race nearly 100% of the time... but adding
979 // deschedules in the intuitively-right locations made it even less likely,
980 // and I wasn't sure why :( . This is a mediocre "next best" option.
981 for _ in range(0, 8) { test_rw_write_cond_downgrade_read_race_helper(); }
// CowArc: clones share storage until get_mut forces a private copy.
985 fn test_cowarc_clone()
987 let cow0 = CowArc::new(75u);
988 let cow1 = cow0.clone();
989 let cow2 = cow1.clone();
991 assert!(75 == *cow0.get());
992 assert!(75 == *cow1.get());
993 assert!(75 == *cow2.get());
995 assert!(cow0.get() == cow1.get());
996 assert!(cow0.get() == cow2.get());
1000 fn test_cowarc_clone_get_mut()
1002 let mut cow0 = CowArc::new(75u);
1003 let mut cow1 = cow0.clone();
1004 let mut cow2 = cow1.clone();
1006 assert!(75 == *cow0.get_mut());
1007 assert!(75 == *cow1.get_mut());
1008 assert!(75 == *cow2.get_mut());
1010 *cow0.get_mut() += 1;
1011 *cow1.get_mut() += 2;
1012 *cow2.get_mut() += 3;
1014 assert!(76 == *cow0.get());
1015 assert!(77 == *cow1.get());
1016 assert!(78 == *cow2.get());
1018 // none should point to the same backing memory
1019 assert!(cow0.get() != cow1.get());
1020 assert!(cow0.get() != cow2.get());
1021 assert!(cow1.get() != cow2.get());
1025 fn test_cowarc_clone_get_mut2()
1027 let mut cow0 = CowArc::new(75u);
1028 let cow1 = cow0.clone();
1029 let cow2 = cow1.clone();
1031 assert!(75 == *cow0.get());
1032 assert!(75 == *cow1.get());
1033 assert!(75 == *cow2.get());
1035 *cow0.get_mut() += 1;
1037 assert!(76 == *cow0.get());
1038 assert!(75 == *cow1.get());
1039 assert!(75 == *cow2.get());
1041 // cow1 and cow2 should share the same contents
1042 // cow0 should have a unique reference
1043 assert!(cow0.get() != cow1.get());
1044 assert!(cow0.get() != cow2.get());
1045 assert!(cow1.get() == cow2.get());