1 # localrepo.py - read/write repository class for mercurial
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
8 from node import bin, hex, nullid, nullrev, short
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
14 import match as match_
15 import merge as merge_
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
33 self.ui = baseui.copy()
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
41 if not os.path.isdir(self.path):
43 if not os.path.exists(path):
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
59 reqfile.write("%s\n" % r)
62 raise error.RepoError(_("repository %s not found") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
69 requirements = set(self.opener("requires").read().splitlines())
71 if inst.errno != errno.ENOENT:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
76 self.sharedpath = self.path
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
84 if inst.errno != errno.ENOENT:
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
93 # These two define the set of tags for this repository. _tags
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # 'local'. (Global tags are defined by .hgtags across all
96 # heads, and local tags are defined in .hg/localtags.) They
97 # constitute the in-memory cache of tags.
101 self.branchcache = None
102 self._ubranchcache = None # UTF-8 version of branchcache
103 self._branchcachetip = None
104 self.nodetagscache = None
106 self._datafilters = {}
107 self._transref = self._lockref = self._wlockref = None
111 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
116 self.sopener.defversion = c.version
121 return manifest.manifest(self.sopener)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
127 def __getitem__(self, changeid):
129 return context.workingctx(self)
130 return context.changectx(self, changeid)
132 def __nonzero__(self):
136 return len(self.changelog)
139 for i in xrange(len(self)):
143 return 'file:' + self.root
    def hook(self, name, throw=False, **args):
        # Delegate to the hook module: run all hooks configured for 'name'
        # against this repo and its ui; extra keyword args reach the hooks.
        return hook.hook(self.ui, self, name, throw, **args)
148 tag_disallowed = ':\r\n'
150 def _tag(self, names, node, message, local, user, date, extra={}):
151 if isinstance(names, str):
155 allchars = ''.join(names)
156 for c in self.tag_disallowed:
158 raise util.Abort(_('%r cannot be used in a tag name') % c)
161 self.hook('pretag', throw=True, node=hex(node), tag=name,
164 def writetags(fp, names, munge, prevtags):
166 if prevtags and prevtags[-1] != '\n':
169 m = munge and munge(name) or name
170 if self._tagtypes and name in self._tagtypes:
171 old = self._tags.get(name, nullid)
172 fp.write('%s %s\n' % (hex(old), m))
173 fp.write('%s %s\n' % (hex(node), m))
179 fp = self.opener('localtags', 'r+')
181 fp = self.opener('localtags', 'a')
185 # local tags are stored in the current charset
186 writetags(fp, names, None, prevtags)
188 self.hook('tag', node=hex(node), tag=name, local=local)
192 fp = self.wfile('.hgtags', 'rb+')
194 fp = self.wfile('.hgtags', 'ab')
198 # committed tags are stored in UTF-8
199 writetags(fp, names, encoding.fromlocal, prevtags)
201 if '.hgtags' not in self.dirstate:
202 self.add(['.hgtags'])
204 m = match_.exact(self.root, '', ['.hgtags'])
205 tagnode = self.commit(message, user, date, extra=extra, match=m)
208 self.hook('tag', node=hex(node), tag=name, local=local)
212 def tag(self, names, node, message, local, user, date):
213 '''tag a revision with one or more symbolic names.
215 names is a list of strings or, when adding a single tag, names may be a
218 if local is True, the tags are stored in a per-repository file.
219 otherwise, they are stored in the .hgtags file, and a new
220 changeset is committed with the change.
224 local: whether to store tags in non-version-controlled file
227 message: commit message to use if committing
229 user: name of user to use if committing
231 date: date tuple to use if committing'''
233 for x in self.status()[:5]:
235 raise util.Abort(_('working copy of .hgtags is changed '
236 '(please commit .hgtags manually)'))
238 self.tags() # instantiate the cache
239 self._tag(names, node, message, local, user, date)
242 '''return a mapping of tag to node'''
243 if self._tags is None:
244 (self._tags, self._tagtypes) = self._findtags()
249 '''Do the hard work of finding tags. Return a pair of dicts
250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
251 maps tag name to a string like \'global\' or \'local\'.
252 Subclasses or extensions are free to add their own tags, but
253 should be aware that the returned dicts will be retained for the
254 duration of the localrepo object.'''
256 # XXX what tagtype should subclasses/extensions use? Currently
257 # mq and bookmarks add tags, but do not set the tagtype at all.
258 # Should each extension invent its own tag type? Should there
259 # be one tagtype for all such "virtual" tags? Or is the status
262 alltags = {} # map tag name to (node, hist)
265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
268 # Build the return dicts. Have to re-encode tag names because
269 # the tags module always uses UTF-8 (in order not to lose info
270 # writing to the cache), but the rest of Mercurial wants them in
273 for (name, (node, hist)) in alltags.iteritems():
275 tags[encoding.tolocal(name)] = node
276 tags['tip'] = self.changelog.tip()
277 tagtypes = dict([(encoding.tolocal(name), value)
278 for (name, value) in tagtypes.iteritems()])
279 return (tags, tagtypes)
281 def tagtype(self, tagname):
283 return the type of the given tag. result can be:
285 'local' : a local tag
286 'global' : a global tag
287 None : tag does not exist
292 return self._tagtypes.get(tagname)
295 '''return a list of tags ordered by revision'''
297 for t, n in self.tags().iteritems():
299 r = self.changelog.rev(n)
301 r = -2 # sort to the beginning of the list if unknown
303 return [(t, n) for r, t, n in sorted(l)]
305 def nodetags(self, node):
306 '''return the tags associated with a node'''
307 if not self.nodetagscache:
308 self.nodetagscache = {}
309 for t, n in self.tags().iteritems():
310 self.nodetagscache.setdefault(n, []).append(t)
311 return self.nodetagscache.get(node, [])
313 def _branchtags(self, partial, lrev):
314 # TODO: rename this function?
315 tiprev = len(self) - 1
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
323 tip = self.changelog.tip()
324 if self.branchcache is not None and self._branchcachetip == tip:
325 return self.branchcache
327 oldtip = self._branchcachetip
328 self._branchcachetip = tip
329 if self.branchcache is None:
330 self.branchcache = {} # avoid recursion in changectx
332 self.branchcache.clear() # keep using the same dict
333 if oldtip is None or oldtip not in self.changelog.nodemap:
334 partial, last, lrev = self._readbranchcache()
336 lrev = self.changelog.rev(oldtip)
337 partial = self._ubranchcache
339 self._branchtags(partial, lrev)
340 # this private cache holds all heads (not just tips)
341 self._ubranchcache = partial
343 # the branch cache is stored on disk as UTF-8, but in the local
345 for k, v in partial.iteritems():
346 self.branchcache[encoding.tolocal(k)] = v
347 return self.branchcache
350 def branchtags(self):
351 '''return a dict where branch names map to the tipmost head of
352 the branch, open heads come before closed'''
354 for bn, heads in self.branchmap().iteritems():
356 for i in range(len(heads)-1, -1, -1):
358 if 'close' not in self.changelog.read(h)[5]:
361 # no open heads were found
368 def _readbranchcache(self):
371 f = self.opener("branchheads.cache")
372 lines = f.read().split('\n')
374 except (IOError, OSError):
375 return {}, nullid, nullrev
378 last, lrev = lines.pop(0).split(" ", 1)
379 last, lrev = bin(last), int(lrev)
380 if lrev >= len(self) or self[lrev].node() != last:
381 # invalidate the cache
382 raise ValueError('invalidating branch cache (tip differs)')
385 node, label = l.split(" ", 1)
386 partial.setdefault(label.strip(), []).append(bin(node))
387 except KeyboardInterrupt:
389 except Exception, inst:
390 if self.ui.debugflag:
391 self.ui.warn(str(inst), '\n')
392 partial, last, lrev = {}, nullid, nullrev
393 return partial, last, lrev
395 def _writebranchcache(self, branches, tip, tiprev):
397 f = self.opener("branchheads.cache", "w", atomictemp=True)
398 f.write("%s %s\n" % (hex(tip), tiprev))
399 for label, nodes in branches.iteritems():
401 f.write("%s %s\n" % (hex(node), label))
403 except (IOError, OSError):
406 def _updatebranchcache(self, partial, start, end):
407 # collect new branch entries
409 for r in xrange(start, end):
411 newbranches.setdefault(c.branch(), []).append(c.node())
412 # if older branchheads are reachable from new ones, they aren't
413 # really branchheads. Note checking parents is insufficient:
414 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
415 for branch, newnodes in newbranches.iteritems():
416 bheads = partial.setdefault(branch, [])
417 bheads.extend(newnodes)
421 # starting from tip means fewer passes over reachable
423 latest = newnodes.pop()
424 if latest not in bheads:
426 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
427 reachable = self.changelog.reachable(latest, minbhrev)
428 bheads = [b for b in bheads if b not in reachable]
429 newbheads.insert(0, latest)
430 bheads.extend(newbheads)
431 partial[branch] = bheads
433 def lookup(self, key):
434 if isinstance(key, int):
435 return self.changelog.node(key)
437 return self.dirstate.parents()[0]
441 return self.changelog.tip()
442 n = self.changelog._match(key)
445 if key in self.tags():
446 return self.tags()[key]
447 if key in self.branchtags():
448 return self.branchtags()[key]
449 n = self.changelog._partialmatch(key)
453 # can't find key, check if it might have come from damaged dirstate
454 if key in self.dirstate.parents():
455 raise error.Abort(_("working directory has unknown parent '%s'!")
462 raise error.RepoError(_("unknown revision '%s'") % key)
468 return os.path.join(self.path, f)
471 return os.path.join(self.root, f)
474 return os.path.join(self.root, util.pconvert(f))
479 return filelog.filelog(self.sopener, f)
    def changectx(self, changeid):
        # Thin alias for self[changeid]; see __getitem__.
        return self[changeid]
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid

        NOTE(review): with the default changeid of None this presumably
        yields the working directory's parents (see __getitem__) — confirm.
        '''
        return self[changeid].parents()
    def filectx(self, path, changeid=None, fileid=None):
        """Return a filectx (file context) for the given file.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.
        """
        return context.filectx(self, path, changeid, fileid)
494 return self.dirstate.getcwd()
    def pathto(self, f, cwd=None):
        # Delegate to dirstate: render repo-relative path f relative to cwd.
        return self.dirstate.pathto(f, cwd)
    def wfile(self, f, mode='r'):
        # Open f via the working-directory opener (paths relative to root).
        return self.wopener(f, mode)
503 return os.path.islink(self.wjoin(f))
505 def _filter(self, filter, filename, data):
506 if filter not in self.filterpats:
508 for pat, cmd in self.ui.configitems(filter):
511 mf = match_.match(self.root, '', [pat])
514 for name, filterfn in self._datafilters.iteritems():
515 if cmd.startswith(name):
517 params = cmd[len(name):].lstrip()
520 fn = lambda s, c, **kwargs: util.filter(s, c)
521 # Wrap old filters not supporting keyword arguments
522 if not inspect.getargspec(fn)[2]:
524 fn = lambda s, c, **kwargs: oldfn(s, c)
525 l.append((mf, fn, params))
526 self.filterpats[filter] = l
528 for mf, fn, cmd in self.filterpats[filter]:
530 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
531 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
    def adddatafilter(self, name, filter):
        # Register a named data-filter function; _filter matches these by
        # command prefix when applying encode/decode rules.
        self._datafilters[name] = filter
539 def wread(self, filename):
540 if self._link(filename):
541 data = os.readlink(self.wjoin(filename))
543 data = self.wopener(filename, 'r').read()
544 return self._filter("encode", filename, data)
546 def wwrite(self, filename, data, flags):
547 data = self._filter("decode", filename, data)
549 os.unlink(self.wjoin(filename))
553 self.wopener.symlink(data, filename)
555 self.wopener(filename, 'w').write(data)
557 util.set_flags(self.wjoin(filename), False, True)
    def wwritedata(self, filename, data):
        # Apply the configured 'decode' filters to data without writing it.
        return self._filter("decode", filename, data)
562 def transaction(self):
563 tr = self._transref and self._transref() or None
564 if tr and tr.running():
567 # abort here if the journal already exists
568 if os.path.exists(self.sjoin("journal")):
569 raise error.RepoError(_("journal already exists - run hg recover"))
571 # save dirstate for rollback
573 ds = self.opener("dirstate").read()
576 self.opener("journal.dirstate", "w").write(ds)
577 self.opener("journal.branch", "w").write(self.dirstate.branch())
579 renames = [(self.sjoin("journal"), self.sjoin("undo")),
580 (self.join("journal.dirstate"), self.join("undo.dirstate")),
581 (self.join("journal.branch"), self.join("undo.branch"))]
582 tr = transaction.transaction(self.ui.warn, self.sopener,
583 self.sjoin("journal"),
585 self.store.createmode)
586 self._transref = weakref.ref(tr)
592 if os.path.exists(self.sjoin("journal")):
593 self.ui.status(_("rolling back interrupted transaction\n"))
594 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
598 self.ui.warn(_("no interrupted transaction available\n"))
608 if os.path.exists(self.sjoin("undo")):
609 self.ui.status(_("rolling back last transaction\n"))
610 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
611 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
613 branch = self.opener("undo.branch").read()
614 self.dirstate.setbranch(branch)
616 self.ui.warn(_("Named branch could not be reset, "
617 "current branch still is: %s\n")
618 % encoding.tolocal(self.dirstate.branch()))
620 self.dirstate.invalidate()
623 self.ui.warn(_("no rollback information available\n"))
627 def invalidate(self):
628 for a in "changelog manifest".split():
629 if a in self.__dict__:
632 self._tagtypes = None
633 self.nodetagscache = None
634 self.branchcache = None
635 self._ubranchcache = None
636 self._branchcachetip = None
638 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
640 l = lock.lock(lockname, 0, releasefn, desc=desc)
641 except error.LockHeld, inst:
644 self.ui.warn(_("waiting for lock on %s held by %r\n") %
646 # default to 600 seconds timeout
647 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
648 releasefn, desc=desc)
653 def lock(self, wait=True):
654 '''Lock the repository store (.hg/store) and return a weak reference
655 to the lock. Use this before modifying the store (e.g. committing or
656 stripping). If you are opening a transaction, get a lock as well.)'''
657 l = self._lockref and self._lockref()
658 if l is not None and l.held:
662 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
663 _('repository %s') % self.origroot)
664 self._lockref = weakref.ref(l)
667 def wlock(self, wait=True):
668 '''Lock the non-store parts of the repository (everything under
669 .hg except .hg/store) and return a weak reference to the lock.
670 Use this before modifying files in .hg.'''
671 l = self._wlockref and self._wlockref()
672 if l is not None and l.held:
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 self.dirstate.invalidate, _('working directory of %s') %
679 self._wlockref = weakref.ref(l)
682 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
684 commit an individual file as part of a larger transaction
689 flog = self.file(fname)
690 fparent1 = manifest1.get(fname, nullid)
691 fparent2 = fparent2o = manifest2.get(fname, nullid)
694 copy = fctx.renamed()
695 if copy and copy[0] != fname:
696 # Mark the new revision of this file as a copy of another
697 # file. This copy data will effectively act as a parent
698 # of this new revision. If this is a merge, the first
699 # parent will be the nullid (meaning "look up the copy data")
700 # and the second one will be the other parent. For example:
702 # 0 --- 1 --- 3 rev1 changes file foo
703 # \ / rev2 renames foo to bar and changes it
704 # \- 2 -/ rev3 should have bar with all changes and
705 # should record that bar descends from
706 # bar in rev2 and foo in rev1
708 # this allows this merge to succeed:
710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
711 # \ / merging rev3 and rev4 should use bar@rev2
712 # \- 2 --- 4 as the merge base
716 crev = manifest1.get(cfname)
717 newfparent = fparent2
719 if manifest2: # branch merge
720 if fparent2 == nullid or crev is None: # copied on remote side
721 if cfname in manifest2:
722 crev = manifest2[cfname]
723 newfparent = fparent1
725 # find source in nearest ancestor if we've lost track
727 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
729 for ancestor in self['.'].ancestors():
730 if cfname in ancestor:
731 crev = ancestor[cfname].filenode()
734 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
735 meta["copy"] = cfname
736 meta["copyrev"] = hex(crev)
737 fparent1, fparent2 = nullid, newfparent
738 elif fparent2 != nullid:
739 # is one parent an ancestor of the other?
740 fparentancestor = flog.ancestor(fparent1, fparent2)
741 if fparentancestor == fparent1:
742 fparent1, fparent2 = fparent2, nullid
743 elif fparentancestor == fparent2:
746 # is the file changed?
747 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
748 changelist.append(fname)
749 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
751 # are just the flags changed during merge?
752 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
753 changelist.append(fname)
757 def commit(self, text="", user=None, date=None, match=None, force=False,
758 editor=False, extra={}):
759 """Add a new revision to current repository.
761 Revision information is gathered from the working directory,
762 match can be used to filter the committed files. If editor is
763 supplied, it is called to get a commit message.
767 raise util.Abort('%s: %s' % (f, msg))
770 match = match_.always(self.root, '')
774 match.dir = vdirs.append
779 p1, p2 = self.dirstate.parents()
782 if (not force and p2 != nullid and match and
783 (match.files() or match.anypats())):
784 raise util.Abort(_('cannot partially commit a merge '
785 '(do not specify files or patterns)'))
787 changes = self.status(match=match, clean=force)
789 changes[0].extend(changes[6]) # mq may commit unchanged files
793 for s in wctx.substate:
794 if match(s) and wctx.sub(s).dirty():
796 if subs and '.hgsubstate' not in changes[0]:
797 changes[0].insert(0, '.hgsubstate')
799 # make sure all explicit patterns are matched
800 if not force and match.files():
801 matched = set(changes[0] + changes[1] + changes[2])
803 for f in match.files():
804 if f == '.' or f in matched or f in wctx.substate:
806 if f in changes[3]: # missing
807 fail(f, _('file not found!'))
808 if f in vdirs: # visited directory
814 fail(f, _("no match under directory!"))
815 elif f not in self.dirstate:
816 fail(f, _("file not tracked!"))
818 if (not force and not extra.get("close") and p2 == nullid
819 and not (changes[0] or changes[1] or changes[2])
820 and self[None].branch() == self['.'].branch()):
823 ms = merge_.mergestate(self)
825 if f in ms and ms[f] == 'u':
826 raise util.Abort(_("unresolved merge conflicts "
829 cctx = context.workingctx(self, (p1, p2), text, user, date,
832 cctx._text = editor(self, cctx, subs)
836 state = wctx.substate.copy()
838 self.ui.status(_('committing subrepository %s\n') % s)
839 sr = wctx.sub(s).commit(cctx._text, user, date)
840 state[s] = (state[s][0], sr)
841 subrepo.writestate(self, state)
843 ret = self.commitctx(cctx, True)
845 # update dirstate and mergestate
846 for f in changes[0] + changes[1]:
847 self.dirstate.normal(f)
849 self.dirstate.forget(f)
850 self.dirstate.setparents(ret)
858 def commitctx(self, ctx, error=False):
859 """Add a new revision to current repository.
861 Revision information is passed via the context argument.
865 removed = ctx.removed()
866 p1, p2 = ctx.p1(), ctx.p2()
867 m1 = p1.manifest().copy()
871 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
872 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
876 tr = self.transaction()
877 trp = weakref.proxy(tr)
883 for f in sorted(ctx.modified() + ctx.added()):
884 self.ui.note(f + "\n")
887 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
889 m1.set(f, fctx.flags())
890 except (OSError, IOError):
892 self.ui.warn(_("trouble committing %s!\n") % f)
899 removed = [f for f in sorted(removed) if f in m1 or f in m2]
900 drop = [f for f in removed if f in m1]
903 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
904 p2.manifestnode(), (new, drop))
907 self.changelog.delayupdate()
908 n = self.changelog.add(mn, changed + removed, ctx.description(),
909 trp, p1.node(), p2.node(),
910 user, ctx.date(), ctx.extra().copy())
911 p = lambda: self.changelog.writepending() and self.root or ""
912 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
913 parent2=xp2, pending=p)
914 self.changelog.finalize(trp)
920 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
927 '''Inform the repository that nodes have been destroyed.
928 Intended for use by strip and rollback, so there's a common
929 place for anything that has to be done after destroying history.'''
930 # XXX it might be nice if we could take the list of destroyed
931 # nodes, but I don't see an easy way for rollback() to do that
933 # Ensure the persistent tag cache is updated. Doing it now
934 # means that the tag cache only has to worry about destroyed
935 # heads immediately after a strip/rollback. That in turn
936 # guarantees that "cachetip == currenttip" (comparing both rev
937 # and node) always means no nodes have been added or destroyed.
939 # XXX this is suboptimal when qrefresh'ing: we strip the current
940 # head, refresh the tag cache, then immediately add a new head.
941 # But I think doing it this way is necessary for the "instant
942 # tag cache retrieval" case to work.
943 tags_.findglobaltags(self.ui, self, {}, {})
945 def walk(self, match, node=None):
947 walk recursively through the directory tree or a given
948 changeset, finding all files matched by the match
951 return self[node].walk(match)
953 def status(self, node1='.', node2=None, match=None,
954 ignored=False, clean=False, unknown=False):
955 """return status of files between two nodes or node and working directory
957 If node1 is None, use the first dirstate parent instead.
958 If node2 is None, compare node1 with working directory.
962 mf = ctx.manifest().copy()
968 if isinstance(node1, context.changectx):
972 if isinstance(node2, context.changectx):
977 working = ctx2.rev() is None
978 parentworking = working and ctx1 == self['.']
979 match = match or match_.always(self.root, self.getcwd())
980 listignored, listclean, listunknown = ignored, clean, unknown
982 # load earliest manifest first for caching reasons
983 if not working and ctx2.rev() < ctx1.rev():
986 if not parentworking:
989 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
992 if working: # we need to scan the working dir
993 s = self.dirstate.status(match, listignored, listclean, listunknown)
994 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
996 # check for any possibly clean files
997 if parentworking and cmp:
999 # do a full compare of any files that might have changed
1000 for f in sorted(cmp):
1001 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1002 or ctx1[f].cmp(ctx2[f].data())):
1010 # update dirstate for files that are actually clean
1013 # updating the dirstate is optional
1014 # so we don't wait on the lock
1015 wlock = self.wlock(False)
1018 self.dirstate.normal(f)
1021 except error.LockError:
1024 if not parentworking:
1025 mf1 = mfmatches(ctx1)
1027 # we are comparing working dir against non-parent
1028 # generate a pseudo-manifest for the working dir
1029 mf2 = mfmatches(self['.'])
1030 for f in cmp + modified + added:
1032 mf2.set(f, ctx2.flags(f))
1037 # we are comparing two revisions
1038 deleted, unknown, ignored = [], [], []
1039 mf2 = mfmatches(ctx2)
1041 modified, added, clean = [], [], []
1044 if (mf1.flags(fn) != mf2.flags(fn) or
1045 (mf1[fn] != mf2[fn] and
1046 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1053 removed = mf1.keys()
1055 r = modified, added, removed, deleted, unknown, ignored, clean
1056 [l.sort() for l in r]
1059 def add(self, list):
1060 wlock = self.wlock()
1068 self.ui.warn(_("%s does not exist!\n") % f)
1071 if st.st_size > 10000000:
1072 self.ui.warn(_("%s: files over 10MB may cause memory and"
1073 " performance problems\n"
1074 "(use 'hg revert %s' to unadd the file)\n")
1076 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1077 self.ui.warn(_("%s not added: only files and symlinks "
1078 "supported currently\n") % f)
1080 elif self.dirstate[f] in 'amn':
1081 self.ui.warn(_("%s already tracked!\n") % f)
1082 elif self.dirstate[f] == 'r':
1083 self.dirstate.normallookup(f)
1085 self.dirstate.add(f)
1090 def forget(self, list):
1091 wlock = self.wlock()
1094 if self.dirstate[f] != 'a':
1095 self.ui.warn(_("%s not added!\n") % f)
1097 self.dirstate.forget(f)
1101 def remove(self, list, unlink=False):
1105 util.unlink(self.wjoin(f))
1106 except OSError, inst:
1107 if inst.errno != errno.ENOENT:
1109 wlock = self.wlock()
1112 if unlink and os.path.exists(self.wjoin(f)):
1113 self.ui.warn(_("%s still exists!\n") % f)
1114 elif self.dirstate[f] == 'a':
1115 self.dirstate.forget(f)
1116 elif f not in self.dirstate:
1117 self.ui.warn(_("%s not tracked!\n") % f)
1119 self.dirstate.remove(f)
1123 def undelete(self, list):
1124 manifests = [self.manifest.read(self.changelog.read(p)[0])
1125 for p in self.dirstate.parents() if p != nullid]
1126 wlock = self.wlock()
1129 if self.dirstate[f] != 'r':
1130 self.ui.warn(_("%s not removed!\n") % f)
1132 m = f in manifests[0] and manifests[0] or manifests[1]
1133 t = self.file(f).read(m[f])
1134 self.wwrite(f, t, m.flags(f))
1135 self.dirstate.normal(f)
1139 def copy(self, source, dest):
1140 p = self.wjoin(dest)
1141 if not (os.path.exists(p) or os.path.islink(p)):
1142 self.ui.warn(_("%s does not exist!\n") % dest)
1143 elif not (os.path.isfile(p) or os.path.islink(p)):
1144 self.ui.warn(_("copy failed: %s is not a file or a "
1145 "symbolic link\n") % dest)
1147 wlock = self.wlock()
1149 if self.dirstate[dest] in '?r':
1150 self.dirstate.add(dest)
1151 self.dirstate.copy(source, dest)
1155 def heads(self, start=None):
1156 heads = self.changelog.heads(start)
1157 # sort the output in rev descending order
1158 heads = [(-self.changelog.rev(h), h) for h in heads]
1159 return [n for (r, n) in sorted(heads)]
1161 def branchheads(self, branch=None, start=None, closed=False):
1163 branch = self[None].branch()
1164 branches = self.branchmap()
1165 if branch not in branches:
1167 bheads = branches[branch]
1168 # the cache returns heads ordered lowest to highest
1170 if start is not None:
1171 # filter out the heads that cannot be reached from startrev
1172 bheads = self.changelog.nodesbetween([start], bheads)[2]
1174 bheads = [h for h in bheads if
1175 ('close' not in self.changelog.read(h)[5])]
1178 def branches(self, nodes):
1180 nodes = [self.changelog.tip()]
1185 p = self.changelog.parents(n)
1186 if p[1] != nullid or p[0] == nullid:
1187 b.append((t, n, p[0], p[1]))
1192 def between(self, pairs):
1195 for top, bottom in pairs:
1196 n, l, i = top, [], 0
1199 while n != bottom and n != nullid:
1200 p = self.changelog.parents(n)[0]
1211 def findincoming(self, remote, base=None, heads=None, force=False):
1212 """Return list of roots of the subsets of missing nodes from remote
1214 If base dict is specified, assume that these nodes and their parents
1215 exist on the remote side and that no child of a node of base exists
1216 in both remote and self.
1217 Furthermore base will be updated to include the nodes that exists
1218 in self and remote but no children exists in self and remote.
1219 If a list of heads is specified, return only nodes which are heads
1220 or ancestors of these heads.
1222 All the ancestors of base are in self and in remote.
1223 All the descendants of the list returned are missing in self.
1224 (and so we know that the rest of the nodes are missing in remote, see
1227 return self.findcommonincoming(remote, base, heads, force)[1]
# NOTE(review): this excerpt elides many interior lines of the original
# function (the embedded numbering jumps, e.g. 1243 -> 1252); comments
# below describe only the code that is visible here.
1229 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1230 """Return a tuple (common, missing roots, heads) used to identify
1231 missing nodes from remote.
1233 If base dict is specified, assume that these nodes and their parents
1234 exist on the remote side and that no child of a node of base exists
1235 in both remote and self.
1236 Furthermore base will be updated to include the nodes that exists
1237 in self and remote but no children exists in self and remote.
1238 If a list of heads is specified, return only nodes which are heads
1239 or ancestors of these heads.
1241 All the ancestors of base are in self and in remote.
# m maps changelog node -> local revision; used below as a fast
# "do we have this node locally" membership test.
1243 m = self.changelog.nodemap
1252 heads = remote.heads()
# Special case: local repo is empty (tip is nullid).
1254 if self.changelog.tip() == nullid:
1256 if heads != [nullid]:
1257 return [nullid], [nullid], list(heads)
1258 return [nullid], [], []
1260 # assume we're closer to the tip than the root
1261 # and start by examining the heads
1262 self.ui.status(_("searching for changes\n"))
1273 return base.keys(), [], []
1278 # search through remote branches
1279 # a 'branch' here is a linear segment of history, with four parts:
1280 # head, root, first parent, second parent
1281 # (a branch always has two parents (or none) by definition)
1282 unknown = remote.branches(unknown)
1290 self.ui.debug(_("examining %s:%s\n")
1291 % (short(n[0]), short(n[1])))
1292 if n[0] == nullid: # found the end of the branch
1294 elif n in seenbranch:
1295 self.ui.debug(_("branch already found\n"))
1297 elif n[1] and n[1] in m: # do we know the base?
1298 self.ui.debug(_("found incomplete branch %s:%s\n")
1299 % (short(n[0]), short(n[1])))
1300 search.append(n[0:2]) # schedule branch range for scanning
1303 if n[1] not in seen and n[1] not in fetch:
1304 if n[2] in m and n[3] in m:
1305 self.ui.debug(_("found new changeset %s\n") %
1307 fetch.add(n[1]) # earliest unknown
1310 base[p] = 1 # latest known
1313 if p not in req and p not in m:
# Branch queries are batched 10 at a time to bound each wire request.
1320 self.ui.debug(_("request %d: %s\n") %
1321 (reqcnt, " ".join(map(short, r))))
1322 for p in xrange(0, len(r), 10):
1323 for b in remote.branches(r[p:p+10]):
1324 self.ui.debug(_("received %s:%s\n") %
1325 (short(b[0]), short(b[1])))
1328 # do binary search on the branches we found
1332 for n, l in zip(search, remote.between(search)):
1337 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1340 self.ui.debug(_("found new branch changeset %s\n") %
1345 self.ui.debug(_("narrowed branch search to %s:%s\n")
1346 % (short(p), short(i)))
1347 newsearch.append((p, i))
1352 # sanity check our fetch list
1355 raise error.RepoError(_("already have changeset ")
# base == {nullid} means the two repositories share no history at all;
# this is only a warning under --force, an abort otherwise.
1358 if base.keys() == [nullid]:
1360 self.ui.warn(_("warning: repository is unrelated\n"))
1362 raise util.Abort(_("repository is unrelated"))
1364 self.ui.debug(_("found new changesets starting at ") +
1365 " ".join([short(f) for f in fetch]) + "\n")
1367 self.ui.debug(_("%d total queries\n") % reqcnt)
1369 return base.keys(), list(fetch), heads
# NOTE(review): interior lines are elided in this excerpt (numbering
# jumps); comments describe only the visible code.
1371 def findoutgoing(self, remote, base=None, heads=None, force=False):
1372 """Return list of nodes that are roots of subsets not in remote
1374 If base dict is specified, assume that these nodes and their parents
1375 exist on the remote side.
1376 If a list of heads is specified, return only nodes which are heads
1377 or ancestors of these heads, and return a second element which
1378 contains all remote heads which get new children.
# Populate/refresh `base` (common nodes) via findincoming; the return
# value is not needed here, only the side effect on `base`.
1382 self.findincoming(remote, base, heads, force=force)
1384 self.ui.debug(_("common changesets up to ")
1385 + " ".join(map(short, base.keys())) + "\n")
# Start from every node we have locally, then prune what remote has.
1387 remain = set(self.changelog.nodemap)
1389 # prune everything remote has from the tree
1390 remain.remove(nullid)
1391 remove = base.keys()
1396 for p in self.changelog.parents(n):
1399 # find every node whose parents have been pruned
1401 # find every remote head that will get new children
1402 updated_heads = set()
1404 p1, p2 = self.changelog.parents(n)
1405 if p1 not in remain and p2 not in remain:
1409 updated_heads.add(p1)
1411 updated_heads.add(p2)
1413 # this is the set of all roots we have to push
1415 return subset, list(updated_heads)
# NOTE(review): the excerpt elides lines between the visible ones; in
# particular no repository-lock acquisition/try-finally frame is visible
# around this body -- confirm against the full file before editing.
1419 def pull(self, remote, heads=None, force=False):
# Negotiate what the remote has that we don't.
1422 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
# fetch == [nullid] means we share nothing: full clone-style pull.
1424 if fetch == [nullid]:
1425 self.ui.status(_("requesting all changes\n"))
1428 self.ui.status(_("no changes found\n"))
# With no explicit heads and a capable server, fall through to the
# subset protocol; otherwise a plain changegroup suffices.
1431 if heads is None and remote.capable('changegroupsubset'):
1435 cg = remote.changegroup(fetch, 'pull')
# A partial (heads-limited) pull requires server-side support.
1437 if not remote.capable('changegroupsubset'):
1438 raise util.Abort(_("Partial pull cannot be done because "
1439 "other repository doesn't support "
1440 "changegroupsubset."))
1441 cg = remote.changegroupsubset(fetch, heads, 'pull')
# Apply the received changegroup locally; returns the head-delta code.
1442 return self.addchangegroup(cg, 'pull', remote.url())
def push(self, remote, force=False, revs=None):
    '''Push outgoing changesets to *remote*.

    Two transports exist:

    - addchangegroup: assumes the local user can lock the remote
      repo (local filesystem, old ssh servers).

    - unbundle: assumes the local user cannot lock the remote repo
      (new ssh servers, http servers).
    '''
    use_unbundle = remote.capable('unbundle')
    if use_unbundle:
        return self.push_unbundle(remote, force, revs)
    else:
        return self.push_addchangegroup(remote, force, revs)
# NOTE(review): this excerpt elides many interior lines (the initial
# setup of `common`, loop headers, and several returns are not visible);
# comments describe only the visible code.
1459 def prepush(self, remote, force, revs):
1461 remote_heads = remote.heads()
# `inc` is truthy when the remote has changes we don't (unsynced).
1462 inc = self.findincoming(remote, common, remote_heads, force=force)
1464 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1465 if revs is not None:
1466 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1468 bases, heads = update, self.changelog.heads()
1470 def checkbranch(lheads, rheads, updatelh):
1472 check whether there are more local heads than remote heads on
1475 lheads: local branch heads
1476 rheads: remote branch heads
1477 updatelh: outgoing local branch heads
1482 if not revs and len(lheads) > len(rheads):
1485 updatelheads = [self.changelog.heads(x, lheads)
1487 newheads = set(sum(updatelheads, [])) & set(lheads)
1493 if r in self.changelog.nodemap:
1494 desc = self.changelog.heads(r, heads)
1495 l = [h for h in heads if h in desc]
# More post-push heads than remote currently has => refuse unless forced.
1500 if len(newheads) > len(rheads):
1504 if not rheads: # new branch requires --force
1505 self.ui.warn(_("abort: push creates new"
1506 " remote branch '%s'!\n") %
1507 self[updatelh[0]].branch())
1509 self.ui.warn(_("abort: push creates new remote heads!\n"))
1511 self.ui.status(_("(did you forget to merge?"
1512 " use push -f to force)\n"))
1517 self.ui.status(_("no changes found\n"))
1520 # Check for each named branch if we're creating new remote heads.
1521 # To be a remote head after push, node must be either:
1523 # - a local outgoing head descended from update
1524 # - a remote head that's known locally and not
1525 # ancestral to an outgoing head
1527 # New named branches cannot be created without --force.
1529 if remote_heads != [nullid]:
1530 if remote.capable('branchmap'):
# Group local heads by named branch so they can be compared with the
# remote's branchmap per branch.
1533 localhds = self.branchmap()
1536 branch = self[n].branch()
1537 if branch in localhds:
1538 localhds[branch].append(n)
1540 localhds[branch] = [n]
1542 remotehds = remote.branchmap()
1546 rheads = remotehds[lh]
1549 lheads = localhds[lh]
1550 updatelh = [upd for upd in update
1551 if self[upd].branch() == lh]
1554 if not checkbranch(lheads, rheads, updatelh):
# Server without 'branchmap': fall back to a single global head check.
1557 if not checkbranch(heads, remote_heads, update):
1561 self.ui.warn(_("note: unsynced remote changes!\n"))
1565 # use the fast path, no race possible on push
1566 cg = self._changegroup(common.keys(), 'push')
1568 cg = self.changegroupsubset(update, revs, 'push')
1569 return cg, remote_heads
def push_addchangegroup(self, remote, force, revs):
    """Push by taking the remote repository's lock and applying the
    changegroup directly with addchangegroup.

    Returns the remote addchangegroup result when there is something to
    push, otherwise prepush's status code.
    """
    # Take the remote lock for the whole operation; the excerpted code
    # never released it, which would leak the lock on any exception.
    remotelock = remote.lock()
    try:
        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        # prepush produced no changegroup; propagate its status code
        return ret[1]
    finally:
        remotelock.release()
def push_unbundle(self, remote, force, revs):
    """Push via the remote 'unbundle' command.

    The local repo finds heads on the server and works out which revs
    it must push.  Once the revs are transferred, if the server finds
    it has different heads (someone else won a commit/push race), the
    server aborts.
    """
    ret = self.prepush(remote, force, revs)
    if ret[0] is not None:
        cg, remote_heads = ret
        if force:
            # 'force' tells the server to skip its own heads check
            remote_heads = ['force']
        return remote.unbundle(cg, remote_heads, 'push')
    # prepush produced no changegroup (the excerpted code fell through
    # and returned None here); propagate prepush's status code instead.
    return ret[1]
def changegroupinfo(self, nodes, source):
    """Report how many changesets are being transferred.

    nodes:  list of outgoing changelog nodes
    source: operation tag; 'bundle' always reports the count,
            other sources only when the ui is verbose
    """
    if self.ui.verbose or source == 'bundle':
        self.ui.status(_("%d changesets found\n") % len(nodes))
    if self.ui.debugflag:
        self.ui.debug(_("list of changesets:\n"))
        # The excerpt referenced `node` with no binding; restore the
        # enumeration of the outgoing nodes.
        for node in nodes:
            self.ui.debug("%s\n" % hex(node))
# NOTE(review): this excerpt elides a large number of interior lines of
# the original function (the embedded numbering jumps repeatedly); the
# generator frame (`def gengroup():`), several loop headers, yields and
# assignments are not visible.  Comments below annotate only visible code.
1603 def changegroupsubset(self, bases, heads, source, extranodes=None):
1604 """This function generates a changegroup consisting of all the nodes
1605 that are descendents of any of the bases, and ancestors of any of
1608 It is fairly complex as determining which filenodes and which
1609 manifest nodes need to be included for the changeset to be complete
1612 Another wrinkle is doing the reverse, figuring out which changeset in
1613 the changegroup a particular filenode or manifestnode belongs to.
1615 The caller can specify some nodes that must be included in the
1616 changegroup using the extranodes argument. It should be a dict
1617 where the keys are the filenames (or 1 for the manifest), and the
1618 values are lists of (node, linknode) tuples, where node is a wanted
1619 node and linknode is the changelog node that should be transmitted as
# Fast path: with no extranodes and heads matching all local heads,
# delegate to the simpler _changegroup implementation.
1623 if extranodes is None:
1624 # can we go through the fast path ?
1626 allheads = self.heads()
1628 if heads == allheads:
1630 # parents of bases are known from both sides
1632 for p in self.changelog.parents(n):
1635 return self._changegroup(common, source)
1637 self.hook('preoutgoing', throw=True, source=source)
1639 # Set up some initial variables
1640 # Make it easy to refer to self.changelog
1642 # msng is short for missing - compute the list of changesets in this
1644 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1645 self.changegroupinfo(msng_cl_lst, source)
1646 # Some bases may turn out to be superfluous, and some heads may be
1647 # too. nodesbetween will return the minimal set of bases and heads
1648 # necessary to re-create the changegroup.
1650 # Known heads are the list of heads that it is assumed the recipient
1651 # of this changegroup will know about.
1653 # We assume that all parents of bases are known heads.
1655 knownheads.update(cl.parents(n))
1656 knownheads.discard(nullid)
1657 knownheads = list(knownheads)
1659 # Now that we know what heads are known, we can compute which
1660 # changesets are known. The recipient must know about all
1661 # changesets required to reach the known heads from the null
1663 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1665 # Transform the list into a set.
1666 has_cl_set = set(has_cl_set)
1668 # If there were no known heads, the recipient cannot be assumed to
1669 # know about any changesets.
1672 # Make it easy to refer to self.manifest
1673 mnfst = self.manifest
1674 # We don't know which manifests are missing yet
1676 # Nor do we know which filenodes are missing.
1677 msng_filenode_set = {}
1679 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1682 # A changeset always belongs to itself, so the changenode lookup
1683 # function for a changenode is identity.
1687 # If we determine that a particular file or manifest node must be a
1688 # node that the recipient of the changegroup will already have, we can
1689 # also assume the recipient will have all the parents. This function
1690 # prunes them from the set of missing nodes.
1691 def prune_parents(revlog, hasset, msngset):
1692 haslst = list(hasset)
1693 haslst.sort(key=revlog.rev)
1695 parentlst = [p for p in revlog.parents(node) if p != nullid]
1700 p = [p for p in revlog.parents(n) if p != nullid]
1703 msngset.pop(n, None)
1705 # This is a function generating function used to set up an environment
1706 # for the inner function to execute in.
1707 def manifest_and_file_collector(changedfileset):
1708 # This is an information gathering function that gathers
1709 # information from each changeset node that goes out as part of
1710 # the changegroup. The information gathered is a list of which
1711 # manifest nodes are potentially required (the recipient may
1712 # already have them) and total list of all files which were
1713 # changed in any changeset in the changegroup.
1715 # We also remember the first changenode we saw any manifest
1716 # referenced by so we can later determine which changenode 'owns'
1718 def collect_manifests_and_files(clnode):
1721 # This is to make sure we only have one instance of each
1722 # filename string for each filename.
1723 changedfileset.setdefault(f, f)
1724 msng_mnfst_set.setdefault(c[0], clnode)
1725 return collect_manifests_and_files
1727 # Figure out which manifest nodes (of the ones we think might be part
1728 # of the changegroup) the recipient must know about and remove them
1729 # from the changegroup.
1730 def prune_manifests():
1731 has_mnfst_set = set()
1732 for n in msng_mnfst_set:
1733 # If a 'missing' manifest thinks it belongs to a changenode
1734 # the recipient is assumed to have, obviously the recipient
1735 # must have that manifest.
1736 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1737 if linknode in has_cl_set:
1738 has_mnfst_set.add(n)
1739 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1741 # Use the information collected in collect_manifests_and_files to say
1742 # which changenode any manifestnode belongs to.
1743 def lookup_manifest_link(mnfstnode):
1744 return msng_mnfst_set[mnfstnode]
1746 # A function generating function that sets up the initial environment
1747 # the inner function.
1748 def filenode_collector(changedfiles):
1750 # This gathers information from each manifestnode included in the
1751 # changegroup about which filenodes the manifest node references
1752 # so we can include those in the changegroup too.
1754 # It also remembers which changenode each filenode belongs to. It
1755 # does this by assuming the a filenode belongs to the changenode
1756 # the first manifest that references it belongs to.
1757 def collect_msng_filenodes(mnfstnode):
1758 r = mnfst.rev(mnfstnode)
1759 if r == next_rev[0]:
1760 # If the last rev we looked at was the one just previous,
1761 # we only need to see a diff.
1762 deltamf = mnfst.readdelta(mnfstnode)
1763 # For each line in the delta
1764 for f, fnode in deltamf.iteritems():
1765 f = changedfiles.get(f, None)
1766 # And if the file is in the list of files we care
1769 # Get the changenode this manifest belongs to
1770 clnode = msng_mnfst_set[mnfstnode]
1771 # Create the set of filenodes for the file if
1772 # there isn't one already.
1773 ndset = msng_filenode_set.setdefault(f, {})
1774 # And set the filenode's changelog node to the
1775 # manifest's if it hasn't been set already.
1776 ndset.setdefault(fnode, clnode)
1778 # Otherwise we need a full manifest.
1779 m = mnfst.read(mnfstnode)
1780 # For every file in we care about.
1781 for f in changedfiles:
1782 fnode = m.get(f, None)
1783 # If it's in the manifest
1784 if fnode is not None:
1785 # See comments above.
1786 clnode = msng_mnfst_set[mnfstnode]
1787 ndset = msng_filenode_set.setdefault(f, {})
1788 ndset.setdefault(fnode, clnode)
1789 # Remember the revision we hope to see next.
1791 return collect_msng_filenodes
1793 # We have a list of filenodes we think we need for a file, lets remove
1794 # all those we know the recipient must have.
1795 def prune_filenodes(f, filerevlog):
1796 msngset = msng_filenode_set[f]
1798 # If a 'missing' filenode thinks it belongs to a changenode we
1799 # assume the recipient must have, then the recipient must have
1802 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1803 if clnode in has_cl_set:
1805 prune_parents(filerevlog, hasset, msngset)
1807 # A function generator function that sets up the a context for the
1809 def lookup_filenode_link_func(fname):
1810 msngset = msng_filenode_set[fname]
1811 # Lookup the changenode the filenode belongs to.
1812 def lookup_filenode_link(fnode):
1813 return msngset[fnode]
1814 return lookup_filenode_link
1816 # Add the nodes that were explicitly requested.
1817 def add_extra_nodes(name, nodes):
1818 if not extranodes or name not in extranodes:
1821 for node, linknode in extranodes[name]:
1822 if node not in nodes:
1823 nodes[node] = linknode
1825 # Now that we have all theses utility functions to help out and
1826 # logically divide up the task, generate the group.
1828 # The set of changed files starts empty.
1830 # Create a changenode group generator that will call our functions
1831 # back to lookup the owning changenode and collect information.
1832 group = cl.group(msng_cl_lst, identity,
1833 manifest_and_file_collector(changedfiles))
1837 # The list of manifests has been collected by the generator
1838 # calling our functions back.
1840 add_extra_nodes(1, msng_mnfst_set)
1841 msng_mnfst_lst = msng_mnfst_set.keys()
1842 # Sort the manifestnodes by revision number.
1843 msng_mnfst_lst.sort(key=mnfst.rev)
1844 # Create a generator for the manifestnodes that calls our lookup
1845 # and data collection functions back.
1846 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1847 filenode_collector(changedfiles))
1851 # These are no longer needed, dereference and toss the memory for
1853 msng_mnfst_lst = None
1854 msng_mnfst_set.clear()
1857 for fname in extranodes:
1858 if isinstance(fname, int):
1860 msng_filenode_set.setdefault(fname, {})
1861 changedfiles[fname] = 1
1862 # Go through all our files in order sorted by name.
1863 for fname in sorted(changedfiles):
1864 filerevlog = self.file(fname)
1865 if not len(filerevlog):
1866 raise util.Abort(_("empty or missing revlog for %s") % fname)
1867 # Toss out the filenodes that the recipient isn't really
1869 if fname in msng_filenode_set:
1870 prune_filenodes(fname, filerevlog)
1871 add_extra_nodes(fname, msng_filenode_set[fname])
1872 msng_filenode_lst = msng_filenode_set[fname].keys()
1874 msng_filenode_lst = []
1875 # If any filenodes are left, generate the group for them,
1876 # otherwise don't bother.
1877 if len(msng_filenode_lst) > 0:
1878 yield changegroup.chunkheader(len(fname))
1880 # Sort the filenodes by their revision #
1881 msng_filenode_lst.sort(key=filerevlog.rev)
1882 # Create a group generator and only pass in a changenode
1883 # lookup function as we need to collect no information
1885 group = filerevlog.group(msng_filenode_lst,
1886 lookup_filenode_link_func(fname))
1889 if fname in msng_filenode_set:
1890 # Don't need this anymore, toss it to free memory.
1891 del msng_filenode_set[fname]
1892 # Signal that no more groups are left.
1893 yield changegroup.closechunk()
1896 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1898 return util.chunkbuffer(gengroup())
def changegroup(self, basenodes, source):
    """Return a changegroup rooted at *basenodes* up to all local heads.

    Delegating to changegroupsubset() avoids a race (issue1320).
    """
    current_heads = self.heads()
    return self.changegroupsubset(basenodes, current_heads, source)
# NOTE(review): the excerpt elides interior lines; in particular the
# enclosing generator definition (the `yield`s at 1959/1965 imply one)
# is not visible here -- confirm against the full file.
1904 def _changegroup(self, common, source):
1905 """Generate a changegroup of all nodes that we have that a recipient
1908 This is much easier than the previous function as we can assume that
1909 the recipient has any changenode we aren't sending them.
1911 common is the set of common nodes between remote and self"""
1913 self.hook('preoutgoing', throw=True, source=source)
# Everything the remote lacks, plus the revision-number view of it.
1916 nodes = cl.findmissing(common)
1917 revset = set([cl.rev(n) for n in nodes])
1918 self.changegroupinfo(nodes, source)
# Yield only nodes of `log` whose linkrev is in the outgoing revset.
1923 def gennodelst(log):
1925 if log.linkrev(r) in revset:
1928 def changed_file_collector(changedfileset):
1929 def collect_changed_files(clnode):
# c[3] holds the files touched by the changeset entry.
1931 changedfileset.update(c[3])
1932 return collect_changed_files
# Map a node of any revlog back to its owning changelog node.
1934 def lookuprevlink_func(revlog):
1935 def lookuprevlink(n):
1936 return cl.node(revlog.linkrev(revlog.rev(n)))
1937 return lookuprevlink
1940 # construct a list of all changed files
1941 changedfiles = set()
1943 for chnk in cl.group(nodes, identity,
1944 changed_file_collector(changedfiles)):
1947 mnfst = self.manifest
1948 nodeiter = gennodelst(mnfst)
1949 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
# Emit one filename-prefixed group per changed file, sorted by name.
1952 for fname in sorted(changedfiles):
1953 filerevlog = self.file(fname)
1954 if not len(filerevlog):
1955 raise util.Abort(_("empty or missing revlog for %s") % fname)
1956 nodeiter = gennodelst(filerevlog)
1957 nodeiter = list(nodeiter)
1959 yield changegroup.chunkheader(len(fname))
1961 lookup = lookuprevlink_func(filerevlog)
1962 for chnk in filerevlog.group(nodeiter, lookup):
1965 yield changegroup.closechunk()
1968 self.hook('outgoing', node=hex(nodes[0]), source=source)
1970 return util.chunkbuffer(gengroup())
# NOTE(review): the excerpt elides interior lines (transaction close,
# lock handling, csmap/revmap definitions and several loop headers are
# not visible); comments annotate only the visible code.
1972 def addchangegroup(self, source, srctype, url, emptyok=False):
1973 """add changegroup to repo.
1976 - nothing changed or no source: 0
1977 - more heads than before: 1+added heads (2..n)
1978 - less heads than before: -1-removed heads (-2..-n)
1979 - number of heads stays the same: 1
1982 self.ui.debug(_("add changeset %s\n") % short(x))
1991 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1993 changesets = files = revisions = 0
1995 # write changelog data to temp files so concurrent readers will not see
# Head count before the group is applied; used for the return code.
1999 oldheads = len(cl.heads())
2001 tr = self.transaction()
# weakref proxy avoids a strong reference cycle onto the transaction.
2003 trp = weakref.proxy(tr)
2004 # pull off the changeset group
2005 self.ui.status(_("adding changesets\n"))
2007 chunkiter = changegroup.chunkiter(source)
2008 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2009 raise util.Abort(_("received changelog group is empty"))
2011 changesets = clend - clstart
2013 # pull off the manifest group
2014 self.ui.status(_("adding manifests\n"))
2015 chunkiter = changegroup.chunkiter(source)
2016 # no need to check for empty manifest group here:
2017 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2018 # no new manifest will be created and the manifest group will
2019 # be empty during the pull
2020 self.manifest.addgroup(chunkiter, revmap, trp)
2023 self.ui.status(_("adding file changes\n"))
# Stream consists of per-file groups, each prefixed by the filename chunk.
2025 f = changegroup.getchunk(source)
2028 self.ui.debug(_("adding %s revisions\n") % f)
2031 chunkiter = changegroup.chunkiter(source)
2032 if fl.addgroup(chunkiter, revmap, trp) is None:
2033 raise util.Abort(_("received file revlog group is empty"))
2034 revisions += len(fl) - o
2037 newheads = len(cl.heads())
2039 if oldheads and newheads != oldheads:
2040 heads = _(" (%+d heads)") % (newheads - oldheads)
2042 self.ui.status(_("added %d changesets"
2043 " with %d changes to %d files%s\n")
2044 % (changesets, revisions, files, heads))
# Expose pending data to pretxn hooks via HG_PENDING (writepending).
2047 p = lambda: cl.writepending() and self.root or ""
2048 self.hook('pretxnchangegroup', throw=True,
2049 node=hex(cl.node(clstart)), source=srctype,
2052 # make changelog see real files again
2060 # forcefully update the on-disk branch cache
2061 self.ui.debug(_("updating the branch cache\n"))
2063 self.hook("changegroup", node=hex(cl.node(clstart)),
2064 source=srctype, url=url)
2066 for i in xrange(clstart, clend):
2067 self.hook("incoming", node=hex(cl.node(i)),
2068 source=srctype, url=url)
2070 # never return 0 here:
2071 if newheads < oldheads:
2072 return newheads - oldheads - 1
2074 return newheads - oldheads + 1
# NOTE(review): the excerpt elides interior lines (the status-code
# branching, readline calls, lock frame and invalidation are not fully
# visible); comments annotate only the visible code.
2077 def stream_in(self, remote):
2078 fp = remote.stream_out()
2083 raise error.ResponseError(
2084 _('Unexpected response from remote server:'), l)
# Distinct server status codes map to distinct user-facing aborts.
2086 raise util.Abort(_('operation forbidden by server'))
2088 raise util.Abort(_('locking the remote repository failed'))
2090 raise util.Abort(_('the server sent an unknown error code'))
2091 self.ui.status(_('streaming all changes\n'))
# Header line: "<total_files> <total_bytes>".
2094 total_files, total_bytes = map(int, l.split(' ', 1))
2095 except (ValueError, TypeError):
2096 raise error.ResponseError(
2097 _('Unexpected response from remote server:'), l)
2098 self.ui.status(_('%d files to transfer, %s of data\n') %
2099 (total_files, util.bytecount(total_bytes)))
2101 for i in xrange(total_files):
2102 # XXX doesn't support '\n' or '\r' in filenames
# Per-file header: "<name>\0<size>".
2105 name, size = l.split('\0', 1)
2107 except (ValueError, TypeError):
2108 raise error.ResponseError(
2109 _('Unexpected response from remote server:'), l)
2110 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2111 # for backwards compat, name was partially encoded
2112 ofp = self.sopener(store.decodedir(name), 'w')
2113 for chunk in util.filechunkiter(fp, limit=size):
2116 elapsed = time.time() - start
2119 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2120 (util.bytecount(total_bytes), elapsed,
2121 util.bytecount(total_bytes / elapsed)))
# Return value mimics addchangegroup's "heads changed" convention.
2123 return len(self.heads()) + 1
def clone(self, remote, heads=[], stream=False):
    '''clone remote repository.

    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # All clients able to request uncompressed clones can read the repo
    # formats known to every server able to serve them.
    #
    # If the revlog format ever changes, the client will have to check
    # version and format flags on the "stream" capability, and use
    # uncompressed transfer only when compatible.

    can_stream = stream and not heads and remote.capable('stream')
    if can_stream:
        return self.stream_in(remote)
    return self.pull(remote, heads)
2144 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    The pairs are copied eagerly so the returned closure keeps no
    reference back to the object that supplied *files* (used to avoid
    circular references so destructors work).  In the excerpted code the
    rename loop was not wrapped in a returned closure, which would have
    performed the renames immediately instead of deferring them.
    """
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
def instance(ui, path, create):
    """Open (or create, if *create* is true) the local repository at
    *path*, stripping any leading 'file' scheme first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)