use hir::map::DefPathHash;
use ich::{self, CachingCodemapView};
use session::config::DebugInfoLevel::NoDebugInfo;
-use ty;
+use ty::TyCtxt;
use util::nodemap::{NodeMap, ItemLocalMap};
use std::hash as std_hash;
/// a reference to the TyCtxt) and it holds a few caches for speeding up various
/// things (e.g. each DefId/DefPath is only hashed once).
// NOTE(review): the '-'/'+' prefixed lines below are unresolved diff residue;
// the '+' line is the intended replacement for the matching '-' line.
pub struct StableHashingContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
// The type context this hashing context was created from (see `new()`).
- tcx: ty::TyCtxt<'a, 'gcx, 'tcx>,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
// Caching view of the codemap, used for BytePos -> line/column lookups
// when hashing spans.
codemap: CachingCodemapView<'gcx>,
// Whether spans are hashed at all; initialized from the debuginfo level
// in `new()` (spans are skipped when no debuginfo is emitted).
hash_spans: bool,
// Whether item bodies are hashed — semantics not visible in this chunk;
// TODO confirm against the full file.
hash_bodies: bool,
// NOTE(review): remaining fields and the closing brace are elided from
// this chunk.
impl<'a, 'gcx, 'tcx> StableHashingContext<'a, 'gcx, 'tcx> {
/// Creates a new `StableHashingContext` from the given type context.
// NOTE(review): '-'/'+' lines are unresolved diff residue; the '+'
// signature (unqualified `TyCtxt`) is the intended one.
- pub fn new(tcx: ty::TyCtxt<'a, 'gcx, 'tcx>) -> Self {
+ pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Self {
// Only hash spans when debuginfo is actually being emitted.
let hash_spans_initial = tcx.sess.opts.debuginfo != NoDebugInfo;
let check_overflow_initial = tcx.sess.overflow_checks();
// NOTE(review): the struct construction returning `Self` is elided
// from this chunk.
}
#[inline]
- pub fn tcx(&self) -> ty::TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
self.tcx
}
// Stable-hashes the receiver according to the context's NodeId hashing
// mode. NOTE(review): the impl header is elided from this chunk — the
// receiver appears to be a `NodeId` (it is converted via
// `node_to_hir_id`); TODO confirm. '-'/'+' lines are unresolved diff
// residue; the '+' side resolves the NodeId to its HirId once up front
// and reuses it in every match arm.
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
+ let hir_id = hcx.tcx.hir.node_to_hir_id(*self);
match hcx.node_id_hashing_mode {
NodeIdHashingMode::Ignore => {
// Most NodeIds in the HIR can be ignored, but if there is a
// corresponding entry in the `trait_map` we need to hash that.
// Make sure we don't ignore too much by checking that there is
// no entry in a debug_assert!().
- debug_assert!(hcx.tcx.trait_map.get(self).is_none());
+ debug_assert!(hcx.tcx.in_scope_traits(hir_id).is_none());
}
NodeIdHashingMode::HashDefPath => {
// Hash the stable HirId instead of the unstable NodeId.
- hcx.tcx.hir.definitions().node_to_hir_id(*self).hash_stable(hcx, hasher);
+ hir_id.hash_stable(hcx, hasher);
}
NodeIdHashingMode::HashTraitsInScope => {
- if let Some(traits) = hcx.tcx.trait_map.get(self) {
+ if let Some(traits) = hcx.tcx.in_scope_traits(hir_id) {
// The ordering of the candidates is not fixed. So we hash
// the def-ids and then sort them and hash the collection.
let mut candidates: AccumulateVec<[_; 8]> =
// NOTE(review): the rest of this arm and the function's closing
// braces are elided from this chunk.
// NOTE(review): mid-function fragment — presumably the `HashStable`
// impl for `Span` (it reads lo/hi byte positions and a SyntaxContext);
// TODO confirm. '-'/'+' lines are unresolved diff residue: the '+'
// side switches from direct field access (`self.hi`) to the accessor
// methods (`self.hi()`), matching a Span API that made the fields
// private.
// If this is not an empty or invalid span, we want to hash the last
// position that belongs to it, as opposed to hashing the first
// position past it.
- let span_hi = if self.hi > self.lo {
+ let span_hi = if self.hi() > self.lo() {
// We might end up in the middle of a multibyte character here,
// but that's OK, since we are not trying to decode anything at
// this position.
- self.hi - ::syntax_pos::BytePos(1)
+ self.hi() - ::syntax_pos::BytePos(1)
} else {
// Empty/invalid span: hash the position as-is.
- self.hi
+ self.hi()
};
{
// Map the start position to (file name, line, column), falling
// back to a "???" placeholder when the lookup fails.
- let loc1 = hcx.codemap().byte_pos_to_line_and_col(self.lo);
+ let loc1 = hcx.codemap().byte_pos_to_line_and_col(self.lo());
let loc1 = loc1.as_ref()
.map(|&(ref fm, line, col)| (&fm.name[..], line, col.to_usize()))
.unwrap_or(("???", 0, 0));
}
}
// Hash a one-byte discriminant for whether the span carries a
// non-empty syntax context (e.g. from macro expansion).
- if self.ctxt == SyntaxContext::empty() {
+ if self.ctxt() == SyntaxContext::empty() {
0u8.hash_stable(hcx, hasher);
} else {
1u8.hash_stable(hcx, hasher);
// NOTE(review): the remainder of this branch is elided from this chunk.