{
  localUrl: '../page/total_alignment.html',
  arbitalUrl: 'https://arbital.com/p/total_alignment',
  rawJsonUrl: '../raw/41k.json',
  likeableId: '0',
  likeableType: 'page',
  myLikeValue: '0',
  likeCount: '0',
  dislikeCount: '0',
  likeScore: '0',
  individualLikes: [],
  pageId: 'total_alignment',
  edit: '1',
  editSummary: '',
  prevEdit: '0',
  currentEdit: '1',
  wasPublished: 'true',
  type: 'wiki',
  title: 'Total alignment',
  clickbait: 'We say that an advanced AI is "totally aligned" when it knows *exactly* which outcomes and plans are beneficial, with no further user input.',
  textLength: '1462',
  alias: 'total_alignment',
  externalUrl: '',
  sortChildrenBy: 'likes',
  hasVote: 'false',
  voteType: '',
  votesAnonymous: 'false',
  editCreatorId: 'EliezerYudkowsky',
  editCreatedAt: '2016-06-06 19:52:05',
  pageCreatorId: 'EliezerYudkowsky',
  pageCreatedAt: '2016-06-06 19:52:05',
  seeDomainId: '0',
  editDomainId: 'EliezerYudkowsky',
  submitToDomainId: '0',
  isAutosave: 'false',
  isSnapshot: 'false',
  isLiveEdit: 'true',
  isMinorEdit: 'false',
  indirectTeacher: 'false',
  todoCount: '0',
  isEditorComment: 'false',
  isApprovedComment: 'true',
  isResolved: 'false',
  snapshotText: '',
  anchorContext: '',
  anchorText: '',
  anchorOffset: '0',
  mergedInto: '',
  isDeleted: 'false',
  viewCount: '141',
  text: 'An advanced agent can be said to be "totally aligned" when it can assess the *exact* [-55] of well-described outcomes and hence the *exact* subjective value of actions, policies, and plans; where [-55] has its overridden meaning of a metasyntactic variable standing in for "whatever we really do or really should value in the world or want from an Artificial Intelligence" (this is the same as "normative" if the speaker believes in normativity).  That is:  It's an advanced agent that captures *all* the distinctions we would make or should make about which outcomes are good or bad; it has "full coverage" of the true or intended goals; and it correctly resolves every [-2fr].\n\nWe don't need to try to give such an AI simplified orders like, e.g., "try to have a [2pf lower impact]" because we're worried about, e.g., a [-42] problem when trying to draw exact boundaries around what constitutes a bad impact.  The AI knows *everything* worth knowing about which impacts are bad, and even if it thinks of a really weird, exotic plan, it will still be able to figure out which aspects of this plan match our intended notion of [-55] or a normative notion of [-55].\n\nIf this agent does not systematically underestimate the probability of bad outcomes or overestimate the probability of good outcomes, and its maximization over policies is not subject to adverse selection, then its estimates of expected [-55] will be well-calibrated even from our own outside standpoint.',
  metaText: '',
  isTextLoaded: 'true',
  isSubscribedToDiscussion: 'false',
  isSubscribedToUser: 'false',
  isSubscribedAsMaintainer: 'false',
  discussionSubscriberCount: '1',
  maintainerCount: '1',
  userSubscriberCount: '0',
  lastVisit: '',
  hasDraft: 'false',
  votes: [],
  voteSummary: 'null',
  muVoteSummary: '0',
  voteScaling: '0',
  currentUserVote: '-2',
  voteCount: '0',
  lockedVoteType: '',
  maxEditEver: '0',
  redLinkCount: '0',
  lockedBy: '',
  lockedUntil: '',
  nextPageId: '',
  prevPageId: '',
  usedAsMastery: 'false',
  proposalEditNum: '0',
  permissions: {
    edit: {
      has: 'false',
      reason: 'You don\'t have domain permission to edit this page'
    },
    proposeEdit: {
      has: 'true',
      reason: ''
    },
    delete: {
      has: 'false',
      reason: 'You don\'t have domain permission to delete this page'
    },
    comment: {
      has: 'false',
      reason: 'You can\'t comment in this domain because you are not a member'
    },
    proposeComment: {
      has: 'true',
      reason: ''
    }
  },
  summaries: {},
  creatorIds: [
    'EliezerYudkowsky'
  ],
  childIds: [],
  parentIds: [
    'value_alignment_problem'
  ],
  commentIds: [],
  questionIds: [],
  tagIds: [
    'stub_meta_tag'
  ],
  relatedIds: [],
  markIds: [],
  explanations: [],
  learnMore: [],
  requirements: [],
  subjects: [],
  lenses: [],
  lensParentId: '',
  pathPages: [],
  learnMoreTaughtMap: {},
  learnMoreCoveredMap: {},
  learnMoreRequiredMap: {},
  editHistory: {},
  domainSubmissions: {},
  answers: [],
  answerCount: '0',
  commentCount: '0',
  newCommentCount: '0',
  linkedMarkCount: '0',
  changeLogs: [
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '11836',
      pageId: 'total_alignment',
      userId: 'EliezerYudkowsky',
      edit: '1',
      type: 'newEdit',
      createdAt: '2016-06-06 19:52:05',
      auxPageId: '',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '11832',
      pageId: 'total_alignment',
      userId: 'EliezerYudkowsky',
      edit: '1',
      type: 'newTag',
      createdAt: '2016-06-06 19:42:33',
      auxPageId: 'stub_meta_tag',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '11831',
      pageId: 'total_alignment',
      userId: 'EliezerYudkowsky',
      edit: '1',
      type: 'newParent',
      createdAt: '2016-06-06 19:42:29',
      auxPageId: 'value_alignment_problem',
      oldSettingsValue: '',
      newSettingsValue: ''
    }
  ],
  feedSubmissions: [],
  searchStrings: {},
  hasChildren: 'false',
  hasParents: 'true',
  redAliases: {},
  improvementTagIds: [],
  nonMetaTagIds: [],
  todos: [],
  slowDownMap: 'null',
  speedUpMap: 'null',
  arcPageIds: 'null',
  contentRequests: {}
}