{
  localUrl: '../page/moral_hazard.html',
  arbitalUrl: 'https://arbital.com/p/moral_hazard',
  rawJsonUrl: '../raw/2sb.json',
  likeableId: '1707',
  likeableType: 'page',
  myLikeValue: '0',
  likeCount: '0',
  dislikeCount: '0',
  likeScore: '0',
  individualLikes: [],
  pageId: 'moral_hazard',
  edit: '1',
  editSummary: '',
  prevEdit: '0',
  currentEdit: '1',
  wasPublished: 'true',
  type: 'wiki',
  title: 'Moral hazards in AGI development',
  clickbait: '"Moral hazard" is when owners of an advanced AGI give in to the temptation to do things with it that the rest of us would regard as 'bad', like, say, declaring themselves God-Emperor.',
  textLength: '1196',
  alias: 'moral_hazard',
  externalUrl: '',
  sortChildrenBy: 'likes',
  hasVote: 'false',
  voteType: '',
  votesAnonymous: 'false',
  editCreatorId: 'EliezerYudkowsky',
  editCreatedAt: '2016-03-23 22:51:40',
  pageCreatorId: 'EliezerYudkowsky',
  pageCreatedAt: '2016-03-23 22:51:40',
  seeDomainId: '0',
  editDomainId: 'EliezerYudkowsky',
  submitToDomainId: '0',
  isAutosave: 'false',
  isSnapshot: 'false',
  isLiveEdit: 'true',
  isMinorEdit: 'false',
  indirectTeacher: 'false',
  todoCount: '0',
  isEditorComment: 'false',
  isApprovedComment: 'true',
  isResolved: 'false',
  snapshotText: '',
  anchorContext: '',
  anchorText: '',
  anchorOffset: '0',
  mergedInto: '',
  isDeleted: 'false',
  viewCount: '166',
  text: '"Moral hazard" is when the directors of an advanced AGI give in to the temptation to direct the AGI in ways that the rest of us would regard as 'bad', like, say, declaring themselves God-Emperors.  Limiting the duration of the human programmers' exposure to the temptations of power is one reason to want a [1g3 non-human-commanded, internally sovereign] AGI *eventually,* directed by something like [cev coherent extrapolated volition], even if the far more difficult safety issues mean we shouldn't build the *first* AGI that way.  Anyone recommending "oversight" as a guard against moral hazard is advised to think hard about moral hazard in the overseers.\n\nA smart setup with any other body "overseeing" the programmers of a [6w Task AGI], if we don't just want the moral hazard transferred to people who may be even *less* trustworthy, probably means making sure that *in practice* both the programmers and the overseers have to agree on a Task before it gets carried out, not that one side can *in practice* do things even if the other side disagrees, where "in practice" would include e.g. it only taking one month to redevelop the technology in a way that responded to only the overseers.',
  metaText: '',
  isTextLoaded: 'true',
  isSubscribedToDiscussion: 'false',
  isSubscribedToUser: 'false',
  isSubscribedAsMaintainer: 'false',
  discussionSubscriberCount: '1',
  maintainerCount: '1',
  userSubscriberCount: '0',
  lastVisit: '',
  hasDraft: 'false',
  votes: [],
  voteSummary: 'null',
  muVoteSummary: '0',
  voteScaling: '0',
  currentUserVote: '-2',
  voteCount: '0',
  lockedVoteType: '',
  maxEditEver: '0',
  redLinkCount: '0',
  lockedBy: '',
  lockedUntil: '',
  nextPageId: '',
  prevPageId: '',
  usedAsMastery: 'false',
  proposalEditNum: '0',
  permissions: {
    edit: {
      has: 'false',
      reason: 'You don\'t have domain permission to edit this page'
    },
    proposeEdit: {
      has: 'true',
      reason: ''
    },
    delete: {
      has: 'false',
      reason: 'You don\'t have domain permission to delete this page'
    },
    comment: {
      has: 'false',
      reason: 'You can\'t comment in this domain because you are not a member'
    },
    proposeComment: {
      has: 'true',
      reason: ''
    }
  },
  summaries: {},
  creatorIds: [
    'EliezerYudkowsky'
  ],
  childIds: [],
  parentIds: [
    'value_achievement_dilemma'
  ],
  commentIds: [],
  questionIds: [],
  tagIds: [
    'stub_meta_tag'
  ],
  relatedIds: [],
  markIds: [],
  explanations: [],
  learnMore: [],
  requirements: [],
  subjects: [],
  lenses: [],
  lensParentId: '',
  pathPages: [],
  learnMoreTaughtMap: {},
  learnMoreCoveredMap: {},
  learnMoreRequiredMap: {},
  editHistory: {},
  domainSubmissions: {},
  answers: [],
  answerCount: '0',
  commentCount: '0',
  newCommentCount: '0',
  linkedMarkCount: '0',
  changeLogs: [
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '8988',
      pageId: 'moral_hazard',
      userId: 'EliezerYudkowsky',
      edit: '1',
      type: 'newTag',
      createdAt: '2016-03-23 22:52:01',
      auxPageId: 'stub_meta_tag',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '8986',
      pageId: 'moral_hazard',
      userId: 'EliezerYudkowsky',
      edit: '1',
      type: 'newEdit',
      createdAt: '2016-03-23 22:51:40',
      auxPageId: '',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '8985',
      pageId: 'moral_hazard',
      userId: 'EliezerYudkowsky',
      edit: '0',
      type: 'newParent',
      createdAt: '2016-03-23 22:39:20',
      auxPageId: 'value_achievement_dilemma',
      oldSettingsValue: '',
      newSettingsValue: ''
    }
  ],
  feedSubmissions: [],
  searchStrings: {},
  hasChildren: 'false',
  hasParents: 'true',
  redAliases: {},
  improvementTagIds: [],
  nonMetaTagIds: [],
  todos: [],
  slowDownMap: 'null',
  speedUpMap: 'null',
  arcPageIds: 'null',
  contentRequests: {}
}