{
  localUrl: '../page/fair_problem_class.html',
  arbitalUrl: 'https://arbital.com/p/fair_problem_class',
  rawJsonUrl: '../raw/7d6.json',
  likeableId: '0',
  likeableType: 'page',
  myLikeValue: '0',
  likeCount: '0',
  dislikeCount: '0',
  likeScore: '0',
  individualLikes: [],
  pageId: 'fair_problem_class',
  edit: '1',
  editSummary: '',
  prevEdit: '0',
  currentEdit: '1',
  wasPublished: 'true',
  type: 'wiki',
  title: 'Fair problem class',
  clickbait: 'A problem is \'fair\' (according to logical decision theory) when only the results matter and not how we get there.',
  textLength: '825',
  alias: 'fair_problem_class',
  externalUrl: '',
  sortChildrenBy: 'likes',
  hasVote: 'false',
  voteType: '',
  votesAnonymous: 'false',
  editCreatorId: 'EliezerYudkowsky',
  editCreatedAt: '2017-01-12 06:02:28',
  pageCreatorId: 'EliezerYudkowsky',
  pageCreatedAt: '2017-01-12 06:02:28',
  seeDomainId: '0',
  editDomainId: '15',
  submitToDomainId: '0',
  isAutosave: 'false',
  isSnapshot: 'false',
  isLiveEdit: 'true',
  isMinorEdit: 'false',
  indirectTeacher: 'false',
  todoCount: '0',
  isEditorComment: 'false',
  isApprovedComment: 'false',
  isResolved: 'false',
  snapshotText: '',
  anchorContext: '',
  anchorText: '',
  anchorOffset: '0',
  mergedInto: '',
  isDeleted: 'false',
  viewCount: '205',
  text: 'A \'fair\' problem class, in logical decision theory, is a class of problems in which agents\' payoffs depend only on their choices and policies, not directly on their algorithms.  For example, suppose my problem is "If an agent chooses X, I will give them \$10, and if they choose Y, I will give them \$0."  This problem is \'fair\' because it makes no mention of *why* the agent chose X or Y; only the agent\'s actual choice matters.  On the other hand, suppose I say, "I will give an agent \$10 if it chooses X over Y *because* it is an alphabetizing agent and X comes before Y in alphabetical order; but if the agent chooses X for any other reason, such as wanting money, I will give it \$0."  This problem is not \'fair\' because it rewards the agent for having a particular algorithm, apart from any output or other abstract behavior of that algorithm.',
  metaText: '',
  isTextLoaded: 'true',
  isSubscribedToDiscussion: 'false',
  isSubscribedToUser: 'false',
  isSubscribedAsMaintainer: 'false',
  discussionSubscriberCount: '1',
  maintainerCount: '1',
  userSubscriberCount: '0',
  lastVisit: '',
  hasDraft: 'false',
  votes: [],
  voteSummary: 'null',
  muVoteSummary: '0',
  voteScaling: '0',
  currentUserVote: '-2',
  voteCount: '0',
  lockedVoteType: '',
  maxEditEver: '0',
  redLinkCount: '0',
  lockedBy: '',
  lockedUntil: '',
  nextPageId: '',
  prevPageId: '',
  usedAsMastery: 'false',
  proposalEditNum: '0',
  permissions: {
    edit: {
      has: 'false',
      reason: 'You don\'t have domain permission to edit this page'
    },
    proposeEdit: {
      has: 'true',
      reason: ''
    },
    delete: {
      has: 'false',
      reason: 'You don\'t have domain permission to delete this page'
    },
    comment: {
      has: 'false',
      reason: 'You can\'t comment in this domain because you are not a member'
    },
    proposeComment: {
      has: 'true',
      reason: ''
    }
  },
  summaries: {},
  creatorIds: [
    'EliezerYudkowsky'
  ],
  childIds: [],
  parentIds: [
    'logical_dt'
  ],
  commentIds: [],
  questionIds: [],
  tagIds: [
    'start_meta_tag'
  ],
  relatedIds: [],
  markIds: [],
  explanations: [],
  learnMore: [],
  requirements: [],
  subjects: [],
  lenses: [],
  lensParentId: '',
  pathPages: [],
  learnMoreTaughtMap: {},
  learnMoreCoveredMap: {},
  learnMoreRequiredMap: {},
  editHistory: {},
  domainSubmissions: {},
  answers: [],
  answerCount: '0',
  commentCount: '0',
  newCommentCount: '0',
  linkedMarkCount: '0',
  changeLogs: [
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '21638',
      pageId: 'fair_problem_class',
      userId: 'EliezerYudkowsky',
      edit: '0',
      type: 'newTag',
      createdAt: '2017-01-12 06:02:29',
      auxPageId: 'start_meta_tag',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '21640',
      pageId: 'fair_problem_class',
      userId: 'EliezerYudkowsky',
      edit: '0',
      type: 'newParent',
      createdAt: '2017-01-12 06:02:29',
      auxPageId: 'logical_dt',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '21637',
      pageId: 'fair_problem_class',
      userId: 'EliezerYudkowsky',
      edit: '1',
      type: 'newEdit',
      createdAt: '2017-01-12 06:02:28',
      auxPageId: '',
      oldSettingsValue: '',
      newSettingsValue: ''
    }
  ],
  feedSubmissions: [],
  searchStrings: {},
  hasChildren: 'false',
  hasParents: 'true',
  redAliases: {},
  improvementTagIds: [],
  nonMetaTagIds: [],
  todos: [],
  slowDownMap: 'null',
  speedUpMap: 'null',
  arcPageIds: 'null',
  contentRequests: {}
}
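
A minimal sketch of the distinction drawn in the text field above, assuming hypothetical agent classes (AlphabetizingAgent, MoneySeekingAgent) that are not part of the page record: a fair problem's payoff function reads only the agent's choice, while an unfair one also inspects the agent's algorithm itself.

# Minimal sketch (hypothetical names, not from the page): a 'fair' problem
# pays based only on the agent's choice; an 'unfair' one inspects the
# agent's algorithm (here, its class) as well.

class AlphabetizingAgent:
    def choose(self, options):
        # Picks whichever option comes first in alphabetical order.
        return sorted(options)[0]

class MoneySeekingAgent:
    def choose(self, options):
        # Picks X because X is the choice that pays $10.
        return "X"

def fair_problem(agent):
    # Payoff depends only on the chosen action, never on how it was chosen.
    return 10 if agent.choose(["X", "Y"]) == "X" else 0

def unfair_problem(agent):
    # Payoff also depends on *why* the agent chose X: only alphabetizers
    # are paid, even though both agents produce the same output.
    if agent.choose(["X", "Y"]) == "X" and isinstance(agent, AlphabetizingAgent):
        return 10
    return 0

for agent in (AlphabetizingAgent(), MoneySeekingAgent()):
    print(type(agent).__name__, fair_problem(agent), unfair_problem(agent))
# -> AlphabetizingAgent 10 10
# -> MoneySeekingAgent 10 0

Both agents choose X, so both are paid in the fair problem; only the alphabetizer is paid in the unfair one, which is exactly the sense in which the second problem rewards the algorithm rather than its behavior.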