use alloc::{
    boxed::Box,
    collections::BTreeMap,
    rc::Rc,
    string::{String, ToString},
    vec::Vec,
};
use core::{
    cell::RefCell,
    mem::{size_of, swap, transmute},
    ptr, slice,
};
use goblin::{
    elf::{
        header::ET_DYN,
        program_header,
        r#dyn::{Dyn, DT_DEBUG},
        reloc,
        sym,
        Elf,
    },
    error::{Error, Result},
};

use crate::{
    c_str::CString,
    fs::File,
    header::{errno::STR_ERROR, fcntl, sys_mman, unistd},
    io::Read,
    platform::{errno, types::c_void},
};

use super::{
    access::access,
    callbacks::LinkerCallbacks,
    debug::{RTLDDebug, RTLDState, _dl_debug_state, _r_debug},
    library::{DepTree, Library},
    tcb::{Master, Tcb},
    PAGE_SIZE,
};

#[cfg(target_os = "redox")]
const PATH_SEP: char = ';';

#[cfg(target_os = "linux")]
const PATH_SEP: char = ':';
pub struct DSO {
    pub name: String,
    pub base_addr: usize,
    pub entry_point: usize,
}

#[derive(Clone, Copy, Debug)]
pub struct Symbol {
    pub value: usize,
    pub base: usize,
    pub size: usize,
}

impl Symbol {
    pub fn as_ptr(self) -> *mut c_void {
        (self.base + self.value) as *mut c_void
    }
}
pub struct Linker {
    // Used by load
    /// Library path to search when loading a library by name
    library_path: String,
    root: Library,
    verbose: bool,
    tls_index_offset: usize,
    lib_spaces: BTreeMap<usize, Library>,
    counter: usize,
    pub cbs: Rc<RefCell<LinkerCallbacks>>,
}
impl Linker {
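    /// A minimal usage sketch (library names and paths below are
    /// illustrative, not part of this API): load an object plus its
    /// dependencies, link everything, then jump to the returned entry point.
    ///
    /// ```ignore
    /// let mut linker = Linker::new("/lib", false);
    /// linker.load("libfoo.so", "/lib/libfoo.so")?;
    /// let entry = linker.link(Some("libfoo.so"), None, None)?;
    /// ```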
    pub fn new(library_path: &str, verbose: bool) -> Self {
        Self {
            library_path: library_path.to_string(),
            root: Library::new(),
            verbose,
            tls_index_offset: 0,
            lib_spaces: BTreeMap::new(),
            counter: 1,
            cbs: Rc::new(RefCell::new(LinkerCallbacks::new())),
        }
    }
    pub fn load(&mut self, name: &str, path: &str) -> Result<()> {
        let mut lib: Library = Library::new();
        swap(&mut lib, &mut self.root);
        lib.dep_tree = self.load_recursive(name, path, &mut lib)?;
        swap(&mut lib, &mut self.root);
        if self.verbose {
            println!("Dep tree: {:#?}", self.root.dep_tree);
        }
        Ok(())
    }
    pub fn unload(&mut self, libspace: usize) {
        if let Some(lib) = self.lib_spaces.remove(&libspace) {
            for (_, (_, mmap)) in lib.mmaps {
                unsafe { sys_mman::munmap(mmap.as_mut_ptr() as *mut c_void, mmap.len()) };
            }
        }
    }
    fn load_recursive(&mut self, name: &str, path: &str, lib: &mut Library) -> Result<DepTree> {
        if self.verbose {
            println!("load {}: {}", name, path);
        }
        if lib.cir_dep.contains(name) {
            return Err(Error::Malformed(format!(
                "Circular dependency: {} is a dependency of itself",
                name
            )));
        }

        let mut deps = DepTree::new(name.to_string());
        let mut data = Vec::new();
        lib.cir_dep.insert(name.to_string());
        let path_c = CString::new(path)
            .map_err(|err| Error::Malformed(format!("invalid path '{}': {}", path, err)))?;

        {
            let flags = fcntl::O_RDONLY | fcntl::O_CLOEXEC;
            let mut file = File::open(&path_c, flags)
                .map_err(|err| Error::Malformed(format!("failed to open '{}': {}", path, err)))?;
            file.read_to_end(&mut data)
                .map_err(|err| Error::Malformed(format!("failed to read '{}': {}", path, err)))?;
        }

        deps.deps = self.load_data(name, data.into_boxed_slice(), lib)?;
        lib.cir_dep.remove(name);
        Ok(deps)
    }
    fn load_data(
        &mut self,
        name: &str,
        data: Box<[u8]>,
        lib: &mut Library,
    ) -> Result<Vec<DepTree>> {
        let elf = Elf::parse(&data)?;
        //println!("{:#?}", elf);
        let mut deps = Vec::new();
        for library in elf.libraries.iter() {
            if let Some(dep) = self._load_library(library, lib)? {
                deps.push(dep);
            }
        }
        lib.objects.insert(name.to_string(), data);
        Ok(deps)
    }
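    // Loads a library into a fresh, isolated `Library` space and hands back
    // an opaque id; callers pass that id as `libspace` to `get_sym`, `link`,
    // `run_init`/`run_fini` and `unload` (the handle behind a dlopen-style
    // API).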
    pub fn load_library(&mut self, name: &str) -> Result<usize> {
        let mut lib = Library::new();
        self._load_library(name, &mut lib)?;
        let ret = self.counter;
        self.lib_spaces.insert(ret, lib);
        self.counter += 1;
        Ok(ret)
    }
    fn _load_library(&mut self, name: &str, lib: &mut Library) -> Result<Option<DepTree>> {
        if lib.objects.contains_key(name) || self.root.objects.contains_key(name) {
            // It was already resolved, so we don't need to worry about it
            Ok(None)
        } else if name.contains('/') {
            Ok(Some(self.load_recursive(name, name, lib)?))
        } else {
            let library_path = self.library_path.clone();
            for part in library_path.split(PATH_SEP) {
                let path = if part.is_empty() {
                    format!("./{}", name)
                } else {
                    format!("{}/{}", part, name)
                };
                if self.verbose {
                    println!("check {}", path);
                }
                let access = unsafe {
                    let path_c = CString::new(path.as_bytes()).map_err(|err| {
                        Error::Malformed(format!("invalid path '{}': {}", path, err))
                    })?;
                    // TODO: Use R_OK | X_OK
                    // We cannot use the unix stdlib here because errno is a
                    // thread-local variable and fs:[0] is not set up yet.
                    access(path_c.as_ptr(), unistd::F_OK) == 0
                };
                if access {
                    return Ok(Some(self.load_recursive(name, &path, lib)?));
                }
            }
            Err(Error::Malformed(format!("failed to locate '{}'", name)))
        }
    }
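    // Walks the dynamic symbol table of a freshly mapped object and buckets
    // the defined symbols by binding; strong (STB_GLOBAL) definitions are
    // kept apart from weak (STB_WEAK) ones so lookups can prefer the former.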
    fn collect_syms(
        elf: &Elf,
        mmap: &[u8],
        verbose: bool,
    ) -> Result<(BTreeMap<String, Symbol>, BTreeMap<String, Symbol>)> {
        let mut globals = BTreeMap::new();
        let mut weak_syms = BTreeMap::new();
        for sym in elf.dynsyms.iter() {
            let bind = sym.st_bind();
            if sym.st_value == 0 || ![sym::STB_GLOBAL, sym::STB_WEAK].contains(&bind) {
                continue;
            }
            let name: String;
            let value: Symbol;
            if let Some(name_res) = elf.dynstrtab.get(sym.st_name) {
                name = name_res?.to_string();
                value = if is_pie_enabled(elf) {
                    Symbol {
                        base: mmap.as_ptr() as usize,
                        value: sym.st_value as usize,
                        size: sym.st_size as usize,
                    }
                } else {
                    Symbol {
                        base: 0,
                        value: sym.st_value as usize,
                        size: sym.st_size as usize,
                    }
                };
            } else {
                continue;
            }
            match bind {
                sym::STB_GLOBAL => {
                    if verbose {
                        println!(" global {}: {:x?} = {:p}", &name, sym, value.as_ptr());
                    }
                    globals.insert(name, value);
                }
                sym::STB_WEAK => {
                    if verbose {
                        println!(" weak {}: {:x?} = {:p}", &name, sym, value.as_ptr());
                    }
                    weak_syms.insert(name, value);
                }
                _ => unreachable!(),
            }
        }
        Ok((globals, weak_syms))
    }
    pub fn get_sym(&self, name: &str, libspace: Option<usize>) -> Option<Symbol> {
        match libspace {
            Some(id) => {
                let lib = self.lib_spaces.get(&id)?;
                lib.get_sym(name)
            }
            None => self.root.get_sym(name),
        }
    }

    pub fn run_init(&self, libspace: Option<usize>) -> Result<()> {
        match libspace {
            Some(id) => {
                let lib = self.lib_spaces.get(&id).unwrap();
                self.run_tree(&lib, &lib.dep_tree, ".init_array")
            }
            None => self.run_tree(&self.root, &self.root.dep_tree, ".init_array"),
        }
    }
    pub fn run_fini(&self, libspace: Option<usize>) -> Result<()> {
        match libspace {
            Some(id) => {
                let lib = self.lib_spaces.get(&id).unwrap();
                self.run_tree(&lib, &lib.dep_tree, ".fini_array")
            }
            None => {
                // TODO: we need to deinitialize all the loaded libraries first!
                self.run_tree(&self.root, &self.root.dep_tree, ".fini_array")
            }
        }
    }
    fn run_tree(&self, lib: &Library, root: &DepTree, tree_name: &str) -> Result<()> {
        // Run dependencies before the objects that depend on them
        for node in root.deps.iter() {
            self.run_tree(lib, node, tree_name)?;
        }
        if self.verbose {
            println!("running {} {}", tree_name, &root.name);
        }
        let (_, mmap) = match lib.mmaps.get(&root.name) {
            Some(some) => some,
            None => return Ok(()),
        };
        let elf = Elf::parse(lib.objects.get(&root.name).unwrap())?;
        for section in &elf.section_headers {
            let name = match elf.shdr_strtab.get(section.sh_name) {
                Some(x) => match x {
                    Ok(y) => y,
                    _ => continue,
                },
                _ => continue,
            };
            if name == tree_name {
                let addr = if is_pie_enabled(&elf) {
                    mmap.as_ptr() as usize + section.vm_range().start
                } else {
                    section.vm_range().start
                };
                // Each entry in the array is a 64-bit function pointer
                for i in (0..section.sh_size).step_by(8) {
                    unsafe { call_inits_finis(addr + i as usize) };
                }
            }
        }
        Ok(())
    }
    pub fn link(
        &mut self,
        primary_opt: Option<&str>,
        dso: Option<DSO>,
        libspace: Option<usize>,
    ) -> Result<Option<usize>> {
        match libspace {
            Some(id) => {
                let mut lib = self.lib_spaces.remove(&id).unwrap();
                let res = self._link(primary_opt, dso, &mut lib);
                self.lib_spaces.insert(id, lib);
                res
            }
            None => {
                let mut lib = Library::new();
                swap(&mut lib, &mut self.root);
                let res = self._link(primary_opt, dso, &mut lib);
                swap(&mut lib, &mut self.root);
                res
            }
        }
    }
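    // _link proceeds in phases over every not-yet-linked object: parse the
    // ELF images, mmap them and collect global/weak symbols, allocate TLS,
    // copy segment and TLS master data, apply relocations, patch DT_DEBUG,
    // set final page protections, activate TLS, and finally run IRELATIVE
    // resolvers while picking up the entry point.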
    pub fn _link(
        &mut self,
        primary_opt: Option<&str>,
        dso: Option<DSO>,
        lib: &mut Library,
    ) -> Result<Option<usize>> {
        unsafe { _r_debug.state = RTLDState::RT_ADD };
        _dl_debug_state();

        let elfs = {
            let mut elfs = BTreeMap::new();
            for (name, data) in lib.objects.iter() {
                // Skip already linked libraries
                if !lib.mmaps.contains_key(&*name) && !self.root.mmaps.contains_key(&*name) {
                    elfs.insert(name.as_str(), Elf::parse(&data)?);
                }
            }
            elfs
        };
        // Load all ELF files into memory and find all globals
        let mut tls_primary = 0;
        let mut tls_size = 0;
        for (elf_name, elf) in elfs.iter() {
            if self.verbose {
                println!("map {}", elf_name);
            }
            let object = match lib.objects.get(*elf_name) {
                Some(some) => some,
                None => continue,
            };
            // data for struct LinkMap
            let mut l_ld = 0;
            // Calculate virtual memory bounds
            let bounds = {
                let mut bounds_opt: Option<(usize, usize)> = None;
                for ph in elf.program_headers.iter() {
                    let voff = ph.p_vaddr as usize % PAGE_SIZE;
                    let vaddr = ph.p_vaddr as usize - voff;
                    let vsize =
                        ((ph.p_memsz as usize + voff + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
                    match ph.p_type {
                        program_header::PT_DYNAMIC => {
                            l_ld = ph.p_vaddr;
                        }
                        program_header::PT_LOAD => {
                            if self.verbose {
                                println!(" load {:#x}, {:#x}: {:x?}", vaddr, vsize, ph);
                            }
                            if let Some(ref mut bounds) = bounds_opt {
                                if vaddr < bounds.0 {
                                    bounds.0 = vaddr;
                                }
                                if vaddr + vsize > bounds.1 {
                                    bounds.1 = vaddr + vsize;
                                }
                            } else {
                                bounds_opt = Some((vaddr, vaddr + vsize));
                            }
                        }
                        program_header::PT_TLS => {
                            if self.verbose {
                                println!(" load tls {:#x}: {:x?}", vsize, ph);
                            }
                            tls_size += vsize;
                            if Some(*elf_name) == primary_opt {
                                tls_primary += vsize;
                            }
                        }
                        _ => (),
                    }
                }
                match bounds_opt {
                    Some(some) => some,
                    None => continue,
                }
            };
            if self.verbose {
                println!(" bounds {:#x}, {:#x}", bounds.0, bounds.1);
            }
            // Allocate memory
            let mmap = unsafe {
                let same_elf = if let Some(prog) = dso.as_ref() {
                    prog.name == *elf_name
                } else {
                    false
                };
                // The primary program image was already mapped before the
                // linker ran, so only the gaps between its segments are mapped
                if same_elf {
                    let addr = dso.as_ref().unwrap().base_addr;
                    let size = if is_pie_enabled(&elf) {
                        bounds.1
                    } else {
                        bounds.1 - bounds.0
                    };
                    // Fill the gaps in the binary
                    let mut ranges = Vec::new();
                    for ph in elf.program_headers.iter() {
                        if ph.p_type == program_header::PT_LOAD {
                            let voff = ph.p_vaddr as usize % PAGE_SIZE;
                            let vaddr = ph.p_vaddr as usize - voff;
                            let vsize = ((ph.p_memsz as usize + voff + PAGE_SIZE - 1) / PAGE_SIZE)
                                * PAGE_SIZE;
                            if is_pie_enabled(&elf) {
                                ranges.push((vaddr, vsize));
                            } else {
                                ranges.push((vaddr - addr, vsize));
                            }
                        }
                    }
                    ranges.sort();
                    let mut start = addr;
                    for (vaddr, vsize) in ranges.iter() {
                        if start < addr + vaddr {
                            println!("mmap({:#x}, {})", start, addr + vaddr - start);
                            let mut flags = sys_mman::MAP_ANONYMOUS | sys_mman::MAP_PRIVATE;
                            if start != 0 {
                                flags |= sys_mman::MAP_FIXED_NOREPLACE;
                            }
                            let ptr = sys_mman::mmap(
                                start as *mut c_void,
                                addr + vaddr - start,
                                //TODO: Make it possible to not specify PROT_EXEC on Redox
                                sys_mman::PROT_READ | sys_mman::PROT_WRITE,
                                flags,
                                -1,
                                0,
                            );
                            if ptr as usize == !0 /* MAP_FAILED */ {
                                return Err(Error::Malformed(format!(
                                    "failed to map {}. errno: {}",
                                    elf_name, STR_ERROR[errno as usize]
                                )));
                            }
                            if start != 0 {
                                assert_eq!(
                                    ptr, start as *mut c_void,
                                    "mmap must always map on the destination we requested"
                                );
                            }
                        }
                        start = addr + vaddr + vsize;
                    }
                    sys_mman::mprotect(
                        addr as *mut c_void,
                        size,
                        sys_mman::PROT_READ | sys_mman::PROT_WRITE,
                    );
                    _r_debug.insert_first(addr as usize, &elf_name, addr + l_ld as usize);
                    (
                        addr as usize,
                        slice::from_raw_parts_mut(addr as *mut u8, size),
                    )
                } else {
                    let (start, end) = bounds;
                    let size = end - start;
                    println!("mmap({:#x}, {})", start, size);
                    let mut flags = sys_mman::MAP_ANONYMOUS | sys_mman::MAP_PRIVATE;
                    if start != 0 {
                        flags |= sys_mman::MAP_FIXED_NOREPLACE;
                    }
                    let ptr = sys_mman::mmap(
                        start as *mut c_void,
                        size,
                        //TODO: Make it possible to not specify PROT_EXEC on Redox
                        sys_mman::PROT_READ | sys_mman::PROT_WRITE,
                        flags,
                        -1,
                        0,
                    );
                    if ptr as usize == !0 /* MAP_FAILED */ {
                        return Err(Error::Malformed(format!(
                            "failed to map {}. errno: {}",
                            elf_name, STR_ERROR[errno as usize]
                        )));
                    }
                    if start != 0 {
                        assert_eq!(
                            ptr, start as *mut c_void,
                            "mmap must always map on the destination we requested"
                        );
                    }
                    ptr::write_bytes(ptr as *mut u8, 0, size);
                    _r_debug.insert(ptr as usize, &elf_name, ptr as usize + l_ld as usize);
                    (start, slice::from_raw_parts_mut(ptr as *mut u8, size))
                }
            };
            if self.verbose {
                println!(" mmap {:p}, {:#x}", mmap.1.as_mut_ptr(), mmap.1.len());
            }
            let (globals, weak_syms) = Linker::collect_syms(&elf, &mmap.1, self.verbose)?;
            lib.globals.extend(globals.into_iter());
            lib.weak_syms.extend(weak_syms.into_iter());
            lib.mmaps.insert(elf_name.to_string(), mmap);
        }
        // Allocate TLS
        let mut tcb_opt = if primary_opt.is_some() {
            Some(unsafe { Tcb::new(tls_size)? })
        } else {
            None
        };
        if self.verbose {
            println!("tcb {:x?}", tcb_opt);
        }

        // Copy data
        let mut tls_offset = tls_primary;
        let mut tcb_masters = Vec::new();
        // Insert main image master
        tcb_masters.push(Master {
            ptr: ptr::null_mut(),
            len: 0,
            offset: 0,
        });
        let mut tls_ranges = BTreeMap::new();
        for (elf_name, elf) in elfs.iter() {
            let same_elf = if let Some(prog) = dso.as_ref() {
                prog.name == *elf_name
            } else {
                false
            };
            if same_elf {
                continue;
            }
            let object = match lib.objects.get(*elf_name) {
                Some(some) => some,
                None => continue,
            };
            let &mut (base_addr, ref mut mmap) = match lib.mmaps.get_mut(*elf_name) {
                Some(some) => some,
                None => continue,
            };
            if self.verbose {
                println!("load {}", elf_name);
            }
            // Copy data
            for ph in elf.program_headers.iter() {
                let voff = ph.p_vaddr as usize % PAGE_SIZE;
                let vaddr = ph.p_vaddr as usize - voff;
                let vsize = ((ph.p_memsz as usize + voff + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
                match ph.p_type {
                    program_header::PT_LOAD => {
                        let obj_data = {
                            let range = ph.file_range();
                            match object.get(range.clone()) {
                                Some(some) => some,
                                None => {
                                    return Err(Error::Malformed(format!(
                                        "failed to read {:x?}",
                                        range
                                    )))
                                }
                            }
                        };
                        let mmap_data = {
                            let range = ph.p_vaddr as usize - base_addr
                                ..ph.p_vaddr as usize + obj_data.len() - base_addr;
                            match mmap.get_mut(range.clone()) {
                                Some(some) => some,
                                None => {
                                    println!("mmap: {}", mmap.len());
                                    return Err(Error::Malformed(format!(
                                        "failed to write {:x?}",
                                        range
                                    )))
                                }
                            }
                        };
                        if self.verbose {
                            println!(
                                " copy {:#x}, {:#x}: {:#x}, {:#x}",
                                vaddr,
                                vsize,
                                voff,
                                obj_data.len()
                            );
                        }
                        mmap_data.copy_from_slice(obj_data);
                    }
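                    // TLS offsets count back from the end of the combined
                    // block: a master starts at tls_size - valign, and each
                    // secondary module is shifted down by the running
                    // tls_offset.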
                    program_header::PT_TLS => {
                        let valign = if ph.p_align > 0 {
                            ((ph.p_memsz + (ph.p_align - 1)) / ph.p_align) * ph.p_align
                        } else {
                            ph.p_memsz
                        } as usize;
                        let mut tcb_master = Master {
                            ptr: unsafe { mmap.as_ptr().add(ph.p_vaddr as usize) },
                            len: ph.p_filesz as usize,
                            offset: tls_size - valign,
                        };
                        if self.verbose {
                            println!(
                                " tls master {:p}, {:#x}: {:#x}, {:#x}",
                                tcb_master.ptr, tcb_master.len, tcb_master.offset, valign,
                            );
                        }
                        if Some(*elf_name) == primary_opt {
                            tls_ranges.insert(
                                elf_name.to_string(),
                                (self.tls_index_offset, tcb_master.range()),
                            );
                            tcb_masters[0] = tcb_master;
                        } else {
                            tcb_master.offset -= tls_offset;
                            tls_offset += vsize;
                            tls_ranges.insert(
                                elf_name.to_string(),
                                (
                                    self.tls_index_offset + tcb_masters.len(),
                                    tcb_master.range(),
                                ),
                            );
                            tcb_masters.push(tcb_master);
                        }
                    }
                    _ => (),
                }
            }
        }
        self.tls_index_offset += tcb_masters.len();

        // Set master images for TLS and copy TLS data
        if let Some(ref mut tcb) = tcb_opt {
            unsafe {
                tcb.set_masters(tcb_masters.into_boxed_slice());
                tcb.copy_masters()?;
            }
        }
        // Perform relocations, and protect pages
        for (elf_name, elf) in elfs.iter() {
            if self.verbose {
                println!("link {}", elf_name);
            }
            // Relocate
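            // In ELF psABI terms below: s = S (symbol value), a = A (addend),
            // b = B (load base of this object), and (tm, t) are the TLS
            // module index and block offset for this object.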
            for rel in elf
                .dynrelas
                .iter()
                .chain(elf.dynrels.iter())
                .chain(elf.pltrelocs.iter())
            {
                // println!(" rel {}: {:x?}",
                //     reloc::r_to_str(rel.r_type, elf.header.e_machine),
                //     rel
                // );
                let symbol = if rel.r_sym > 0 {
                    let sym = elf.dynsyms.get(rel.r_sym).ok_or(Error::Malformed(format!(
                        "missing symbol for relocation {:?}",
                        rel
                    )))?;
                    let name =
                        elf.dynstrtab
                            .get(sym.st_name)
                            .ok_or(Error::Malformed(format!(
                                "missing name for symbol {:?}",
                                sym
                            )))??;
                    lib.get_sym(name).or_else(|| self.root.get_sym(name))
                } else {
                    None
                };
                let s = symbol
                    .as_ref()
                    .map(|sym| sym.as_ptr() as usize)
                    .unwrap_or(0);
                let a = rel.r_addend.unwrap_or(0) as usize;
                let (_, mmap) = match lib.mmaps.get_mut(*elf_name) {
                    Some(some) => some,
                    None => continue,
                };
                let b = mmap.as_mut_ptr() as usize;
                let (tm, t) = if let Some((tls_index, tls_range)) = tls_ranges.get(*elf_name) {
                    (*tls_index, tls_range.start)
                } else {
                    (0, 0)
                };
                let ptr = if is_pie_enabled(&elf) {
                    unsafe { mmap.as_mut_ptr().add(rel.r_offset as usize) }
                } else {
                    rel.r_offset as *mut u8
                };
                let set_u64 = |value| {
                    // println!(" set_u64 {:#x}", value);
                    unsafe {
                        *(ptr as *mut u64) = value;
                    }
                };
                match rel.r_type {
                    reloc::R_X86_64_64 => {
                        set_u64((s + a) as u64);
                    }
                    reloc::R_X86_64_DTPMOD64 => {
                        set_u64(tm as u64);
                    }
                    reloc::R_X86_64_DTPOFF64 => {
                        if s != 0 {
                            set_u64((s - b) as u64);
                        } else {
                            set_u64(s as u64);
                        }
                    }
                    reloc::R_X86_64_GLOB_DAT | reloc::R_X86_64_JUMP_SLOT => {
                        set_u64(s as u64);
                    }
                    reloc::R_X86_64_RELATIVE => {
                        set_u64((b + a) as u64);
                    }
                    reloc::R_X86_64_TPOFF64 => {
                        set_u64((s + a).wrapping_sub(t) as u64);
                    }
                    reloc::R_X86_64_IRELATIVE => (), // Handled below
                    reloc::R_X86_64_COPY => unsafe {
                        // TODO: Make this work
                        let sym = symbol
                            .as_ref()
                            .expect("R_X86_64_COPY called without valid symbol");
                        ptr::copy_nonoverlapping(sym.as_ptr() as *const u8, ptr, sym.size as usize);
                    },
                    _ => {
                        panic!(
                            " {} unsupported",
                            reloc::r_to_str(rel.r_type, elf.header.e_machine)
                        );
                    }
                }
            }
            // Overwrite DT_DEBUG if it exists in the DYNAMIC segment;
            // first we identify the location of the DYNAMIC segment
            let mut dyn_start = None;
            let mut debug_start = None;
            for ph in elf.program_headers.iter() {
                if ph.p_type == program_header::PT_DYNAMIC {
                    dyn_start = Some(ph.p_vaddr as usize);
                }
            }
            // next we identify the index of DT_DEBUG in the .dynamic section
            if let Some(dynamic) = elf.dynamic.as_ref() {
                for (i, entry) in dynamic.dyns.iter().enumerate() {
                    if entry.d_tag == DT_DEBUG {
                        debug_start = Some(i);
                        break;
                    }
                }
            }
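            // Only the d_val half of the DT_DEBUG entry is rewritten with
            // the address of _r_debug; size_of::<Dyn>() / 2 is both the
            // offset of d_val within Dyn and its size on 64-bit targets.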
            if let Some(dyn_start_addr) = dyn_start {
                if let Some(i) = debug_start {
                    let (_, mmap) = match lib.mmaps.get_mut(*elf_name) {
                        Some(some) => some,
                        None => continue,
                    };
                    let bytes: [u8; size_of::<Dyn>() / 2] =
                        unsafe { transmute((&_r_debug) as *const RTLDDebug as usize) };
                    let start = if is_pie_enabled(elf) {
                        dyn_start_addr + i * size_of::<Dyn>() + size_of::<Dyn>() / 2
                    } else {
                        dyn_start_addr + i * size_of::<Dyn>() + size_of::<Dyn>() / 2
                            - mmap.as_mut_ptr() as usize
                    };
                    mmap[start..start + size_of::<Dyn>() / 2].clone_from_slice(&bytes);
                }
            }
            // Protect pages
            for ph in elf.program_headers.iter() {
                if ph.p_type == program_header::PT_LOAD {
                    let voff = ph.p_vaddr as usize % PAGE_SIZE;
                    let vaddr = ph.p_vaddr as usize - voff;
                    let vsize =
                        ((ph.p_memsz as usize + voff + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
                    let mut prot = 0;
                    if ph.p_flags & program_header::PF_R == program_header::PF_R {
                        prot |= sys_mman::PROT_READ;
                    }
                    // W ^ X. If it is executable, do not allow it to be writable, even if requested
                    if ph.p_flags & program_header::PF_X == program_header::PF_X {
                        prot |= sys_mman::PROT_EXEC;
                    } else if ph.p_flags & program_header::PF_W == program_header::PF_W {
                        prot |= sys_mman::PROT_WRITE;
                    }
                    let (_, mmap) = match lib.mmaps.get_mut(*elf_name) {
                        Some(some) => some,
                        None => continue,
                    };
                    let res = unsafe {
                        let ptr = if is_pie_enabled(elf) {
                            mmap.as_mut_ptr().add(vaddr)
                        } else {
                            vaddr as *const u8
                        };
                        if self.verbose {
                            println!(" prot {:#x}, {:#x}: {:p}, {:#x}", vaddr, vsize, ptr, prot);
                        }
                        sys_mman::mprotect(ptr as *mut c_void, vsize, prot)
                    };
                    if res < 0 {
                        return Err(Error::Malformed(format!(
                            "failed to mprotect {}",
                            elf_name
                        )));
                    }
                }
            }
        }
        // Activate TLS
        if let Some(ref mut tcb) = tcb_opt {
            unsafe {
                tcb.activate();
            }
        }

        // Perform indirect relocations (necessary evil), gather entry point
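        // IRELATIVE targets are ifunc resolvers, i.e. real code inside the
        // objects mapped above, so they can only run once pages have their
        // final protections and TLS has been activated.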
        let mut entry_opt = None;
        for (elf_name, elf) in elfs.iter() {
            let (_, mmap) = match lib.mmaps.get_mut(*elf_name) {
                Some(some) => some,
                None => continue,
            };
            if self.verbose {
                println!("entry {}", elf_name);
            }
            if Some(*elf_name) == primary_opt {
                if is_pie_enabled(&elf) {
                    entry_opt = Some(mmap.as_mut_ptr() as usize + elf.header.e_entry as usize);
                } else {
                    entry_opt = Some(elf.header.e_entry as usize);
                }
            }
            // Relocate
            for rel in elf
                .dynrelas
                .iter()
                .chain(elf.dynrels.iter())
                .chain(elf.pltrelocs.iter())
            {
                // println!(" rel {}: {:x?}",
                //     reloc::r_to_str(rel.r_type, elf.header.e_machine),
                //     rel
                // );
                let a = rel.r_addend.unwrap_or(0) as usize;
                let b = mmap.as_mut_ptr() as usize;
                let ptr = unsafe { mmap.as_mut_ptr().add(rel.r_offset as usize) };
                let set_u64 = |value| {
                    // println!(" set_u64 {:#x}", value);
                    unsafe {
                        *(ptr as *mut u64) = value;
                    }
                };
                if rel.r_type == reloc::R_X86_64_IRELATIVE {
                    unsafe {
                        let f: unsafe extern "C" fn() -> u64 = transmute(b + a);
                        set_u64(f());
                    }
                }
            }
            // Protect pages
            for ph in elf.program_headers.iter() {
                if ph.p_type == program_header::PT_LOAD {
                    let voff = ph.p_vaddr as usize % PAGE_SIZE;
                    let vaddr = ph.p_vaddr as usize - voff;
                    let vsize =
                        ((ph.p_memsz as usize + voff + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
                    let mut prot = 0;
                    if ph.p_flags & program_header::PF_R == program_header::PF_R {
                        prot |= sys_mman::PROT_READ;
                    }
                    // W ^ X. If it is executable, do not allow it to be writable, even if requested
                    if ph.p_flags & program_header::PF_X == program_header::PF_X {
                        prot |= sys_mman::PROT_EXEC;
                    } else if ph.p_flags & program_header::PF_W == program_header::PF_W {
                        prot |= sys_mman::PROT_WRITE;
                    }
                    let res = unsafe {
                        let ptr = if is_pie_enabled(&elf) {
                            mmap.as_mut_ptr().add(vaddr)
                        } else {
                            vaddr as *const u8
                        };
                        if self.verbose {
                            println!(" prot {:#x}, {:#x}: {:p}, {:#x}", vaddr, vsize, ptr, prot);
                        }
                        sys_mman::mprotect(ptr as *mut c_void, vsize, prot)
                    };
                    if res < 0 {
                        return Err(Error::Malformed(format!(
                            "failed to mprotect {}",
                            elf_name
                        )));
                    }
                }
            }
        }

        unsafe { _r_debug.state = RTLDState::RT_CONSISTENT };
        _dl_debug_state();

        Ok(entry_opt)
    }
}
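// Each .init_array/.fini_array entry is a (possibly null) function pointer.
// `Option<extern "C" fn()>` relies on the null-pointer optimization, so a
// null entry reads back as `None` and is skipped.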
unsafe fn call_inits_finis(addr: usize) {
    let func = addr as *const Option<extern "C" fn()>;
    if let Some(f) = *func {
        f();
    }
}

fn is_pie_enabled(elf: &Elf) -> bool {
    elf.header.e_type == ET_DYN
}