use alloc::{
    boxed::Box,
    collections::{BTreeMap, BTreeSet},
    rc::Rc,
    string::{String, ToString},
    vec::Vec,
};
use core::{
    cell::RefCell,
    mem::{size_of, swap, transmute},
    ptr, slice,
};
use goblin::{
    elf::{
        header::ET_DYN,
        program_header,
        r#dyn::{Dyn, DT_DEBUG, DT_RUNPATH},
        reloc, sym, Elf,
    },
    error::{Error, Result},
};

use crate::{
    c_str::CString,
    fs::File,
    header::{errno::STR_ERROR, fcntl, sys_mman, unistd},
    io::Read,
    platform::{errno, types::c_void},
};

use super::{
    access::accessible,
    callbacks::LinkerCallbacks,
    debug::{RTLDDebug, RTLDState, _dl_debug_state, _r_debug},
    library::{DepTree, Library},
    tcb::{Master, Tcb},
    PAGE_SIZE,
};
#[cfg(target_os = "redox")]
pub const PATH_SEP: char = ';';

#[cfg(target_os = "linux")]
pub const PATH_SEP: char = ':';
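/// Information about the already-loaded primary program image, passed in by
/// the startup code so the linker can reuse its existing mapping instead of
/// creating a new one.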
pub struct DSO {
    pub name: String,
    pub base_addr: usize,
    pub entry_point: usize,
}
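/// A resolved symbol: `value` is the symbol's `st_value`, and `base` is the
/// load base of its object (0 for non-PIE objects, where `st_value` is
/// already an absolute address).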
#[derive(Clone, Copy, Debug)]
pub struct Symbol {
    pub value: usize,
    pub base: usize,
    pub size: usize,
}

impl Symbol {
    pub fn as_ptr(self) -> *mut c_void {
        (self.base + self.value) as *mut c_void
    }
}
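/// The dynamic linker: loads ELF objects and their dependencies, maps them
/// into memory, resolves symbols, applies relocations, and runs
/// initializers and finalizers.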
pub struct Linker {
    // Used by load
    /// Library path to search when loading library by name
    default_library_path: String,
    ld_library_path: Option<String>,
    root: Library,
    verbose: bool,
    tls_index_offset: usize,
    lib_spaces: BTreeMap<usize, Library>,
    counter: usize,
    pub cbs: Rc<RefCell<LinkerCallbacks>>,
}
impl Linker {
    pub fn new(ld_library_path: Option<String>, verbose: bool) -> Self {
        Self {
            default_library_path: "/lib".to_string(),
            ld_library_path,
            root: Library::new(),
            verbose,
            tls_index_offset: 0,
            lib_spaces: BTreeMap::new(),
            counter: 1,
            cbs: Rc::new(RefCell::new(LinkerCallbacks::new())),
        }
    }
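    /// Load the named object from `path` into the root library space,
    /// together with all of its dependencies.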
    pub fn load(&mut self, name: &str, path: &str) -> Result<()> {
        let mut lib: Library = Library::new();
        swap(&mut lib, &mut self.root);
        lib.dep_tree = self.load_recursive(name, path, &mut lib)?;
        swap(&mut lib, &mut self.root);
        if self.verbose {
            println!("Dep tree: {:#?}", self.root.dep_tree);
        }
        Ok(())
    }
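    /// Drop a library space previously created by `load_library`, unmapping
    /// every object that was mapped into it.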
    pub fn unload(&mut self, libspace: usize) {
        if let Some(lib) = self.lib_spaces.remove(&libspace) {
            for (_, (_, mmap)) in lib.mmaps {
                unsafe { sys_mman::munmap(mmap.as_mut_ptr() as *mut c_void, mmap.len()) };
            }
        }
    }
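    /// Read the object at `path`, then load it and its dependencies into
    /// `lib`, returning the resulting dependency tree. Circular dependencies
    /// are detected via `lib.cir_dep` and reported as errors.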
    fn load_recursive(&mut self, name: &str, path: &str, lib: &mut Library) -> Result<DepTree> {
        if self.verbose {
            println!("load {}: {}", name, path);
        }
        if lib.cir_dep.contains(name) {
            return Err(Error::Malformed(format!(
                "Circular dependency: {} is a dependency of itself",
                name
            )));
        }

        let mut deps = DepTree::new(name.to_string());
        let mut data = Vec::new();
        lib.cir_dep.insert(name.to_string());
        let path_c = CString::new(path)
            .map_err(|err| Error::Malformed(format!("invalid path '{}': {}", path, err)))?;

        {
            let flags = fcntl::O_RDONLY | fcntl::O_CLOEXEC;
            let mut file = File::open(&path_c, flags)
                .map_err(|err| Error::Malformed(format!("failed to open '{}': {}", path, err)))?;
            file.read_to_end(&mut data)
                .map_err(|err| Error::Malformed(format!("failed to read '{}': {}", path, err)))?;
        }

        deps.deps = self.load_data(name, data.into_boxed_slice(), lib)?;
        lib.cir_dep.remove(name);
        Ok(deps)
    }
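    /// Parse an ELF image, record its DT_RUNPATH, load its needed libraries,
    /// and store the raw object data under its soname (or `name` if it has
    /// none).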
    fn load_data(
        &mut self,
        name: &str,
        data: Box<[u8]>,
        lib: &mut Library,
    ) -> Result<Vec<DepTree>> {
        let elf = Elf::parse(&data)?;
        //println!("{:#?}", elf);

        // search for RUNPATH
        lib.runpath = if let Some(dynamic) = elf.dynamic {
            let entry = dynamic.dyns.iter().find(|d| d.d_tag == DT_RUNPATH);
            match entry {
                Some(entry) => {
                    let path = elf
                        .dynstrtab
                        .get(entry.d_val as usize)
                        .ok_or(Error::Malformed("Missing RUNPATH in dynstrtab".to_string()))??;
                    Some(path.to_string())
                }
                _ => None,
            }
        } else {
            None
        };

        let mut deps = Vec::new();
        for library in elf.libraries.iter() {
            if let Some(dep) = self._load_library(library, lib)? {
                deps.push(dep);
            }
        }
        let key = match elf.soname {
            Some(soname) => soname,
            _ => name,
        };
        if !lib.objects.contains_key(key) {
            lib.objects.insert(key.to_string(), data);
        }
        Ok(deps)
    }
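    /// Load a library and its dependencies into a fresh, private library
    /// space and return the numeric handle identifying that space.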
    pub fn load_library(&mut self, name: &str) -> Result<usize> {
        let mut lib = Library::new();
        self._load_library(name, &mut lib)?;
        let ret = self.counter;
        self.lib_spaces.insert(ret, lib);
        self.counter += 1;
        Ok(ret)
    }
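    /// Load `name` into `lib` unless it is already resolved. Names containing
    /// '/' are treated as paths; bare names are searched for in
    /// LD_LIBRARY_PATH, the object's RUNPATH, and the default library path,
    /// in that order.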
    fn _load_library(&mut self, name: &str, lib: &mut Library) -> Result<Option<DepTree>> {
        if lib.objects.contains_key(name) || self.root.objects.contains_key(name) {
            // It was previously resolved, so we don't need to worry about it
            Ok(None)
        } else if name.contains('/') {
            Ok(Some(self.load_recursive(name, name, lib)?))
        } else {
            let mut paths = Vec::new();
            if let Some(ld_library_path) = &self.ld_library_path {
                paths.push(ld_library_path);
            }
            if let Some(runpath) = &lib.runpath {
                paths.push(runpath);
            }
            paths.push(&self.default_library_path);
            for part in paths.iter() {
                let path = if part.is_empty() {
                    format!("./{}", name)
                } else {
                    format!("{}/{}", part, name)
                };
                if self.verbose {
                    println!("check {}", path);
                }
                if accessible(&path, unistd::F_OK) == 0 {
                    return Ok(Some(self.load_recursive(name, &path, lib)?));
                }
            }
            Err(Error::Malformed(format!("failed to locate '{}'", name)))
        }
    }
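    /// Collect the defined global and weak dynamic symbols of `elf` into two
    /// maps, rebasing their values onto `mmap` for PIE objects.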
    fn collect_syms(
        elf: &Elf,
        mmap: &[u8],
        verbose: bool,
    ) -> Result<(BTreeMap<String, Symbol>, BTreeMap<String, Symbol>)> {
        let mut globals = BTreeMap::new();
        let mut weak_syms = BTreeMap::new();
        for sym in elf.dynsyms.iter() {
            let bind = sym.st_bind();
            if sym.st_value == 0 || ![sym::STB_GLOBAL, sym::STB_WEAK].contains(&bind) {
                continue;
            }
            let name: String;
            let value: Symbol;
            if let Some(name_res) = elf.dynstrtab.get(sym.st_name) {
                name = name_res?.to_string();
                value = if is_pie_enabled(elf) {
                    Symbol {
                        base: mmap.as_ptr() as usize,
                        value: sym.st_value as usize,
                        size: sym.st_size as usize,
                    }
                } else {
                    Symbol {
                        base: 0,
                        value: sym.st_value as usize,
                        size: sym.st_size as usize,
                    }
                };
            } else {
                continue;
            }
            match sym.st_bind() {
                sym::STB_GLOBAL => {
                    if verbose {
                        println!("  global {}: {:x?} = {:p}", &name, sym, value.as_ptr());
                    }
                    globals.insert(name, value);
                }
                sym::STB_WEAK => {
                    if verbose {
                        println!("  weak {}: {:x?} = {:p}", &name, sym, value.as_ptr());
                    }
                    weak_syms.insert(name, value);
                }
                _ => unreachable!(),
            }
        }
        Ok((globals, weak_syms))
    }
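    /// Look up a symbol in the given library space, or in the root space if
    /// `libspace` is `None`.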
    pub fn get_sym(&self, name: &str, libspace: Option<usize>) -> Option<Symbol> {
        match libspace {
            Some(id) => {
                let lib = self.lib_spaces.get(&id)?;
                lib.get_sym(name)
            }
            None => self.root.get_sym(name),
        }
    }
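    /// Run the `.init_array` constructors of every object in the space,
    /// dependencies first.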
    pub fn run_init(&self, libspace: Option<usize>) -> Result<()> {
        match libspace {
            Some(id) => {
                let lib = self.lib_spaces.get(&id).unwrap();
                self.run_tree(&lib, &lib.dep_tree, ".init_array")
            }
            None => self.run_tree(&self.root, &self.root.dep_tree, ".init_array"),
        }
    }
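    /// Run the `.fini_array` destructors of every object in the space.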
    pub fn run_fini(&self, libspace: Option<usize>) -> Result<()> {
        match libspace {
            Some(id) => {
                let lib = self.lib_spaces.get(&id).unwrap();
                self.run_tree(&lib, &lib.dep_tree, ".fini_array")
            }
            None => {
                //TODO: we first need to deinitialize all the loaded libraries!
                self.run_tree(&self.root, &self.root.dep_tree, ".fini_array")
            }
        }
    }
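    /// Post-order walk of the dependency tree: run the functions in the
    /// `tree_name` section (`.init_array` or `.fini_array`) for all
    /// dependencies of `root` before running them for `root` itself.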
    fn run_tree(&self, lib: &Library, root: &DepTree, tree_name: &str) -> Result<()> {
        for node in root.deps.iter() {
            self.run_tree(lib, node, tree_name)?;
        }
        if self.verbose {
            println!("running {} {}", tree_name, &root.name);
        }
        let (_, mmap) = match lib.mmaps.get(&root.name) {
            Some(some) => some,
            None => return Ok(()),
        };
        let elf = Elf::parse(lib.objects.get(&root.name).unwrap())?;
        for section in &elf.section_headers {
            let name = match elf.shdr_strtab.get(section.sh_name) {
                Some(Ok(name)) => name,
                _ => continue,
            };
            if name == tree_name {
                let addr = if is_pie_enabled(&elf) {
                    mmap.as_ptr() as usize + section.vm_range().start
                } else {
                    section.vm_range().start
                };
                // Each array entry is a function pointer (8 bytes on x86_64)
                for i in (0..section.sh_size).step_by(8) {
                    unsafe { call_inits_finis(addr + i as usize) };
                }
            }
        }
        Ok(())
    }
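    /// Map, relocate, and protect every object in the chosen library space
    /// that has not been linked yet, returning the entry point of
    /// `primary_opt` if one was named.
    ///
    /// A minimal sketch of the intended call sequence; the driver code and
    /// the literal paths are illustrative assumptions, not part of this
    /// module:
    ///
    /// ```ignore
    /// let mut linker = Linker::new(None, false);
    /// linker.load("/bin/example", "/bin/example")?;
    /// let entry = linker.link(Some("/bin/example"), None, None)?;
    /// linker.run_init(None)?;
    /// ```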
    pub fn link(
        &mut self,
        primary_opt: Option<&str>,
        dso: Option<DSO>,
        libspace: Option<usize>,
    ) -> Result<Option<usize>> {
        match libspace {
            Some(id) => {
                let mut lib = self.lib_spaces.remove(&id).unwrap();
                let res = self._link(primary_opt, dso, &mut lib);
                self.lib_spaces.insert(id, lib);
                res
            }
            None => {
                let mut lib = Library::new();
                swap(&mut lib, &mut self.root);
                let res = self._link(primary_opt, dso, &mut lib);
                swap(&mut lib, &mut self.root);
                res
            }
        }
    }
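    /// The linking workhorse behind `link`: parse each unlinked object, map
    /// its PT_LOAD segments, collect globals, set up TLS masters, apply
    /// x86_64 relocations, patch DT_DEBUG, and enforce W^X page protections.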
    pub fn _link(
        &mut self,
        primary_opt: Option<&str>,
        dso: Option<DSO>,
        lib: &mut Library,
    ) -> Result<Option<usize>> {
        unsafe { _r_debug.state = RTLDState::RT_ADD };
        _dl_debug_state();

        let mut skip_list = BTreeSet::new();
        let elfs = {
            let mut elfs = BTreeMap::new();
            for (name, data) in lib.objects.iter() {
                // Skip already linked libraries
                if !lib.mmaps.contains_key(&*name) && !self.root.mmaps.contains_key(&*name) {
                    elfs.insert(name.as_str(), Elf::parse(&data)?);
                } else {
                    skip_list.insert(name.as_str());
                }
            }
            elfs
        };
        // Load all ELF files into memory and find all globals
        let mut tls_primary = 0;
        let mut tls_size = 0;
        for (elf_name, elf) in elfs.iter() {
            if skip_list.contains(elf_name) {
                continue;
            }
            if self.verbose {
                println!("map {}", elf_name);
            }
            let object = match lib.objects.get(*elf_name) {
                Some(some) => some,
                None => continue,
            };
            // data for struct LinkMap
            let mut l_ld = 0;
            // Calculate virtual memory bounds
            let bounds = {
                let mut bounds_opt: Option<(usize, usize)> = None;
                for ph in elf.program_headers.iter() {
                    let voff = ph.p_vaddr as usize % PAGE_SIZE;
                    let vaddr = ph.p_vaddr as usize - voff;
                    let vsize =
                        ((ph.p_memsz as usize + voff + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
                    match ph.p_type {
                        program_header::PT_DYNAMIC => {
                            l_ld = ph.p_vaddr;
                        }
                        program_header::PT_LOAD => {
                            if self.verbose {
                                println!("  load {:#x}, {:#x}: {:x?}", vaddr, vsize, ph);
                            }
                            if let Some(ref mut bounds) = bounds_opt {
                                if vaddr < bounds.0 {
                                    bounds.0 = vaddr;
                                }
                                if vaddr + vsize > bounds.1 {
                                    bounds.1 = vaddr + vsize;
                                }
                            } else {
                                bounds_opt = Some((vaddr, vaddr + vsize));
                            }
                        }
                        program_header::PT_TLS => {
                            if self.verbose {
                                println!("  load tls {:#x}: {:x?}", vsize, ph);
                            }
                            tls_size += vsize;
                            if Some(*elf_name) == primary_opt {
                                tls_primary += vsize;
                            }
                        }
                        _ => (),
                    }
                }
                match bounds_opt {
                    Some(some) => some,
                    None => continue,
                }
            };
            if self.verbose {
                println!("  bounds {:#x}, {:#x}", bounds.0, bounds.1);
            }
            // Allocate memory
            let mmap = unsafe {
                let same_elf = if let Some(prog) = dso.as_ref() {
                    prog.name == *elf_name
                } else {
                    false
                };
                if same_elf {
                    let addr = dso.as_ref().unwrap().base_addr;
                    let size = if is_pie_enabled(&elf) {
                        bounds.1
                    } else {
                        bounds.1 - bounds.0
                    };
                    // Fill the gaps in the binary
                    let mut ranges = Vec::new();
                    for ph in elf.program_headers.iter() {
                        if ph.p_type == program_header::PT_LOAD {
                            let voff = ph.p_vaddr as usize % PAGE_SIZE;
                            let vaddr = ph.p_vaddr as usize - voff;
                            let vsize = ((ph.p_memsz as usize + voff + PAGE_SIZE - 1) / PAGE_SIZE)
                                * PAGE_SIZE;
                            if is_pie_enabled(&elf) {
                                ranges.push((vaddr, vsize));
                            } else {
                                ranges.push((vaddr - addr, vsize));
                            }
                        }
                    }
                    ranges.sort();
                    let mut start = addr;
                    for (vaddr, vsize) in ranges.iter() {
                        if start < addr + vaddr {
                            if self.verbose {
                                println!("mmap({:#x}, {})", start, addr + vaddr - start);
                            }
                            let mut flags = sys_mman::MAP_ANONYMOUS | sys_mman::MAP_PRIVATE;
                            if start != 0 {
                                flags |= sys_mman::MAP_FIXED_NOREPLACE;
                            }
                            let ptr = sys_mman::mmap(
                                start as *mut c_void,
                                addr + vaddr - start,
                                //TODO: Make it possible to not specify PROT_EXEC on Redox
                                sys_mman::PROT_READ | sys_mman::PROT_WRITE,
                                flags,
                                -1,
                                0,
                            );
                            if ptr as usize == !0
                            /* MAP_FAILED */
                            {
                                return Err(Error::Malformed(format!(
                                    "failed to map {}. errno: {}",
                                    elf_name, STR_ERROR[errno as usize]
                                )));
                            }
                            if start as *mut c_void != ptr::null_mut() {
                                assert_eq!(
                                    ptr, start as *mut c_void,
                                    "mmap must always map on the destination we requested"
                                );
                            }
                        }
                        start = addr + vaddr + vsize;
                    }
                    sys_mman::mprotect(
                        addr as *mut c_void,
                        size,
                        sys_mman::PROT_READ | sys_mman::PROT_WRITE,
                    );
                    _r_debug.insert_first(addr as usize, &elf_name, addr + l_ld as usize);
                    (
                        addr as usize,
                        slice::from_raw_parts_mut(addr as *mut u8, size),
                    )
                } else {
                    let (start, end) = bounds;
                    let size = end - start;
                    if self.verbose {
                        println!("mmap({:#x}, {})", start, size);
                    }
                    let mut flags = sys_mman::MAP_ANONYMOUS | sys_mman::MAP_PRIVATE;
                    if start != 0 {
                        flags |= sys_mman::MAP_FIXED_NOREPLACE;
                    }
                    let ptr = sys_mman::mmap(
                        start as *mut c_void,
                        size,
                        //TODO: Make it possible to not specify PROT_EXEC on Redox
                        sys_mman::PROT_READ | sys_mman::PROT_WRITE,
                        flags,
                        -1,
                        0,
                    );
                    if ptr as usize == !0
                    /* MAP_FAILED */
                    {
                        return Err(Error::Malformed(format!(
                            "failed to map {}. errno: {}",
                            elf_name, STR_ERROR[errno as usize]
                        )));
                    }
                    if start as *mut c_void != ptr::null_mut() {
                        assert_eq!(
                            ptr, start as *mut c_void,
                            "mmap must always map on the destination we requested"
                        );
                    }
                    ptr::write_bytes(ptr as *mut u8, 0, size);
                    _r_debug.insert(ptr as usize, &elf_name, ptr as usize + l_ld as usize);
                    (start, slice::from_raw_parts_mut(ptr as *mut u8, size))
                }
            };
            if self.verbose {
                println!("  mmap {:p}, {:#x}", mmap.1.as_mut_ptr(), mmap.1.len());
            }
            let (globals, weak_syms) = Linker::collect_syms(&elf, &mmap.1, self.verbose)?;
            lib.globals.extend(globals.into_iter());
            lib.weak_syms.extend(weak_syms.into_iter());
            lib.mmaps.insert(elf_name.to_string(), mmap);
        }
        // Allocate TLS
        let mut tcb_opt = if primary_opt.is_some() {
            Some(unsafe { Tcb::new(tls_size)? })
        } else {
            None
        };
        if self.verbose {
            println!("tcb {:x?}", tcb_opt);
        }

        // Copy data
        let mut tls_offset = tls_primary;
        let mut tcb_masters = Vec::new();
        // Insert main image master
        tcb_masters.push(Master {
            ptr: ptr::null_mut(),
            len: 0,
            offset: 0,
        });
        let mut tls_ranges = BTreeMap::new();
        for (elf_name, elf) in elfs.iter() {
            if skip_list.contains(elf_name) {
                continue;
            }
            let same_elf = if let Some(prog) = dso.as_ref() {
                prog.name == *elf_name
            } else {
                false
            };
            if same_elf {
                continue;
            }
            let object = match lib.objects.get(*elf_name) {
                Some(some) => some,
                None => continue,
            };
            let &mut (base_addr, ref mut mmap) = match lib.mmaps.get_mut(*elf_name) {
                Some(some) => some,
                None => continue,
            };
            if self.verbose {
                println!("load {}", elf_name);
            }
            // Copy data
            for ph in elf.program_headers.iter() {
                let voff = ph.p_vaddr as usize % PAGE_SIZE;
                let vaddr = ph.p_vaddr as usize - voff;
                let vsize = ((ph.p_memsz as usize + voff + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
                match ph.p_type {
                    program_header::PT_LOAD => {
                        let obj_data = {
                            let range = ph.file_range();
                            match object.get(range.clone()) {
                                Some(some) => some,
                                None => {
                                    return Err(Error::Malformed(format!(
                                        "failed to read {:x?}",
                                        range
                                    )))
                                }
                            }
                        };
                        let mmap_data = {
                            let range = ph.p_vaddr as usize - base_addr
                                ..ph.p_vaddr as usize + obj_data.len() - base_addr;
                            match mmap.get_mut(range.clone()) {
                                Some(some) => some,
                                None => {
                                    println!("mmap: {}", mmap.len());
                                    return Err(Error::Malformed(format!(
                                        "failed to write {:x?}",
                                        range
                                    )));
                                }
                            }
                        };
                        if self.verbose {
                            println!(
                                "  copy {:#x}, {:#x}: {:#x}, {:#x}",
                                vaddr,
                                vsize,
                                voff,
                                obj_data.len()
                            );
                        }
                        mmap_data.copy_from_slice(obj_data);
                    }
                    program_header::PT_TLS => {
                        let valign = if ph.p_align > 0 {
                            ((ph.p_memsz + (ph.p_align - 1)) / ph.p_align) * ph.p_align
                        } else {
                            ph.p_memsz
                        } as usize;
                        let mut tcb_master = Master {
                            ptr: unsafe { mmap.as_ptr().add(ph.p_vaddr as usize - base_addr) },
                            len: ph.p_filesz as usize,
                            offset: tls_size - valign,
                        };
                        if self.verbose {
                            println!(
                                "  tls master {:p}, {:#x}: {:#x}, {:#x}",
                                tcb_master.ptr, tcb_master.len, tcb_master.offset, valign,
                            );
                        }
                        if Some(*elf_name) == primary_opt {
                            tls_ranges.insert(
                                elf_name.to_string(),
                                (self.tls_index_offset, tcb_master.range()),
                            );
                            tcb_masters[0] = tcb_master;
                        } else {
                            tcb_master.offset -= tls_offset;
                            tls_offset += vsize;
                            tls_ranges.insert(
                                elf_name.to_string(),
                                (
                                    self.tls_index_offset + tcb_masters.len(),
                                    tcb_master.range(),
                                ),
                            );
                            tcb_masters.push(tcb_master);
                        }
                    }
                    _ => (),
                }
            }
        }
        self.tls_index_offset += tcb_masters.len();

        // Set master images for TLS and copy TLS data
        if let Some(ref mut tcb) = tcb_opt {
            unsafe {
                tcb.set_masters(tcb_masters.into_boxed_slice());
                tcb.copy_masters()?;
            }
        }
        // Perform relocations, and protect pages
        for (elf_name, elf) in elfs.iter() {
            if skip_list.contains(elf_name) {
                continue;
            }
            if self.verbose {
                println!("link {}", elf_name);
            }
            // Relocate
            for rel in elf
                .dynrelas
                .iter()
                .chain(elf.dynrels.iter())
                .chain(elf.pltrelocs.iter())
            {
                // println!("  rel {}: {:x?}",
                //     reloc::r_to_str(rel.r_type, elf.header.e_machine),
                //     rel
                // );
                let symbol = if rel.r_sym > 0 {
                    let sym = elf.dynsyms.get(rel.r_sym).ok_or(Error::Malformed(format!(
                        "missing symbol for relocation {:?}",
                        rel
                    )))?;
                    let name =
                        elf.dynstrtab
                            .get(sym.st_name)
                            .ok_or(Error::Malformed(format!(
                                "missing name for symbol {:?}",
                                sym
                            )))??;
                    lib.get_sym(name).or_else(|| self.root.get_sym(name))
                } else {
                    None
                };
                // Symbol value (s), addend (a), base address (b), and TLS
                // module/offset (tm, t), following the ELF relocation formulas
                let s = symbol
                    .as_ref()
                    .map(|sym| sym.as_ptr() as usize)
                    .unwrap_or(0);
                let a = rel.r_addend.unwrap_or(0) as usize;
                let (_, mmap) = match lib.mmaps.get_mut(*elf_name) {
                    Some(some) => some,
                    None => continue,
                };
                let b = mmap.as_mut_ptr() as usize;
                let (tm, t) = if let Some((tls_index, tls_range)) = tls_ranges.get(*elf_name) {
                    (*tls_index, tls_range.start)
                } else {
                    (0, 0)
                };
                let ptr = if is_pie_enabled(&elf) {
                    unsafe { mmap.as_mut_ptr().add(rel.r_offset as usize) }
                } else {
                    rel.r_offset as *mut u8
                };
                let set_u64 = |value| {
                    // println!("    set_u64 {:#x}", value);
                    unsafe {
                        *(ptr as *mut u64) = value;
                    }
                };
                match rel.r_type {
                    reloc::R_X86_64_64 => {
                        set_u64((s + a) as u64);
                    }
                    reloc::R_X86_64_DTPMOD64 => {
                        set_u64(tm as u64);
                    }
                    reloc::R_X86_64_DTPOFF64 => {
                        if s != 0 {
                            set_u64((s - b) as u64);
                        } else {
                            set_u64(s as u64);
                        }
                    }
                    reloc::R_X86_64_GLOB_DAT | reloc::R_X86_64_JUMP_SLOT => {
                        set_u64(s as u64);
                    }
                    reloc::R_X86_64_RELATIVE => {
                        set_u64((b + a) as u64);
                    }
                    reloc::R_X86_64_TPOFF64 => {
                        set_u64((s + a).wrapping_sub(t) as u64);
                    }
                    reloc::R_X86_64_IRELATIVE => (), // Handled below
                    reloc::R_X86_64_COPY => unsafe {
                        // TODO: Make this work
                        let sym = symbol
                            .as_ref()
                            .expect("R_X86_64_COPY called without valid symbol");
                        ptr::copy_nonoverlapping(sym.as_ptr() as *const u8, ptr, sym.size as usize);
                    },
                    _ => {
                        panic!(
                            "    {} unsupported",
                            reloc::r_to_str(rel.r_type, elf.header.e_machine)
                        );
                    }
                }
            }
            // Overwrite DT_DEBUG if it exists in the DYNAMIC segment.
            // First, identify the location of the DYNAMIC segment
            let mut dyn_start = None;
            let mut debug_start = None;
            for ph in elf.program_headers.iter() {
                if ph.p_type == program_header::PT_DYNAMIC {
                    dyn_start = Some(ph.p_vaddr as usize);
                }
            }
            // Next, identify the index of the DT_DEBUG entry in the .dynamic section
            if let Some(dynamic) = elf.dynamic.as_ref() {
                for (i, entry) in dynamic.dyns.iter().enumerate() {
                    if entry.d_tag == DT_DEBUG {
                        debug_start = Some(i);
                        break;
                    }
                }
            }
            if let Some(dyn_start_addr) = dyn_start {
                if let Some(i) = debug_start {
                    let (_, mmap) = match lib.mmaps.get_mut(*elf_name) {
                        Some(some) => some,
                        None => continue,
                    };
                    // Write the address of _r_debug into the d_val half of
                    // the DT_DEBUG entry (d_val is the second half of a Dyn)
                    let bytes: [u8; size_of::<Dyn>() / 2] =
                        unsafe { transmute((&_r_debug) as *const RTLDDebug as usize) };
                    let start = if is_pie_enabled(elf) {
                        dyn_start_addr + i * size_of::<Dyn>() + size_of::<Dyn>() / 2
                    } else {
                        dyn_start_addr + i * size_of::<Dyn>() + size_of::<Dyn>() / 2
                            - mmap.as_mut_ptr() as usize
                    };
                    mmap[start..start + size_of::<Dyn>() / 2].clone_from_slice(&bytes);
                }
            }
            // Protect pages
            for ph in elf.program_headers.iter() {
                if ph.p_type == program_header::PT_LOAD {
                    let voff = ph.p_vaddr as usize % PAGE_SIZE;
                    let vaddr = ph.p_vaddr as usize - voff;
                    let vsize =
                        ((ph.p_memsz as usize + voff + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
                    let mut prot = 0;
                    if ph.p_flags & program_header::PF_R == program_header::PF_R {
                        prot |= sys_mman::PROT_READ;
                    }
                    // W ^ X. If it is executable, do not allow it to be writable, even if requested
                    if ph.p_flags & program_header::PF_X == program_header::PF_X {
                        prot |= sys_mman::PROT_EXEC;
                    } else if ph.p_flags & program_header::PF_W == program_header::PF_W {
                        prot |= sys_mman::PROT_WRITE;
                    }
                    let (_, mmap) = match lib.mmaps.get_mut(*elf_name) {
                        Some(some) => some,
                        None => continue,
                    };
                    let res = unsafe {
                        let ptr = if is_pie_enabled(elf) {
                            mmap.as_mut_ptr().add(vaddr)
                        } else {
                            vaddr as *const u8
                        };
                        if self.verbose {
                            println!("  prot {:#x}, {:#x}: {:p}, {:#x}", vaddr, vsize, ptr, prot);
                        }
                        sys_mman::mprotect(ptr as *mut c_void, vsize, prot)
                    };
                    if res < 0 {
                        return Err(Error::Malformed(format!("failed to mprotect {}", elf_name)));
                    }
                }
            }
        }
        // Activate TLS
        if let Some(ref mut tcb) = tcb_opt {
            unsafe {
                tcb.activate();
            }
        }
        // Perform indirect relocations (necessary evil), gather entry point
        let mut entry_opt = None;
        for (elf_name, elf) in elfs.iter() {
            if skip_list.contains(elf_name) {
                continue;
            }
            let (_, mmap) = match lib.mmaps.get_mut(*elf_name) {
                Some(some) => some,
                None => continue,
            };
            if self.verbose {
                println!("entry {}", elf_name);
            }
            if Some(*elf_name) == primary_opt {
                if is_pie_enabled(&elf) {
                    entry_opt = Some(mmap.as_mut_ptr() as usize + elf.header.e_entry as usize);
                } else {
                    entry_opt = Some(elf.header.e_entry as usize);
                }
            }
            // Relocate
            for rel in elf
                .dynrelas
                .iter()
                .chain(elf.dynrels.iter())
                .chain(elf.pltrelocs.iter())
            {
                // println!("  rel {}: {:x?}",
                //     reloc::r_to_str(rel.r_type, elf.header.e_machine),
                //     rel
                // );
                let a = rel.r_addend.unwrap_or(0) as usize;
                let b = mmap.as_mut_ptr() as usize;
                let ptr = unsafe { mmap.as_mut_ptr().add(rel.r_offset as usize) };
                let set_u64 = |value| {
                    // println!("    set_u64 {:#x}", value);
                    unsafe {
                        *(ptr as *mut u64) = value;
                    }
                };
                if rel.r_type == reloc::R_X86_64_IRELATIVE {
                    unsafe {
                        // Call the resolver function at b + a and store the
                        // address it returns
                        let f: unsafe extern "C" fn() -> u64 = transmute(b + a);
                        set_u64(f());
                    }
                }
            }
            // Protect pages
            for ph in elf.program_headers.iter() {
                if let program_header::PT_LOAD = ph.p_type {
                    let voff = ph.p_vaddr as usize % PAGE_SIZE;
                    let vaddr = ph.p_vaddr as usize - voff;
                    let vsize =
                        ((ph.p_memsz as usize + voff + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
                    let mut prot = 0;
                    if ph.p_flags & program_header::PF_R == program_header::PF_R {
                        prot |= sys_mman::PROT_READ;
                    }
                    // W ^ X. If it is executable, do not allow it to be writable, even if requested
                    if ph.p_flags & program_header::PF_X == program_header::PF_X {
                        prot |= sys_mman::PROT_EXEC;
                    } else if ph.p_flags & program_header::PF_W == program_header::PF_W {
                        prot |= sys_mman::PROT_WRITE;
                    }
                    let res = unsafe {
                        let ptr = if is_pie_enabled(&elf) {
                            mmap.as_mut_ptr().add(vaddr)
                        } else {
                            vaddr as *const u8
                        };
                        if self.verbose {
                            println!("  prot {:#x}, {:#x}: {:p}, {:#x}", vaddr, vsize, ptr, prot);
                        }
                        sys_mman::mprotect(ptr as *mut c_void, vsize, prot)
                    };
                    if res < 0 {
                        return Err(Error::Malformed(format!("failed to mprotect {}", elf_name)));
                    }
                }
            }
        }
        unsafe { _r_debug.state = RTLDState::RT_CONSISTENT };
        _dl_debug_state();

        Ok(entry_opt)
    }
}
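/// Treat `addr` as one slot of an init/fini array and call the function
/// pointer stored there, if any.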
unsafe fn call_inits_finis(addr: usize) {
    let func = transmute::<usize, *const Option<extern "C" fn()>>(addr);
    (*func).map(|x| x());
}
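/// An object is treated as position-independent when its ELF type is ET_DYN.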
fn is_pie_enabled(elf: &Elf) -> bool {
    elf.header.e_type == ET_DYN
}