- Nov 2024
-
onlinelibrary.wiley.com
-
Disease: platelet-type von Willebrand disease (PT-VWD)
Patient: 17 yo, male, adopted
Variant: GP1BA NM_000173.7:c.580C>T p.(Leu194Phe), Heterozygous, gain-of-function
Phenotypes: moderate bleeding phenotype, ISTH-BAT bleeding score of 3, recurrent epistaxis, easy bruising, mild thrombocytopenia
Family: Adopted, no other family history mentioned, segregation studies not performed.
Genetic analysis performed: variant found in GP1BA; results obtained by Sanger sequencing.
Variant present in gnomAD (rs368111193): low allele frequency, contradictory classifications
Variant is not present in ClinVar, LOVD, or HGMD databases
According to this paper, the variant was classified as a VUS under ACMG guidelines.
This paper submitted it to ClinVar (Variation ID 1693270)
-
-
elixir.bootlin.com
-
#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) static int __gup_device_huge(unsigned long pfn, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { int nr_start = *nr; struct dev_pagemap *pgmap = NULL; do { struct page *page = pfn_to_page(pfn); pgmap = get_dev_pagemap(pfn, pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, flags, pages); break; } if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) { undo_dev_pagemap(nr, nr_start, flags, pages); break; } SetPageReferenced(page); pages[*nr] = page; if (unlikely(try_grab_page(page, flags))) { undo_dev_pagemap(nr, nr_start, flags, pages); break; } (*nr)++; pfn++; } while (addr += PAGE_SIZE, addr != end); put_dev_pagemap(pgmap); return addr == end; } static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) return 0; if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { undo_dev_pagemap(nr, nr_start, flags, pages); return 0; } return 1; } static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) return 0; if (unlikely(pud_val(orig) != pud_val(*pudp))) { undo_dev_pagemap(nr, nr_start, flags, pages); return 0; } return 1; } #else static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { BUILD_BUG(); return 0; } static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { BUILD_BUG(); return 0; } #endif
Helpers for grabbing device (ZONE_DEVICE/devmap) huge pages: __gup_device_huge pins each page in the pfn range, and the pmd/pud wrappers re-check the entry afterwards, undoing the pins if it changed underneath.
-
#ifdef CONFIG_ARCH_HAS_HUGEPD static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, unsigned long sz) { unsigned long __boundary = (addr + sz) & ~(sz-1); return (__boundary - 1 < end - 1) ? __boundary : end; } static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long pte_end; struct page *page; struct folio *folio; pte_t pte; int refs; pte_end = (addr + sz) & ~(sz-1); if (pte_end < end) end = pte_end; pte = huge_ptep_get(ptep); if (!pte_access_permitted(pte, flags & FOLL_WRITE)) return 0; /* hugepages are never "special" */ VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT); refs = record_subpages(page, addr, end, pages + *nr); folio = try_grab_folio(page, refs, flags); if (!folio) return 0; if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { gup_put_folio(folio, refs, flags); return 0; } if (!folio_fast_pin_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; } *nr += refs; folio_set_referenced(folio); return 1; } static int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned int pdshift, unsigned long end, unsigned int flags, struct page **pages, int *nr) { pte_t *ptep; unsigned long sz = 1UL << hugepd_shift(hugepd); unsigned long next; ptep = hugepte_offset(hugepd, addr, pdshift); do { next = hugepte_addr_end(addr, end, sz); if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) return 0; } while (ptep++, addr = next, addr != end); return 1; } #else static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned int pdshift, unsigned long end, unsigned int flags, struct page **pages, int *nr) { return 0; } #endif /* CONFIG_ARCH_HAS_HUGEPD */ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { struct page *page; struct folio *folio; int refs; if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) return 0; if (pmd_devmap(orig)) { if (unlikely(flags & FOLL_LONGTERM)) return 0; return __gup_device_huge_pmd(orig, pmdp, addr, end, flags, pages, nr); } page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT); refs = record_subpages(page, addr, end, pages + *nr); folio = try_grab_folio(page, refs, flags); if (!folio) return 0; if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { gup_put_folio(folio, refs, flags); return 0; } if (!folio_fast_pin_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; } *nr += refs; folio_set_referenced(folio); return 1; } static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { struct page *page; struct folio *folio; int refs; if (!pud_access_permitted(orig, flags & FOLL_WRITE)) return 0; if (pud_devmap(orig)) { if (unlikely(flags & FOLL_LONGTERM)) return 0; return __gup_device_huge_pud(orig, pudp, addr, end, flags, pages, nr); } page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT); refs = record_subpages(page, addr, end, pages + *nr); folio = try_grab_folio(page, refs, flags); if (!folio) return 0; if (unlikely(pud_val(orig) != pud_val(*pudp))) { gup_put_folio(folio, refs, flags); return 0; } if 
(!folio_fast_pin_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; } *nr += refs; folio_set_referenced(folio); return 1; } static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { int refs; struct page *page; struct folio *folio; if (!pgd_access_permitted(orig, flags & FOLL_WRITE)) return 0; BUILD_BUG_ON(pgd_devmap(orig)); page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT); refs = record_subpages(page, addr, end, pages + *nr); folio = try_grab_folio(page, refs, flags); if (!folio) return 0; if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { gup_put_folio(folio, refs, flags); return 0; } if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; } if (!folio_fast_pin_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } *nr += refs; folio_set_referenced(folio); return 1; } static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; pmdp = pmd_offset_lockless(pudp, pud, addr); do { pmd_t pmd = pmdp_get_lockless(pmdp); next = pmd_addr_end(addr, end); if (!pmd_present(pmd)) return 0; if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))) { /* See gup_pte_range() */ if (pmd_protnone(pmd)) return 0; if (!gup_huge_pmd(pmd, pmdp, addr, next, flags, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { /* * architecture have different format for hugetlbfs * pmd format and THP pmd format */ if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, PMD_SHIFT, next, flags, pages, nr)) return 0; } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) return 0; } while (pmdp++, addr = next, addr != end); return 1; } static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; pudp = pud_offset_lockless(p4dp, p4d, addr); do { pud_t pud = READ_ONCE(*pudp); next = pud_addr_end(addr, end); if (unlikely(!pud_present(pud))) return 0; if (unlikely(pud_huge(pud) || pud_devmap(pud))) { if (!gup_huge_pud(pud, pudp, addr, next, flags, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, PUD_SHIFT, next, flags, pages, nr)) return 0; } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); return 1; } static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; p4d_t *p4dp; p4dp = p4d_offset_lockless(pgdp, pgd, addr); do { p4d_t p4d = READ_ONCE(*p4dp); next = p4d_addr_end(addr, end); if (p4d_none(p4d)) return 0; BUILD_BUG_ON(p4d_huge(p4d)); if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, P4D_SHIFT, next, flags, pages, nr)) return 0; } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) return 0; } while (p4dp++, addr = next, addr != end); return 1; } static void gup_pgd_range(unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; pgd_t *pgdp; pgdp = pgd_offset(current->mm, addr); do { pgd_t pgd = 
READ_ONCE(*pgdp); next = pgd_addr_end(addr, end); if (pgd_none(pgd)) return; if (unlikely(pgd_huge(pgd))) { if (!gup_huge_pgd(pgd, pgdp, addr, next, flags, pages, nr)) return; } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT, next, flags, pages, nr)) return; } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) return; } while (pgdp++, addr = next, addr != end); } #else static inline void gup_pgd_range(unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { }
policy-use functions for the huge-page paths: the gup_*_range walkers descend the page tables and dispatch to the gup_huge_* handlers defined above (not right above; you have to scroll to find them)
-
static int internal_get_user_pages_fast(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages) { unsigned long len, end; unsigned long nr_pinned; int locked = 0; int ret; if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | FOLL_FORCE | FOLL_PIN | FOLL_GET | FOLL_FAST_ONLY | FOLL_NOFAULT | FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT))) return -EINVAL; if (gup_flags & FOLL_PIN) mm_set_has_pinned_flag(&current->mm->flags); if (!(gup_flags & FOLL_FAST_ONLY)) might_lock_read(&current->mm->mmap_lock); start = untagged_addr(start) & PAGE_MASK; len = nr_pages << PAGE_SHIFT; if (check_add_overflow(start, len, &end)) return -EOVERFLOW; if (end > TASK_SIZE_MAX) return -EFAULT; if (unlikely(!access_ok((void __user *)start, len))) return -EFAULT; nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) return nr_pinned; /* Slow path: try to get the remaining pages with get_user_pages */ start += nr_pinned << PAGE_SHIFT; pages += nr_pinned; ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned, pages, &locked, gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE); if (ret < 0) { /* * The caller has to unpin the pages we already pinned so * returning -errno is not an option */ if (nr_pinned) return nr_pinned; return ret; } return ret + nr_pinned; } /** * get_user_pages_fast_only() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. * * If the architecture does not support this function, simply return with no * pages pinned. * * Careful, careful! COW breaking can go either way, so a non-write * access can get ambiguous page results. If you call this function without * 'write' set, you'd better be sure that you're ok with that ambiguity. */ int get_user_pages_fast_only(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages) { /* * Internally (within mm/gup.c), gup fast variants must set FOLL_GET, * because gup fast is always a "pin with a +1 page refcount" request. * * FOLL_FAST_ONLY is required in order to match the API description of * this routine: no fall back to regular ("slow") GUP. */ if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET | FOLL_FAST_ONLY)) return -EINVAL; return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); } EXPORT_SYMBOL_GPL(get_user_pages_fast_only); /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Attempt to pin user pages in memory without taking mm->mmap_lock. * If not successful, it will fall back to taking the lock and * calling get_user_pages(). * * Returns number of pages pinned. This may be fewer than the number requested. * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns * -errno. */ int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages) { /* * The caller may or may not have explicitly set FOLL_GET; either way is * OK.
However, internally (within mm/gup.c), gup fast variants must set * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" * request. */ if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) return -EINVAL; return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); } EXPORT_SYMBOL_GPL(get_user_pages_fast); /** * pin_user_pages_fast() - pin user pages in memory without taking locks * * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See * get_user_pages_fast() for documentation on the function arguments, because * the arguments here are identical. * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for further details. * * Note that if a zero_page is amongst the returned pages, it will not have * pins in it and unpin_user_page() will not remove pins from it. */ int pin_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages) { if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) return -EINVAL; return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); } EXPORT_SYMBOL_GPL(pin_user_pages_fast); /** * pin_user_pages_remote() - pin pages of a remote process * * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. * * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See * get_user_pages_remote() for documentation on the function arguments, because * the arguments here are identical. * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for details. * * Note that if a zero_page is amongst the returned pages, it will not have * pins in it and unpin_user_page*() will not remove pins from it. */ long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { int local_locked = 1; if (!is_valid_gup_args(pages, locked, &gup_flags, FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) return 0; return __gup_longterm_locked(mm, start, nr_pages, pages, locked ? locked : &local_locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages_remote); /** * pin_user_pages() - pin user pages in memory for use by other devices * * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and * FOLL_PIN is set. * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for details. * * Note that if a zero_page is amongst the returned pages, it will not have * pins in it and unpin_user_page*() will not remove pins from it. 
*/ long pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages) { int locked = 1; if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) return 0; return __gup_longterm_locked(current->mm, start, nr_pages, pages, &locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages); /* * pin_user_pages_unlocked() is the FOLL_PIN variant of * get_user_pages_unlocked(). Behavior is the same, except that this one sets * FOLL_PIN and rejects FOLL_GET. * * Note that if a zero_page is amongst the returned pages, it will not have * pins in it and unpin_user_page*() will not remove pins from it. */ long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags) { int locked = 0; if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE)) return 0; return __gup_longterm_locked(current->mm, start, nr_pages, pages, &locked, gup_flags); }
the fast-GUP entry points: internal_get_user_pages_fast tries the lockless walk first and falls back to the locked slow path for whatever is left; the exported *_fast variants just validate flags before calling it
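As a rough sketch of that two-phase control flow (illustrative Python, not kernel code; the function names and the 4 KiB page size are assumptions):

```python
PAGE_SIZE = 4096  # assuming 4 KiB pages

def gup_fast_model(start, nr_pages, fast_walk, slow_path, fast_only=False):
    """Toy model of internal_get_user_pages_fast: try the lockless walk
    first, then fall back to the locked slow path for whatever is left."""
    pinned = fast_walk(start, nr_pages)  # lockless; may pin only a prefix
    if pinned == nr_pages or fast_only:
        return pinned
    ret = slow_path(start + pinned * PAGE_SIZE, nr_pages - pinned)
    if ret < 0:
        # the caller has to unpin pages already pinned, so a partial
        # success is reported instead of -errno
        return pinned if pinned else ret
    return pinned + ret
```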
-
/** * unpin_user_pages() - release an array of gup-pinned pages. * @pages: array of pages to be marked dirty and released. * @npages: number of pages in the @pages array. * * For each page in the @pages array, release the page using unpin_user_page(). * * Please see the unpin_user_page() documentation for details. */ void unpin_user_pages(struct page **pages, unsigned long npages) { unsigned long i; struct folio *folio; unsigned int nr; /* * If this WARN_ON() fires, then the system *might* be leaking pages (by * leaving them pinned), but probably not. More likely, gup/pup returned * a hard -ERRNO error to the caller, who erroneously passed it here. */ if (WARN_ON(IS_ERR_VALUE(npages))) return; sanity_check_pinned_pages(pages, npages); for (i = 0; i < npages; i += nr) { folio = gup_folio_next(pages, npages, i, &nr); gup_put_folio(folio, nr, FOLL_PIN); } }
gup unpin function, not actual logic
-
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, bool make_dirty) { unsigned long i; struct folio *folio; unsigned int nr; for (i = 0; i < npages; i += nr) { folio = gup_folio_range_next(page, npages, i, &nr); if (make_dirty && !folio_test_dirty(folio)) { folio_lock(folio); folio_mark_dirty(folio); folio_unlock(folio); } gup_put_folio(folio, nr, FOLL_PIN); } }
unpin logic, but for a physically contiguous page range; optionally marks each folio dirty before unpinning
-
if ((flags & FOLL_DUMP) && (vma_is_anonymous(vma) || !vma->vm_ops->fault)) return ERR_PTR(-EFAULT); return NULL;
explained in comments
-
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL /* * Fast-gup relies on pte change detection to avoid concurrent pgtable * operations. * * To pin the page, fast-gup needs to do below in order: * (1) pin the page (by prefetching pte), then (2) check pte not changed. * * For the rest of pgtable operations where pgtable updates can be racy * with fast-gup, we need to do (1) clear pte, then (2) check whether page * is pinned. * * Above will work for all pte-level operations, including THP split. * * For THP collapse, it's a bit more complicated because fast-gup may be * walking a pgtable page that is being freed (pte is still valid but pmd * can be cleared already). To avoid race in such condition, we need to * also check pmd here to make sure pmd doesn't change (corresponds to * pmdp_collapse_flush() in the THP collapse code path). */ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; int nr_start = *nr, ret = 0; pte_t *ptep, *ptem; ptem = ptep = pte_offset_map(&pmd, addr); if (!ptep) return 0; do { pte_t pte = ptep_get_lockless(ptep); struct page *page; struct folio *folio; /* * Always fallback to ordinary GUP on PROT_NONE-mapped pages: * pte_access_permitted() better should reject these pages * either way: otherwise, GUP-fast might succeed in * cases where ordinary GUP would fail due to VMA access * permissions. */ if (pte_protnone(pte)) goto pte_unmap; if (!pte_access_permitted(pte, flags & FOLL_WRITE)) goto pte_unmap; if (pte_devmap(pte)) { if (unlikely(flags & FOLL_LONGTERM)) goto pte_unmap; pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, flags, pages); goto pte_unmap; } } else if (pte_special(pte)) goto pte_unmap; VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); folio = try_grab_folio(page, 1, flags); if (!folio) goto pte_unmap; if (unlikely(folio_is_secretmem(folio))) { gup_put_folio(folio, 1, flags); goto pte_unmap; } if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { gup_put_folio(folio, 1, flags); goto pte_unmap; } if (!folio_fast_pin_allowed(folio, flags)) { gup_put_folio(folio, 1, flags); goto pte_unmap; } if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) { gup_put_folio(folio, 1, flags); goto pte_unmap; } /* * We need to make the page accessible if and only if we are * going to access its content (the FOLL_PIN case). Please * see Documentation/core-api/pin_user_pages.rst for * details. */ if (flags & FOLL_PIN) { ret = arch_make_page_accessible(page); if (ret) { gup_put_folio(folio, 1, flags); goto pte_unmap; } } folio_set_referenced(folio); pages[*nr] = page; (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); ret = 1; pte_unmap: if (pgmap) put_dev_pagemap(pgmap); pte_unmap(ptem); return ret; } #else /* * If we can't determine whether or not a pte is special, then fail immediately * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not * to be special. * * For a futex to be placed on a THP tail page, get_futex_key requires a * get_user_pages_fast_only implementation that can pin pages. Thus it's still * useful to have gup_huge_pmd even if we can't operate on ptes. */ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { return 0; } #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
the lockless fast-GUP pte walk: pin the page first, then re-check that the pte (and pmd) haven't changed underneath; on any race, drop the folio and bail out through pte_unmap
-
#ifdef CONFIG_HAVE_FAST_GUP /* * Used in the GUP-fast path to determine whether a pin is permitted for a * specific folio. * * This call assumes the caller has pinned the folio, that the lowest page table * level still points to this folio, and that interrupts have been disabled. * * Writing to pinned file-backed dirty tracked folios is inherently problematic * (see comment describing the writable_file_mapping_allowed() function). We * therefore try to avoid the most egregious case of a long-term mapping doing * so. * * This function cannot be as thorough as that one as the VMA is not available * in the fast path, so instead we whitelist known good cases and if in doubt, * fall back to the slow path. */ static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) { struct address_space *mapping; unsigned long mapping_flags; /* * If we aren't pinning then no problematic write can occur. A long term * pin is the most egregious case so this is the one we disallow. */ if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) != (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) return true; /* The folio is pinned, so we can safely access folio fields. */ if (WARN_ON_ONCE(folio_test_slab(folio))) return false; /* hugetlb mappings do not require dirty-tracking. */ if (folio_test_hugetlb(folio)) return true; /* * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods * cannot proceed, which means no actions performed under RCU can * proceed either. * * inodes and thus their mappings are freed under RCU, which means the * mapping cannot be freed beneath us and thus we can safely dereference * it. */ lockdep_assert_irqs_disabled(); /* * However, there may be operations which _alter_ the mapping, so ensure * we read it once and only once. */ mapping = READ_ONCE(folio->mapping); /* * The mapping may have been truncated, in any case we cannot determine * if this mapping is safe - fall back to slow path to determine how to * proceed. */ if (!mapping) return false; /* Anonymous folios pose no problem. */ mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS; if (mapping_flags) return mapping_flags & PAGE_MAPPING_ANON; /* * At this point, we know the mapping is non-null and points to an * address_space object. The only remaining whitelisted file system is * shmem. */ return shmem_mapping(mapping); }
policy logic: decides whether a GUP-fast pin is permitted for a folio without taking any locks, relying on disabled IRQs (which hold off RCU grace periods) instead. Long-term writable pins on file-backed, dirty-tracked mappings can't be vetted here, so those fall back to the slow path.
-
static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, unsigned int flags, struct page **pages) { while ((*nr) - nr_start) { struct page *page = pages[--(*nr)]; ClearPageReferenced(page); if (flags & FOLL_PIN) unpin_user_page(page); else put_page(page); } }
policy-use helper that rolls back the device-page grabs taken so far: clears the referenced bit and unpins (FOLL_PIN) or puts each page
-
#ifdef CONFIG_MIGRATION /* * Returns the number of collected pages. Return value is always >= 0. */ static unsigned long collect_longterm_unpinnable_pages( struct list_head *movable_page_list, unsigned long nr_pages, struct page **pages) { unsigned long i, collected = 0; struct folio *prev_folio = NULL; bool drain_allow = true; for (i = 0; i < nr_pages; i++) { struct folio *folio = page_folio(pages[i]); if (folio == prev_folio) continue; prev_folio = folio; if (folio_is_longterm_pinnable(folio)) continue; collected++; if (folio_is_device_coherent(folio)) continue; if (folio_test_hugetlb(folio)) { isolate_hugetlb(folio, movable_page_list); continue; } if (!folio_test_lru(folio) && drain_allow) { lru_add_drain_all(); drain_allow = false; } if (!folio_isolate_lru(folio)) continue; list_add_tail(&folio->lru, movable_page_list); node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), folio_nr_pages(folio)); } return collected; }
-
#ifdef CONFIG_ELF_CORE struct page *get_dump_page(unsigned long addr) { struct page *page; int locked = 0; int ret; ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked, FOLL_FORCE | FOLL_DUMP | FOLL_GET); return (ret == 1) ? page : NULL; } #endif /* CONFIG_ELF_CORE */
part of the policy-use code: core-dump helper that forces a single page in through the locked slow path
-
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) { struct mm_struct *mm = current->mm; unsigned long end, nstart, nend; struct vm_area_struct *vma = NULL; int locked = 0; long ret = 0; end = start + len; for (nstart = start; nstart < end; nstart = nend) { /* * We want to fault in pages for [nstart; end) address range. * Find first corresponding VMA. */ if (!locked) { locked = 1; mmap_read_lock(mm); vma = find_vma_intersection(mm, nstart, end); } else if (nstart >= vma->vm_end) vma = find_vma_intersection(mm, vma->vm_end, end); if (!vma) break; /* * Set [nstart; nend) to intersection of desired address * range with the first VMA. Also, skip undesirable VMA types. */ nend = min(end, vma->vm_end); if (vma->vm_flags & (VM_IO | VM_PFNMAP)) continue; if (nstart < vma->vm_start) nstart = vma->vm_start; /* * Now fault in a range of pages. populate_vma_page_range() * double checks the vma flags, so that it won't mlock pages * if the vma was already munlocked. */ ret = populate_vma_page_range(vma, nstart, nend, &locked); if (ret < 0) { if (ignore_errors) { ret = 0; continue; /* continue at next VMA */ } break; } nend = nstart + ret * PAGE_SIZE; ret = 0; } if (locked) mmap_read_unlock(mm); return ret; /* 0 or negative error code */ }
policy-use function that faults in (populates) an address range VMA by VMA, delegating each chunk to populate_vma_page_range.
-
long populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *locked) { struct mm_struct *mm = vma->vm_mm; unsigned long nr_pages = (end - start) / PAGE_SIZE; int local_locked = 1; int gup_flags; long ret; VM_BUG_ON(!PAGE_ALIGNED(start)); VM_BUG_ON(!PAGE_ALIGNED(end)); VM_BUG_ON_VMA(start < vma->vm_start, vma); VM_BUG_ON_VMA(end > vma->vm_end, vma); mmap_assert_locked(mm); /* * Rightly or wrongly, the VM_LOCKONFAULT case has never used * faultin_page() to break COW, so it has no work to do here. */ if (vma->vm_flags & VM_LOCKONFAULT) return nr_pages; gup_flags = FOLL_TOUCH; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW * and we would not want to dirty them for nothing. */ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) gup_flags |= FOLL_WRITE; /* * We want mlock to succeed for regions that have any permissions * other than PROT_NONE. */ if (vma_is_accessible(vma)) gup_flags |= FOLL_FORCE; if (locked) gup_flags |= FOLL_UNLOCKABLE; /* * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here. */ ret = __get_user_pages(mm, start, nr_pages, gup_flags, NULL, locked ? locked : &local_locked); lru_add_drain(); return ret; }
policy use code.
-
long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { int local_locked = 1; if (!is_valid_gup_args(pages, locked, &gup_flags, FOLL_TOUCH | FOLL_REMOTE)) return -EINVAL; return __get_user_pages_locked(mm, start, nr_pages, pages, locked ? locked : &local_locked, gup_flags); }
policy logic
-
static __always_inline long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, int *locked, unsigned int flags) { long ret, pages_done; bool must_unlock = false; /* * The internal caller expects GUP to manage the lock internally and the * lock must be released when this returns. */ if (!*locked) { if (mmap_read_lock_killable(mm)) return -EAGAIN; must_unlock = true; *locked = 1; } else mmap_assert_locked(mm); if (flags & FOLL_PIN) mm_set_has_pinned_flag(&mm->flags); /* * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior * is to set FOLL_GET if the caller wants pages[] filled in (but has * carelessly failed to specify FOLL_GET), so keep doing that, but only * for FOLL_GET, not for the newer FOLL_PIN. * * FOLL_PIN always expects pages to be non-null, but no need to assert * that here, as any failures will be obvious enough. */ if (pages && !(flags & FOLL_PIN)) flags |= FOLL_GET; pages_done = 0; for (;;) { ret = __get_user_pages(mm, start, nr_pages, flags, pages, locked); if (!(flags & FOLL_UNLOCKABLE)) { /* VM_FAULT_RETRY couldn't trigger, bypass */ pages_done = ret; break; } /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */ if (!*locked) { BUG_ON(ret < 0); BUG_ON(ret >= nr_pages); } if (ret > 0) { nr_pages -= ret; pages_done += ret; if (!nr_pages) break; } if (*locked) { /* * VM_FAULT_RETRY didn't trigger or it was a * FOLL_NOWAIT. */ if (!pages_done) pages_done = ret; break; } /* * VM_FAULT_RETRY triggered, so seek to the faulting offset. * For the prefault case (!pages) we only update counts. */ if (likely(pages)) pages += ret; start += ret << PAGE_SHIFT; /* The lock was temporarily dropped, so we must unlock later */ must_unlock = true; retry: /* * Repeat on the address that fired VM_FAULT_RETRY * with both FAULT_FLAG_ALLOW_RETRY and * FAULT_FLAG_TRIED. Note that GUP can be interrupted * by fatal signals of even common signals, depending on * the caller's request. So we need to check it before we * start trying again otherwise it can loop forever. */ if (gup_signal_pending(flags)) { if (!pages_done) pages_done = -EINTR; break; } ret = mmap_read_lock_killable(mm); if (ret) { BUG_ON(ret > 0); if (!pages_done) pages_done = ret; break; } *locked = 1; ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, pages, locked); if (!*locked) { /* Continue to retry until we succeeded */ BUG_ON(ret != 0); goto retry; } if (ret != 1) { BUG_ON(ret > 1); if (!pages_done) pages_done = ret; break; } nr_pages--; pages_done++; if (!nr_pages) break; if (likely(pages)) pages++; start += PAGE_SIZE; } if (must_unlock && *locked) { /* * We either temporarily dropped the lock, or the caller * requested that we both acquire and drop the lock. Either way, * we must now unlock, and notify the caller of that state. */ mmap_read_unlock(mm); *locked = 0; } return pages_done; }
same walk as __get_user_pages, but it also manages mmap_lock (taking, retaking, and releasing it) around the VM_FAULT_RETRY loop
-
int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { struct vm_area_struct *vma; vm_fault_t ret; address = untagged_addr_remote(mm, address); if (unlocked) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; retry: vma = gup_vma_lookup(mm, address); if (!vma) return -EFAULT; if (!vma_permits_fault(vma, fault_flags)) return -EFAULT; if ((fault_flags & FAULT_FLAG_KILLABLE) && fatal_signal_pending(current)) return -EINTR; ret = handle_mm_fault(vma, address, fault_flags, NULL); if (ret & VM_FAULT_COMPLETED) { /* * NOTE: it's a pity that we need to retake the lock here * to pair with the unlock() in the callers. Ideally we * could tell the callers so they do not need to unlock. */ mmap_read_lock(mm); *unlocked = true; return 0; } if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, 0); if (err) return err; BUG(); } if (ret & VM_FAULT_RETRY) { mmap_read_lock(mm); *unlocked = true; fault_flags |= FAULT_FLAG_TRIED; goto retry; } return 0; }
resolves user page fault. policy logic
-
static long __get_user_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { long ret = 0, i = 0; struct vm_area_struct *vma = NULL; struct follow_page_context ctx = { NULL }; if (!nr_pages) return 0; start = untagged_addr_remote(mm, start); VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); do { struct page *page; unsigned int foll_flags = gup_flags; unsigned int page_increm; /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { /* * MADV_POPULATE_(READ|WRITE) wants to handle VMA * lookups+error reporting differently. */ if (gup_flags & FOLL_MADV_POPULATE) { vma = vma_lookup(mm, start); if (!vma) { ret = -ENOMEM; goto out; } if (check_vma_flags(vma, gup_flags)) { ret = -EINVAL; goto out; } goto retry; } vma = gup_vma_lookup(mm, start); if (!vma && in_gate_area(mm, start)) { ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, pages ? &page : NULL); if (ret) goto out; ctx.page_mask = 0; goto next_page; } if (!vma) { ret = -EFAULT; goto out; } ret = check_vma_flags(vma, gup_flags); if (ret) goto out; } retry: /* * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. */ if (fatal_signal_pending(current)) { ret = -EINTR; goto out; } cond_resched(); page = follow_page_mask(vma, start, foll_flags, &ctx); if (!page || PTR_ERR(page) == -EMLINK) { ret = faultin_page(vma, start, &foll_flags, PTR_ERR(page) == -EMLINK, locked); switch (ret) { case 0: goto retry; case -EBUSY: case -EAGAIN: ret = 0; fallthrough; case -EFAULT: case -ENOMEM: case -EHWPOISON: goto out; } BUG(); } else if (PTR_ERR(page) == -EEXIST) { /* * Proper page table entry exists, but no corresponding * struct page. If the caller expects **pages to be * filled in, bail out now, because that can't be done * for this page. */ if (pages) { ret = PTR_ERR(page); goto out; } } else if (IS_ERR(page)) { ret = PTR_ERR(page); goto out; } next_page: page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); if (page_increm > nr_pages) page_increm = nr_pages; if (pages) { struct page *subpage; unsigned int j; /* * This must be a large folio (and doesn't need to * be the whole folio; it can be part of it), do * the refcount work for all the subpages too. * * NOTE: here the page may not be the head page * e.g. when start addr is not thp-size aligned. * try_grab_folio() should have taken care of tail * pages. */ if (page_increm > 1) { struct folio *folio; /* * Since we already hold refcount on the * large folio, this should never fail. */ folio = try_grab_folio(page, page_increm - 1, foll_flags); if (WARN_ON_ONCE(!folio)) { /* * Release the 1st page ref if the * folio is problematic, fail hard. */ gup_put_folio(page_folio(page), 1, foll_flags); ret = -EFAULT; goto out; } } for (j = 0; j < page_increm; j++) { subpage = nth_page(page, j); pages[i + j] = subpage; flush_anon_page(vma, subpage, start + j * PAGE_SIZE); flush_dcache_page(subpage); } } i += page_increm; start += page_increm * PAGE_SIZE; nr_pages -= page_increm; } while (nr_pages); out: if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return i ? i : ret; }
The actual policy logic of GUP, the most important piece of code here: the per-VMA walk, follow_page_mask lookup, faultin_page retries, and filling pages[] with refcounted subpages
-
#ifdef CONFIG_STACK_GROWSUP return vma_lookup(mm, addr); #else static volatile unsigned long next_warn; struct vm_area_struct *vma; unsigned long now, next; vma = find_vma(mm, addr); if (!vma || (addr >= vma->vm_start)) return vma; /* Only warn for half-way relevant accesses */ if (!(vma->vm_flags & VM_GROWSDOWN)) return NULL; if (vma->vm_start - addr > 65536) return NULL; /* Let's not warn more than once an hour.. */ now = jiffies; next = next_warn; if (next && time_before(now, next)) return NULL; next_warn = now + 60*60*HZ; /* Let people know things may have changed. */ pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n", current->comm, task_pid_nr(current), vma->vm_start, vma->vm_end, addr); dump_stack(); return NULL;
helper func to look up a VMA (virtual memory area); for accesses just below a VM_GROWSDOWN stack it returns NULL and, at most once an hour, warns that GUP no longer grows the stack
-
static bool writable_file_mapping_allowed(struct vm_area_struct *vma, unsigned long gup_flags) { /* * If we aren't pinning then no problematic write can occur. A long term * pin is the most egregious case so this is the case we disallow. */ if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) != (FOLL_PIN | FOLL_LONGTERM)) return true; /* * If the VMA does not require dirty tracking then no problematic write * can occur either. */ return !vma_needs_dirty_tracking(vma); }
Definitely policy code: checks whether a write through this mapping is allowed, refusing only long-term pins (FOLL_PIN | FOLL_LONGTERM) on VMAs that need dirty tracking
-
if (*flags & FOLL_NOFAULT) return -EFAULT; if (*flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (*flags & FOLL_REMOTE) fault_flags |= FAULT_FLAG_REMOTE; if (*flags & FOLL_UNLOCKABLE) { fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; /* * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE. * That's because some callers may not be prepared to * handle early exits caused by non-fatal signals. */ if (*flags & FOLL_INTERRUPTIBLE) fault_flags |= FAULT_FLAG_INTERRUPTIBLE; } if (*flags & FOLL_NOWAIT) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; if (*flags & FOLL_TRIED) { /* * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED * can co-exist */ fault_flags |= FAULT_FLAG_TRIED; } if (unshare) { fault_flags |= FAULT_FLAG_UNSHARE; /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */ VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE); } ret = handle_mm_fault(vma, address, fault_flags, NULL); if (ret & VM_FAULT_COMPLETED) { /* * With FAULT_FLAG_RETRY_NOWAIT we'll never release the * mmap lock in the page fault handler. Sanity check this. */ WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT); *locked = 0; /* * We should do the same as VM_FAULT_RETRY, but let's not * return -EBUSY since that's not reflecting the reality of * what has happened - we've just fully completed a page * fault, with the mmap lock released. Use -EAGAIN to show * that we want to take the mmap lock _again_. */ return -EAGAIN; } if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, *flags); if (err) return err; BUG(); } if (ret & VM_FAULT_RETRY) { if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) *locked = 0; return -EBUSY; }
Translates the FOLL_* GUP flags into FAULT_FLAG_* fault flags, calls handle_mm_fault, and maps the result onto retry (-EBUSY/-EAGAIN) or error codes
-
/* user gate pages are read-only */ if (gup_flags & FOLL_WRITE) return -EFAULT; if (address > TASK_SIZE) pgd = pgd_offset_k(address); else pgd = pgd_offset_gate(mm, address); if (pgd_none(*pgd)) return -EFAULT; p4d = p4d_offset(pgd, address); if (p4d_none(*p4d)) return -EFAULT; pud = pud_offset(p4d, address); if (pud_none(*pud)) return -EFAULT; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return -EFAULT; pte = pte_offset_map(pmd, address); if (!pte) return -EFAULT; entry = ptep_get(pte); if (pte_none(entry)) goto unmap; *vma = get_gate_vma(mm); if (!page) goto out; *page = vm_normal_page(*vma, address, entry); if (!*page) { if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry))) goto unmap; *page = pte_page(entry); } ret = try_grab_page(*page, gup_flags); if (unlikely(ret)) goto unmap;
Most of these are page-table presence sanity checks (pgd → p4d → pud → pmd → pte) for the gate area, right up until 'if (!*page)'; after that the gate page is resolved and grabbed, and any failure jumps to unmap.
-
static struct page *follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct follow_page_context *ctx) { pgd_t *pgd; struct mm_struct *mm = vma->vm_mm; ctx->page_mask = 0; /* * Call hugetlb_follow_page_mask for hugetlb vmas as it will use * special hugetlb page table walking code. This eliminates the * need to check for hugetlb entries in the general walking code. */ if (is_vm_hugetlb_page(vma)) return hugetlb_follow_page_mask(vma, address, flags, &ctx->page_mask); pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) return no_page_table(vma, flags); return follow_p4d_mask(vma, address, pgd, flags, ctx); }
entry point of the page-table walk: hugetlb VMAs are handed to hugetlb_follow_page_mask, everything else descends from the pgd via follow_p4d_mask; ctx->page_mask is reset first
-
struct page *follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags) { struct follow_page_context ctx = { NULL }; struct page *page; if (vma_is_secretmem(vma)) return NULL; if (WARN_ON_ONCE(foll_flags & FOLL_PIN)) return NULL; /* * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect * to fail on PROT_NONE-mapped pages. */ page = follow_page_mask(vma, address, foll_flags, &ctx); if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return page; }
finds page
-
if (flags & FOLL_SPLIT_PMD) { spin_unlock(ptl); split_huge_pmd(vma, pmd, address); /* If pmd was left empty, stuff a page table in there quickly */ return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } page = follow_trans_huge_pmd(vma, address, pmd, flags); spin_unlock(ptl); ctx->page_mask = HPAGE_PMD_NR - 1; return page;
we're finding the page again, but for the THP case: either split the huge pmd and retry at pte level, or return the huge page and store the page mask (HPAGE_PMD_NR - 1) in ctx
-
if (likely(!pmd_trans_huge(pmdval))) return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags)) return no_page_table(vma, flags); ptl = pmd_lock(mm, pmd); if (unlikely(!pmd_present(*pmd))) { spin_unlock(ptl); return no_page_table(vma, flags); } if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(ptl); return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); }
re-checks, under the pmd lock, that the pmd is still present and still a transparent huge pmd (the unlikely() annotations are branch-prediction hints); otherwise it falls back to the pte path
-
if (pmd_none(pmdval)) return no_page_table(vma, flags); if (!pmd_present(pmdval)) return no_page_table(vma, flags); if (pmd_devmap(pmdval)) { ptl = pmd_lock(mm, pmd); page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; }
checks that the pmd (page middle directory) is present; not-present entries bail to no_page_table, and devmap pmds are followed under the pmd lock
-
/* FOLL_GET and FOLL_PIN are mutually exclusive. */ if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == (FOLL_PIN | FOLL_GET))) return ERR_PTR(-EINVAL); ptep = pte_offset_map_lock(mm, pmd, address, &ptl); if (!ptep) return no_page_table(vma, flags); pte = ptep_get(ptep); if (!pte_present(pte)) goto no_page; if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags)) goto no_page; page = vm_normal_page(vma, address, pte); /* * We only care about anon pages in can_follow_write_pte() and don't * have to worry about pte_devmap() because they are never anon. */ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, page, vma, flags)) { page = NULL; goto out; } if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) { /* * Only return device mapping pages in the FOLL_GET or FOLL_PIN * case since they are only valid while holding the pgmap * reference. */ *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap); if (*pgmap) page = pte_page(pte); else goto no_page; } else if (unlikely(!page)) { if (flags & FOLL_DUMP) { /* Avoid special (like zero) pages in core dumps */ page = ERR_PTR(-EFAULT); goto out; } if (is_zero_pfn(pte_pfn(pte))) { page = pte_page(pte); } else { ret = follow_pfn_pte(vma, address, ptep, flags); page = ERR_PTR(ret); goto out; } } if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) { page = ERR_PTR(-EMLINK); goto out; } VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && !PageAnonExclusive(page), page); /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */ ret = try_grab_page(page, flags); if (unlikely(ret)) { page = ERR_PTR(ret); goto out; } /* * We need to make the page accessible if and only if we are going * to access its content (the FOLL_PIN case). Please see * Documentation/core-api/pin_user_pages.rst for details. */ if (flags & FOLL_PIN) { ret = arch_make_page_accessible(page); if (ret) { unpin_user_page(page); page = ERR_PTR(ret); goto out; } } if (flags & FOLL_TOUCH) { if ((flags & FOLL_WRITE) && !pte_dirty(pte) && !PageDirty(page)) set_page_dirty(page); /* * pte_mkyoung() would be more correct here, but atomic care * is needed to avoid losing the dirty bit: it is easier to use * mark_page_accessed(). */ mark_page_accessed(page); }
finds the page in the pte (follow_page_pte). Judging by the complexity of the logic, this is core policy code: permission checks, devmap and zero-page handling, grabbing the page, and the FOLL_PIN/FOLL_TOUCH bookkeeping
-
if (flags & FOLL_TOUCH) { pte_t orig_entry = ptep_get(pte); pte_t entry = orig_entry; if (flags & FOLL_WRITE) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); if (!pte_same(orig_entry, entry)) { set_pte_at(vma->vm_mm, address, pte, entry); update_mmu_cache(vma, address, pte); }
FOLL_TOUCH handling: marks the pte dirty (for writes) and young, writing it back and updating the MMU cache only if it actually changed
-
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool make_dirty) { unsigned long i; struct folio *folio; unsigned int nr; if (!make_dirty) { unpin_user_pages(pages, npages); return; } sanity_check_pinned_pages(pages, npages); for (i = 0; i < npages; i += nr) { folio = gup_folio_next(pages, npages, i, &nr); /* * Checking PageDirty at this point may race with * clear_page_dirty_for_io(), but that's OK. Two key * cases: * * 1) This code sees the page as already dirty, so it * skips the call to set_page_dirty(). That could happen * because clear_page_dirty_for_io() called * page_mkclean(), followed by set_page_dirty(). * However, now the page is going to get written back, * which meets the original intention of setting it * dirty, so all is well: clear_page_dirty_for_io() goes * on to call TestClearPageDirty(), and write the page * back. * * 2) This code sees the page as clean, so it calls * set_page_dirty(). The page stays dirty, despite being * written back, so it gets written back again in the * next writeback cycle. This is harmless. */ if (!folio_test_dirty(folio)) { folio_lock(folio); folio_mark_dirty(folio); folio_unlock(folio); } gup_put_folio(folio, nr, FOLL_PIN); } }
optionally dirties, then unpins, each folio in the page array
-
static inline struct folio *gup_folio_next(struct page **list, unsigned long npages, unsigned long i, unsigned int *ntails) { struct folio *folio = page_folio(list[i]); unsigned int nr; for (nr = i + 1; nr < npages; nr++) { if (page_folio(list[nr]) != folio) break; } *ntails = nr - i; return folio; }
gets the folio of the next page in the list, along with a count (ntails) of how many consecutive entries share that folio
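The same run-grouping idea as a hedged Python sketch (the page objects with a .folio attribute are hypothetical stand-ins):

```python
def folio_runs(pages):
    """Group a flat page list into (folio, run_length) chunks, mirroring
    how gup_folio_next lets unpin_user_pages() work one folio at a time."""
    i = 0
    while i < len(pages):
        folio, n = pages[i].folio, 1
        while i + n < len(pages) and pages[i + n].folio is folio:
            n += 1
        yield folio, n
        i += n
```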
-
static inline struct folio *gup_folio_range_next(struct page *start, unsigned long npages, unsigned long i, unsigned int *ntails) { struct page *next = nth_page(start, i); struct folio *folio = page_folio(next); unsigned int nr = 1; if (folio_test_large(folio)) nr = min_t(unsigned int, npages - i, folio_nr_pages(folio) - folio_page_idx(folio, next)); *ntails = nr; return folio; }
gets the folio containing the i-th page of a contiguous range, plus how many of the remaining pages (ntails) fall inside that same folio
-
folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
adds GUP_PIN_COUNTING_BIAS to the folio refcount; this is how a FOLL_PIN is encoded so it can be distinguished from ordinary references
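To make the bias arithmetic concrete, a toy model (assuming GUP_PIN_COUNTING_BIAS is 1024, its value in include/linux/mm.h; small folios only):

```python
GUP_PIN_COUNTING_BIAS = 1024  # value from include/linux/mm.h

class SmallFolio:
    """Toy model: on small folios, pins live inside the refcount itself."""
    def __init__(self):
        self.refcount = 1  # the mapping's own reference

    def pin(self):
        self.refcount += GUP_PIN_COUNTING_BIAS   # one FOLL_PIN

    def unpin(self):
        self.refcount -= GUP_PIN_COUNTING_BIAS

    def maybe_pinned(self):
        # the kernel's heuristic: a bias-sized chunk of refs means "pinned"
        return self.refcount >= GUP_PIN_COUNTING_BIAS

f = SmallFolio()
f.pin()
print(f.refcount, f.maybe_pinned())  # 1025 True
```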
-
void unpin_user_page(struct page *page) { sanity_check_pinned_pages(&page, 1); gup_put_folio(page_folio(page), 1, FOLL_PIN); } EXPORT_SYMBOL(unpin_user_page);
actual policy use logic
-
struct folio *folio = page_folio(page); if (WARN_ON_ONCE(folio_ref_count(folio) <= 0)) return -ENOMEM; if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) return -EREMOTEIO; if (flags & FOLL_GET) folio_ref_inc(folio);
pre-checks in the grab path: refcount sanity, rejection of P2PDMA pages, and the plain FOLL_GET refcount bump. Involved in policy, but not the actual logic
-
else if (flags & FOLL_PIN) { /* * Don't take a pin on the zero page - it's not going anywhere * and it is used in a *lot* of places. */ if (is_zero_page(page)) return 0; /* * Similar to try_grab_folio(): be sure to *also* * increment the normal page refcount field at least once, * so that the page really is pinned. */ if (folio_test_large(folio)) { folio_ref_add(folio, 1); atomic_add(1, &folio->_pincount); } else { folio_ref_add(folio, GUP_PIN_COUNTING_BIAS); } node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1); }
The FOLL_PIN branch of the grab logic: skip the zero page, then bump _pincount (large folios) or add the bias (small folios) and update the pin stats. Policy-use code, not the actual policy
-
if (!put_devmap_managed_page_refs(&folio->page, refs)) folio_put_refs(folio, refs);
Definitely a vital and straightforward policy-use section of gup that simply drops references on the folio, via the devmap-managed path when applicable
-
if (flags & FOLL_PIN) { if (is_zero_folio(folio)) return; node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs); if (folio_test_large(folio)) atomic_sub(refs, &folio->_pincount); else refs *= GUP_PIN_COUNTING_BIAS; }
The FOLL_PIN release side: skips the zero folio, updates pin statistics, and converts refs back (subtract from _pincount for large folios, or multiply by the bias for small ones)
-
if (folio_test_large(folio)) atomic_add(refs, &folio->_pincount); else folio_ref_add(folio, refs * (GUP_PIN_COUNTING_BIAS - 1))
maintaining reference counts. Part of policy logic most likely
-
if (unlikely((flags & FOLL_LONGTERM) && !folio_is_longterm_pinnable(folio))) { if (!put_devmap_managed_page_refs(&folio->page, refs)) folio_put_refs(folio, refs); return NULL;
checks for long-term folio pins: FOLL_LONGTERM grabs of folios that aren't long-term pinnable are refused, dropping the refs just taken
-
if (unlikely(page_folio(page) != folio)) { if (!put_devmap_managed_page_refs(&folio->page, refs)) folio_put_refs(folio, refs); goto retry;
Uses a branch-prediction hint (unlikely) while re-checking that the page still belongs to the folio after taking the refs; if the folio was split in the meantime, the refs are dropped and the grab retried.
-
folio = page_folio(page); if (WARN_ON_ONCE(folio_ref_count(folio) < 0)) return NULL; if (unlikely(!folio_ref_try_add(folio, refs))) return NULL;
These increment the folio's reference count before a reference to it is returned; the grab fails if the refcount is already bad or the add doesn't succeed. Important function, so important internal logic.
-
if (is_zero_page(page) || !folio_test_anon(folio)) continue; if (!folio_test_large(folio) || folio_test_hugetlb(folio))
Sanity checks for pinned pages. These wouldn't classify as policy logic, just common-sense pre-checks ahead of the actual policy, but worth tagging to get a sense of what is not policy code.
-
-
en.wikipedia.org
-
- Sep 2024
-
www.ncbi.nlm.nih.gov
-
heterozygous c.G380A variant in GP1BA (NM_000173.7) (Figure 1B), resulting in a missense substitution of an arginine with a glutamine at position 127
Disease: platelet-type von Willebrand disease (PT-VWD)
Patient: 14 yo, Male
Variant: GP1BA NM_000173.7:c.380G>A p.(Arg127Gln), Heterozygous, Gain-of-Function (GOF)
Located in LRR5 domain of GP1BA
Family: mother did not report any bleeding symptoms (variant absent in mother); father not available for collection of clinical history or platelet function testing
-
- Jul 2024
-
www.ncbi.nlm.nih.gov
-
RRID:IMSR_JAX:001
DOI: 10.1093/function/zqae024
Resource: RRID:IMSR_JAX:001929
Curator: @evieth
SciCrunch record: RRID:IMSR_JAX:001929
-
- Mar 2024
-
-
Abstract
Conclusions: the predictions outperform MOST (MO estimates systematically underestimate the magnitude of turbulent fluxes; the model improves agreement with observations and reduces the overall deviation from observed fluxes) and generalize across different sites. Shortcomings: mass fluxes are not included, the predictions still need improvement, results become anomalous depending on stability, generalization across seasons is open, and the model relies on variables that are not easy to obtain (a minimal observation set remains to be identified).
-
- Feb 2024
-
docdrop.org
-
american mathematician albert bartlett 00:01:12 in his long teaching career repeatedly said the greatest weakness of the human race is its inability to understand the exponential function
for - quote - Albert Bartlett - exponential function
quote - Albert Bartlett - The greatest weakness of the human race is its inability to understand the exponential function
-
- Jan 2024
-
greattransition.org
-
So organized, initiatives can collectively co-evolve and co-emerge into a purposeful transformation system oriented towards whole system change
for - quote - whole system change - bottom up whole system change - open function SRG/ Deep Humanity/ Indyweb / Indranet / TPF framework - definition - transformation catalyst
quote - (see below) - A transformation catalyst is an actor who - brings together numerous initiatives and actors around a shared and co-defined set of interests - with an action agenda in mind. - The TC stewards these actors through a set of three general (dialogue- and action-based) processes that can be adapted - to the unique context, needs, and interests - of each system and its players. - So organized, initiatives can collectively co-evolve and co-emerge - into a purposeful transformation system - oriented towards whole system change in a given context (which could happen - locally, - regionally, - bioregionally, or even more broadly - depending on the actors and orientations involved
-
-
mongoosejs.com
-
Instance methods

Instances of Models are documents. Documents have many of their own built-in instance methods. We may also define our own custom document instance methods.

```javascript
// define a schema
const animalSchema = new Schema({ name: String, type: String },
  {
    // Assign a function to the "methods" object of our animalSchema through schema options.
    // By following this approach, there is no need to create a separate TS type to define the type of the instance functions.
    methods: {
      findSimilarTypes(cb) {
        return mongoose.model('Animal').find({ type: this.type }, cb);
      }
    }
  });

// Or, assign a function to the "methods" object of our animalSchema
animalSchema.methods.findSimilarTypes = function(cb) {
  return mongoose.model('Animal').find({ type: this.type }, cb);
};
```

Now all of our animal instances have a findSimilarTypes method available to them.

```javascript
const Animal = mongoose.model('Animal', animalSchema);
const dog = new Animal({ type: 'dog' });

dog.findSimilarTypes((err, dogs) => {
  console.log(dogs); // woof
});
```

Overwriting a default mongoose document method may lead to unpredictable results. See this for more details. The example above uses the Schema.methods object directly to save an instance method. You can also use the Schema.method() helper as described here.

Do not declare methods using ES6 arrow functions (=>). Arrow functions explicitly prevent binding this, so your method will not have access to the document and the above examples will not work.
Certainly! Let's break down the provided code snippets:
1. What is it and why is it used?
In Mongoose, a schema is a blueprint for defining the structure of documents within a collection. When you define a schema, you can also attach methods to it. These methods become instance methods, meaning they are available on the individual documents (instances) created from that schema.
Instance methods are useful for encapsulating functionality related to a specific document or model instance. They allow you to define custom behavior that can be executed on a specific document. In the given example, the `findSimilarTypes` method is added to instances of the `Animal` model, making it easy to find other animals of the same type.

2. Syntax:
Using the methods object in the schema options:

```javascript
const animalSchema = new Schema(
  { name: String, type: String },
  {
    methods: {
      findSimilarTypes(cb) {
        return mongoose.model('Animal').find({ type: this.type }, cb);
      }
    }
  }
);
```
Using the methods object directly on the schema:

```javascript
animalSchema.methods.findSimilarTypes = function(cb) {
  return mongoose.model('Animal').find({ type: this.type }, cb);
};
```
Using the Schema.method() helper:

```javascript
animalSchema.method('findSimilarTypes', function(cb) {
  return mongoose.model('Animal').find({ type: this.type }, cb);
});
```
3. Explanation in Simple Words with Examples:
Why it's Used:
Imagine you have a collection of animals in your database, and you want to find other animals of the same type. Instead of writing the same logic repeatedly, you can define a method that can be called on each animal instance to find similar types. This helps in keeping your code DRY (Don't Repeat Yourself) and makes it easier to maintain.
Example:
```javascript
const mongoose = require('mongoose');
const { Schema } = mongoose;

// Define a schema with a custom instance method
const animalSchema = new Schema({ name: String, type: String });

// Add a custom instance method to find similar types
animalSchema.methods.findSimilarTypes = function(cb) {
  return mongoose.model('Animal').find({ type: this.type }, cb);
};

// Create the Animal model using the schema
const Animal = mongoose.model('Animal', animalSchema);

// Create an instance of Animal
const dog = new Animal({ type: 'dog', name: 'Buddy' });

// Use the custom method to find similar types
dog.findSimilarTypes((err, similarAnimals) => {
  console.log(similarAnimals);
});
```
In this example, findSimilarTypes is a custom instance method added to the Animal schema. When you create an instance of the Animal model (e.g., a dog), you can then call findSimilarTypes on that instance to find other animals with the same type. The method uses the this.type property, which refers to the type of the current animal instance. This allows you to easily reuse the logic for finding similar types across different instances of the Animal model.
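Note: the examples above use callbacks; Mongoose 7 and later removed callback support, so a promise-based sketch of the same method (my adaptation, not the docs' own code) looks like:

```javascript
// Sketch only: return the query (which is thenable) and let callers await it.
animalSchema.methods.findSimilarTypes = function () {
  return mongoose.model('Animal').find({ type: this.type });
};

// inside an async function:
const dogs = await dog.findSimilarTypes();
console.log(dogs);
```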
-
-
gitlab.com gitlab.com
-
we should break down an MR into "Blocks"
-
- Dec 2023
-
superfastpython.com superfastpython.com
-
Measure Execution Time With time.thread_time()
The time.thread_time() function reports the time that the current thread has been executing. The time begins at (is zero when) the current thread is first created.

It returns the value (in fractional seconds) of the sum of the system and user CPU time of the current thread. It is equivalent to time.process_time(), except calculated at the scope of the current thread rather than the current process. The value is the sum of the system time and the user time:

thread time = user time + system time

The reported time does not include sleep time. This means that if the thread is blocked by a call to time.sleep(), or perhaps is suspended by the operating system, then this time is not included in the reported time. This is called a “thread-wide” or “thread-specific” time.
-
Measure Execution Time With time.process_time()
The time.process_time() function reports the time that the current process has been executing. The time begins at (is zero when) the current process is first created.

It is calculated as the sum of the system time and the user time:

process time = user time + system time

System time is time that the CPU spends executing system calls for the kernel (e.g. the operating system). User time is time that the CPU spends executing calls in the program (e.g. your code). When a program loops through an array, it is accumulating user CPU time. Conversely, when a program executes a system call such as exec or fork, it is accumulating system CPU time.

The reported time does not include sleep time. This means that if the process is blocked by a call to time.sleep(), or perhaps is suspended by the operating system, then this time is not included in the reported time. This is called a “process-wide” time. As such, it only reports the time that the current process has been executing since it was created by the operating system.
-
Measure Execution Time With time.perf_counter()
The time.perf_counter() function reports the value of a performance counter on the system.
It does not report the time since epoch like time.time().
Return the value (in fractional seconds) of a performance counter, i.e. a clock with the highest available resolution to measure a short duration. It does include time elapsed during sleep and is system-wide.
The returned value in seconds with fractional components (e.g. milliseconds and nanoseconds), provides a high-resolution timestamp.
Calculating the difference between two timestamps from the time.perf_counter() allows high-resolution execution time benchmarking, e.g. in the millisecond and nanosecond range.
The timestamp from the time.perf_counter() function is consistent, meaning that two durations can be compared relative to each other in a meaningful way.

The time.perf_counter() function was introduced in Python version 3.3 with the intended use of short-duration benchmarking. The perf_counter() function was specifically designed to overcome the limitations of other time functions, ensuring that the result is consistent across platforms and monotonic (always increasing).

For accuracy, the timeit module uses time.perf_counter() internally.
-
Measure Execution Time With time.time()
The time.time() function reports the number of seconds since the epoch (the epoch is January 1st, 1970, which is used on Unix systems and beyond as an arbitrary fixed time in the past) as a floating point number.

The result is a floating point value, potentially offering fractions of a second (e.g. milliseconds), if the platform supports it.

The time.time() function is not perfect. It is possible for a subsequent call to time.time() to return a value in seconds less than the previous value, due to rounding.

Note: even though the time is always returned as a floating point number, not all systems provide time with a better precision than 1 second. While this function normally returns non-decreasing values, it can return a lower value than a previous call if the system clock has been set back between the two calls.
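A minimal sketch contrasting the four clocks described above; the one-second sleep shows up in the wall clocks but not in the CPU-time clocks:

```python
import time

def burn_cpu(n=2_000_000):
    # A pure-Python loop: accumulates user CPU time.
    return sum(i * i for i in range(n))

wall = time.time()
perf = time.perf_counter()
proc = time.process_time()
thread = time.thread_time()

burn_cpu()
time.sleep(1)  # counted by the wall clocks only

print('time.time():        ', time.time() - wall)           # ~1s + CPU work
print('time.perf_counter():', time.perf_counter() - perf)   # ~1s + CPU work
print('time.process_time():', time.process_time() - proc)   # CPU work only, sleep excluded
print('time.thread_time(): ', time.thread_time() - thread)  # CPU work only, this thread
```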
-
-
stackoverflow.com stackoverflow.com
-
I was getting an error indicating I was using an invalid access_token. It turns out that I wasn't waiting for getLoginStatus to complete prior to making an API call
-
-
stackoverflow.com stackoverflow.com
-
stackoverflow.com stackoverflow.com
-
-
because the value isn't there yet. A promise is just a marker that it will be available at some point in the future. You cannot convert asynchronous code to synchronous, though. If you order a pizza, you get a receipt that tells you that you will have a pizza at some point in the future. You cannot treat that receipt as the pizza itself, though. When you get your number called you can "resolve" that receipt to a pizza. But what you're describing is trying to eat the receipt.
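A small sketch of the receipt-vs-pizza distinction in code (orderPizza is a made-up stand-in for any async call):

```javascript
function orderPizza() {
  // Returns a "receipt" immediately; the pizza arrives later.
  return new Promise((resolve) => setTimeout(() => resolve('pizza'), 1000));
}

const receipt = orderPizza();
console.log(receipt);        // Promise { <pending> } — the receipt, not the pizza

receipt.then((pizza) => {
  console.log(pizza);        // 'pizza' — resolved when your number is called
});
```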
-
-
stackoverflow.com stackoverflow.com
- Nov 2023
-
markgrabe.substack.com markgrabe.substack.com
-
Grabe, Mark. “Student and Professional Note-Taking.” Substack newsletter. Mark’s Substack (blog), November 10, 2023. https://markgrabe.substack.com/p/student-and-professional-note-taking?publication_id=1857743&utm_campaign=email-post-title&r=77i35.
Educator Mark Grabe looks at some different forms of note taking with respect to learning compared lightly with note taking for productivity or knowledge management purposes.
Note taking for: - learning / sensemaking - personal knowledge management - productivity / projects - thesis creation/writing/other creative output (music, dance, etc.)
Not taken into account here is the diversity of cognitive abilities, extent of practice (those who've practiced at note taking for longer are likely to be better at it), or even neurodiversity, which becomes an additional layer (potentially noise) on top of the research methodologies.
-
- Sep 2023
-
www.reddit.com www.reddit.com
-
I don't know why I can't do Evergreen and Atomic Notes..
reply to u/SouthernEremite at https://www.reddit.com/r/Zettelkasten/comments/16r8k0b/i_dont_know_why_i_cant_do_evergreen_and_atomic/
If you're not using your notes to create or write material and only using them as a form of sensemaking, then perhaps you don't need to put as much work or effort into the permanent notes portion of the work? Ask yourself: "Why are you taking notes? What purpose do they serve?" Is the form and level you're making them in serving those purposes? If not, work toward practicing to make those two align so that your notes are serving an actual purpose for you. Anything beyond this is make-work and you could spend your time more profitably somewhere else.
-
-
stackoverflow.com stackoverflow.com
-
```ruby
def self.make_lazy(*methods)
  methods.each do |method|
    define_method method do |*args, &block|
      lazy.public_send(method, *args, &block)
    end
  end
end
```
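A minimal usage sketch, assuming the snippet is defined inside an Enumerable class; NaturalNumbers and its each are hypothetical scaffolding:

```ruby
class NaturalNumbers
  include Enumerable

  def each
    n = 1
    loop { yield n; n += 1 }  # infinite sequence: laziness is required
  end

  def self.make_lazy(*methods)
    methods.each do |method|
      define_method method do |*args, &block|
        lazy.public_send(method, *args, &block)
      end
    end
  end

  make_lazy :map, :select
end

NaturalNumbers.new.select(&:even?).first(3) # => [2, 4, 6]
```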
-
- Jun 2023
-
optimumpatientcareorg.sharepoint.com optimumpatientcareorg.sharepoint.com
-
Lung Function:
Sinthia's comment: Strictly speaking it should just be PEF, not PEFR, because flow is a rate.
-
-
docdrop.org docdrop.org
-
The function symbol notation is the least used notational system in jazz. As the namesuggests, this notation specifies the harmonic function of individual chords and evencomplete chord progressions. It has the potential of being useful to notate specificbehaviors of chords that may not—at least, not on the surface level—indicate that theybelong to a particular functional family of chords. As such, function symbols enable theperception of harmonic progressions from a more structural perspective. Function symbolsindicate neither the architecture nor the specific scale degrees of chords. This style ofnotation is more conceptual than it is representative of a specific surface event. The termssurface level and structural level are used to describe musical events and the degree oftheir importance. “Structural” events occur beneath the musical “surface” and areresponsible for the overall tonal, harmonic, and melodic forces controlling the piece.Function symbols use three labels: T for tonic-type chords, PD for predominant-typechords, and D for dominant-type chords.
-
harmonic functioncan be defined as a contextual feature that can be attributed to a chord, a family of chords,harmonic progressions, or even to complete melodic phrases. These features are uniquefor each of the following functions: the tonic, the predominant, and the dominant. Theinteraction between these three creates a system of functional tonality, which undergirdsthe structure of tonal jazz and common-practice music
-
Chapter 3 defines harmonic function
-
- Apr 2023
-
eco-exp2.netlify.app eco-exp2.netlify.app
-
car
Used for the F test.
-
dplyr
A very commonly used data-manipulation package; it includes practical functions such as select and mutate.
-
-
beiner.substack.com beiner.substack.com
-
Daniel Schmachtenberger has spoken at length about the ‘generator functions’ of existential risk, in essence the deeper driving causes.
Definition - generator function of existential risk - the deeper driving cause of existential risk - two examples of deep causes - rivalrous dynamics - complicated systems consuming their complex substrate
Claim - Alexander Beiner claims that - the generator function of these generator functions is physicalism
-
-
www.semanticscholar.org www.semanticscholar.org
-
a random function f
"a random function" here means a single random function, not many or several
-
- Mar 2023
-
www.typescriptlang.org www.typescriptlang.org
-
A Method Decorator is declared just before a method declaration.
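A minimal sketch of a method decorator under the legacy experimentalDecorators setting (log and Greeter are made-up names, not from the handbook):

```typescript
// Requires "experimentalDecorators": true in tsconfig.json.
function log(target: any, propertyKey: string, descriptor: PropertyDescriptor) {
  const original = descriptor.value;
  descriptor.value = function (...args: any[]) {
    console.log(`calling ${propertyKey}`);
    return original.apply(this, args);
  };
}

class Greeter {
  @log
  greet(name: string) {
    return `Hello, ${name}`;
  }
}

new Greeter().greet('world'); // logs "calling greet", returns "Hello, world"
```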
-
- Dec 2022
-
www.reddit.com www.reddit.com
-
His note taking technique has a high distraction potential and is time consuming.
highlight from https://www.reddit.com/user/ManuelRodriguez331/ at https://www.reddit.com/r/Zettelkasten/comments/zigwo3/comment/izs0u3b/
Anecdotal evidence of how some might view zettelkasten note-taking practices, particularly when they have no end goal or needs in mind.
Form follows function
-
-
www.zhihu.com www.zhihu.com
-
What are the effects of reassigning a function's parameters in JavaScript?
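A small sketch of the answer: reassigning a parameter rebinds a local variable only, while mutating a passed-in object is visible to the caller:

```javascript
function reassign(x) {
  x = 42;            // rebinding the parameter only changes the local variable
  console.log(x);    // 42
}
const n = 1;
reassign(n);
console.log(n);      // 1 — the caller's value is untouched

function mutate(obj) {
  obj.a = 2;         // mutating a shared object IS visible to the caller...
  obj = { a: 3 };    // ...but reassigning the parameter is not
}
const o = { a: 1 };
mutate(o);
console.log(o.a);    // 2
```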
-
- Nov 2022
-
nautil.us nautil.us
-
phytoncides, antibacterial and antimicrobial substances that trees and other plants release into the air to help them fight diseases and harmful organisms. When humans breathe in these substances—typically by spending time in nature—their health can improve. Across several studies, phytoncides have been shown to boost immune function, increase anticancer protein production, reduce stress hormones, improve mood, and help people relax.
I always feel better during and after a forest walk.
-
- Sep 2022
-
Local file Local file
-
Intertexts

As Jonathan Culler writes: “Literary works are not to be considered autonomous entities, ‘organic wholes,’ but as intertextual constructs: sequences which have meaning in relation to other texts which they take up, cite, parody, refute, or generally transform.” The Pursuit of Signs (Ithaca, NY: Cornell University Press, 1981), 38.
Throughout Rewriting: How To Do Things With Texts (Utah State University Press, 2006) Joseph Harris presents highlighted sidebar presentations he labels "Intertexts".
They simultaneously serve the functions of footnotes, references, (pseudo-)pull quotes, and conversation with his own text. It's not frequently seen this way, but these intertexts serve the function of presenting his annotations of his own text to model these sorts of annotations and intertextuality which he hopes the reader (student) to be able to perform themselves. He explicitly places them in a visually forward position within the text rather than hiding them in the pages' footnotes or end notes where the audience he is addressing can't possibly miss them. In fact, the reader will be drawn to them above other parts of the text when doing a cursory flip through the book upon picking it up, a fact that underlines their importance in his book's thesis.
This really is a fantastic example of the marriage of form and function as well as modelling behavior.
cc: @remikalir
-
-
blog.saeloun.com blog.saeloun.com
- Aug 2022
-
juejin.cn juejin.cn
-
I have to say that after integrating Spring Cloud Function, message sending and receiving has entered a whole new stage, but the <functionName> + -in- + <index> configuration convention makes me somewhat uncomfortable... I even think the annotation-based approach deprecated before 3.1 may be better suited to our development needs.
New trend
-
- Jul 2022
-
-
randomFormat starts with a lowercase letter, making it accessible only to code in its own package (in other words, it's not exported).
function name starts with a lowercase
-
-
-
Any Go function can return multiple values. For more, see Effective Go.
function can return multiple values.
func Hello(name string) (string, error) { return name, nil }
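A runnable sketch of calling a two-value function and branching on the error (the greeting string and the empty-name check are illustrative, not from the tutorial):

```go
package main

import (
	"errors"
	"fmt"
)

// Hello returns a greeting and an error, mirroring the two-value signature above.
func Hello(name string) (string, error) {
	if name == "" {
		return "", errors.New("empty name")
	}
	return "Hi, " + name + ". Welcome!", nil
}

func main() {
	message, err := Hello("Gladys")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(message)
}
```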
-
- Jun 2022
-
www.php.net www.php.net
-
```php
<?php
$base = array("orange", "banana", "apple", "raspberry");
$replacements = array(0 => "pineapple", 4 => "cherry");
$replacements2 = array(0 => "grape");

$basket = array_replace($base, $replacements, $replacements2);
print_r($basket);
?>
```

The above example will output:

Array
(
    [0] => grape
    [1] => banana
    [2] => apple
    [3] => raspberry
    [4] => cherry
)
array_replace() replaces the values of array with values having the same keys in each of the following arrays.
-
- May 2022
-
www-ncbi-nlm-nih-gov.proxy-bloomu.klnpa.org www-ncbi-nlm-nih-gov.proxy-bloomu.klnpa.org
-
disrupts the biogenesis and processing of miRNAs with subsequent disruption in control of gene
affects miRNA
-
-
www.ncbi.nlm.nih.gov www.ncbi.nlm.nih.gov
-
DICER1 variants cause a hereditary cancer predisposition
-Gene: DICER1 -PMID: 29343557 -Inheritance Pattern: DICER1 is inherited as an autosomal dominant condition with decreased penetrance -Disease Entity: earlier onset disease, multisite disease, 0-2 site disease, cystic lung disease, familial disease, bilateral disease, stage IA/IB, bilateral disease -mutation: germline loss-of-function mutation, missense mutation, Intronic mutations, hotspot mutation, second somatic mutation, truncating mutations, biallelic mutation -zygosity: heterozygosity -Family History: -testing should be considered for those with a family history of DICER1-associated conditions so that appropriate surveillance can be undertaken. -Individuals at 50% risk of a germline pathogenic variant based on family history who do not pursue genetic testing should follow surveillance guidelines as -if they have a DICER1 mutation unless/until genetic testing confirms that they did not inherit the familial mutation When a pulmonary cyst is identified in a young child with a pathogenic germline -DICER1 variant or family history of a DICER1-associated condition, it should be assumed to be Type I PPB until proven otherwise
Other Information: -Case: Risk for most DICER1-associated neoplasms is highest in early childhood and decreases in adulthood -affected phenotype may simply result from probabilities of generating the characteristic “loss-of-function plus hotspot” two hit typical of a DICER1 syndrome neoplasm. -Case previous testing: presymptomatic testing of a minor child should be discussed and factored into the decision process, as some individuals may choose, and have the right to choose, not to know their/their child’s genetic status. -gnomAD: n/a
-
- Apr 2022
-
docdrop.org docdrop.org
-
All of the major books that were to follow – Sade/Fourier/Loyola (1997), The Pleasure of the Text (1975), Roland Barthes by Roland Barthes (1977), A Lover’s Discourse (1990), and Camera Lucida (1993) – are texts that are ‘plural’ and ‘broken’, and which are ‘constructed from non-totalizable fragments and from exuberantly proliferating “details”’ (Bensmaïa, 1987: xxvii-xxxviii). In all of the above cases the fragment becomes the key unit of composition, with each text structured around the arrangement of multiple (but non-totalisable) textual fragments.
Does the fact that Barthes uses a card index in his composition and organization influence the overall theme of his final works which could be described as "non-totalizable fragments"?
-
- Mar 2022
-
www.ncbi.nlm.nih.gov www.ncbi.nlm.nih.gov
-
The basic function of an anaesthesia machine is to prepare a gas mixture of precisely known, but variable composition. The gas mixture can then be delivered to a breathing system.
-
- Feb 2022
-
twitter.com twitter.com
-
Kevin Courtney #NEU💝NHS. (2022, January 5). Ventilation isn’t just for Covid.... ...It’s for Education This study looks at the impact of CO2 not just as a marker of pollution but as a pollutant in itself. It shows that as CO2 rises above 700/800 ppm cognitive function begins to be impaired https://dash.harvard.edu/bitstream/handle/1/27662232/4892924.pdf?sequence=1&fbclid=IwAR2kWIHIJfssa_sw72MD6W1hnkDvSm4bikK5FOLxwQxhjYLEYjfPCfzXz3E [Tweet]. @cyclingkev. https://twitter.com/cyclingkev/status/1478778857536860170
-
-
github.com github.com
-
There is nothing stopping you from creating store objects which scrape XE for the current rates or just return rand(2):
-
- Jan 2022
-
www.cdc.gov www.cdc.gov
-
French, G. (2021). Impact of Hospital Strain on Excess Deaths During the COVID-19 Pandemic—United States, July 2020–July 2021. MMWR. Morbidity and Mortality Weekly Report, 70. https://doi.org/10.15585/mmwr.mm7046a5
-
-
blog.atomist.com blog.atomist.com
-
My gut told me calling an async function from the setTimeout callback was a bad thing. Since the setTimeout machinery ignores the return value of the function, there is no way it was awaiting on it. This means that there will be an unhandled promise. An unhandled promise could mean problems if the function called in the callback takes a long time to complete or throws an error.
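One way to keep that promise handled, as a sketch (doWork is a hypothetical async function):

```javascript
setTimeout(() => {
  // setTimeout ignores the returned promise, so attach the error handling ourselves.
  doWork().catch((err) => {
    console.error('async task in setTimeout failed', err);
  });
}, 1000);
```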
-
-
-
```javascript
const originalUnhandledRejection = window.onunhandledrejection;

window.onunhandledrejection = (e) => {
  console.log('we got exception, but the app has crashed', e);
  // or do Sentry.captureException(e);

  // Guard added: there may have been no previous handler installed.
  if (originalUnhandledRejection) originalUnhandledRejection(e);
};
```
-
-
www.npmjs.com www.npmjs.com
-
```javascript
co(function* () {
  var result = yield Promise.resolve(true);
  return result;
}).then(function (value) {
  console.log(value);
}, function (err) {
  console.error(err.stack);
});
```
-
-
www.nbcnewyork.com www.nbcnewyork.com
-
Even Mild COVID Infections Can Have Lasting Impacts Like ‘Chemo Brain,’ Study Finds. (n.d.). NBC New York. Retrieved 12 January 2022, from https://www.nbcnewyork.com/news/coronavirus/even-mild-covid-infections-can-have-lasting-impacts-like-chemo-brain-study-finds/3489958/
-
- Nov 2021
-
www.cell.com www.cell.com
-
They use local computations to interpolate over task-relevant manifolds in a high-dimensional parameter space.
-
- Oct 2021
-
-
```javascript
const fetchWithJSONHeaders = applyDefaults(fetch, {
  headers: { "Content-Type": "application/json" }
});
const fetchWithTextHeaders = applyDefaults(fetch, {
  headers: { "Content-Type": "application/text" }
});

// Fetch JSON content
const response = await fetchWithJSONHeaders("/users", { method: "GET" });
```
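applyDefaults itself isn't shown in the annotated snippet; one plausible sketch of it, merging defaults with per-call options:

```javascript
// Hypothetical helper: returns a fetch-like function with baked-in defaults.
function applyDefaults(fetchFn, defaults) {
  return (url, options = {}) =>
    fetchFn(url, {
      ...defaults,
      ...options,
      // Merge headers separately so per-call headers extend, not replace, the defaults.
      headers: { ...defaults.headers, ...options.headers },
    });
}
```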
-
-
trackjs.com trackjs.com
-
But there is a lot of things we didn’t handle: How do we pass function arguments through? How do we maintain scope (the value of this)? How do we get the return value? What if an error happens?
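A minimal wrapper sketch addressing those four questions (my example, not the article's own code):

```javascript
function wrap(fn) {
  return function (...args) {          // 1. arguments pass through via rest/spread
    try {
      return fn.apply(this, args);     // 2. `this` is preserved; 3. return value forwarded
    } catch (err) {
      console.error('wrapped call threw', err); // 4. errors can be observed...
      throw err;                                // ...and re-thrown to the caller
    }
  };
}
```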
-
-
stackoverflow.com stackoverflow.com
-
A wrapper function is a design concept where a very minimal function uses another function to do its "work" for it, sometimes using a slightly different set of arguments.
-
-
www.orgroam.com www.orgroam.com
-
org-roam-dailies-goto-next-note
-
org-roam-dailies-goto-previous-note
-
org-roam-dailies-find-directory
-
org-roam-dailies-goto-date
-
org-roam-dailies-capture-date
-
org-roam-dailies-goto-yesterday
-
org-roam-dailies-capture-yesterday
-
org-roam-dailies-goto-today
-
org-roam-dailies-capture-today
-
org-roam-alias-remove
-
org-roam-buffer-display-dedicated
-
org-roam-buffer-toggle
-
-
blog.gdeltproject.org blog.gdeltproject.org
-
BigQuery + UDF = Identifying The Earliest Glimmers Of Covid-19 – The GDELT Project. (n.d.). Retrieved May 14, 2021, from https://blog.gdeltproject.org/bigquery-udf-identifying-the-earliest-glimmers-of-covid-19/
-
- Jul 2021
-
-
annotationCou
function to count annotations
-
-
www.cell.com www.cell.com
-
Keerthivasan, S., Şenbabaoğlu, Y., Martinez-Martin, N., Husain, B., Verschueren, E., Wong, A., Yang, Y. A., Sun, Y., Pham, V., Hinkle, T., Oei, Y., Madireddi, S., Corpuz, R., Tam, L., Carlisle, S., Roose-Girma, M., Modrusan, Z., Ye, Z., Koerber, J. T., & Turley, S. J. (2021). Homeostatic functions of monocytes and interstitial lung macrophages are regulated via collagen domain-binding receptor LAIR1. Immunity, 54(7), 1511-1526.e8. https://doi.org/10.1016/j.immuni.2021.06.012
-
-
github.com github.com
-
This happens with getClient and setClient because they use a Svelte context, which is only available at component initialization (construction) and cannot be accessed in an event handler.
-
- Jun 2021
-
www.bmj.com www.bmj.com
-
Darby, Alistair C., and Julian A. Hiscox. ‘Covid-19: Variants and Vaccination’. BMJ 372 (23 March 2021): n771. https://doi.org/10.1136/bmj.n771.
-
-
www.technologyreview.com www.technologyreview.com
-
The problem is, algorithms were never designed to handle such tough choices. They are built to pursue a single mathematical goal, such as maximizing the number of soldiers’ lives saved or minimizing the number of civilian deaths. When you start dealing with multiple, often competing, objectives or try to account for intangibles like “freedom” and “well-being,” a satisfactory mathematical solution doesn’t always exist.
We do better with algorithms where the utility function can be expressed mathematically. When we try to design for utility/goals that include human values, it's much more difficult.
-
-
www.postgresql.org www.postgresql.org
-
json_array_elements_text ( json ) → setof text
jsonb_array_elements_text ( jsonb ) → setof text

Expands the top-level JSON array into a set of text values.

select * from json_array_elements_text('["foo", "bar"]') →

 value
-------
 foo
 bar
-
-
dba.stackexchange.com dba.stackexchange.com
-
The clean way to call a set-returning function is LEFT [OUTER] JOIN LATERAL. This includes rows without children. To exclude those, change to a [INNER] JOIN LATERAL
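A sketch of the pattern with a hypothetical parents(id, children jsonb) table:

```sql
-- Hypothetical schema: parents(id int, children jsonb).
SELECT p.id, child.value
FROM parents p
LEFT JOIN LATERAL jsonb_array_elements_text(p.children) AS child(value) ON true;
-- Change LEFT JOIN LATERAL to JOIN LATERAL to exclude parents without children.
```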
-
-
www.apollographql.com www.apollographql.com
-
graphqlSync is a relatively recent addition to GraphQL.js that lets you execute a query that you know is going to return synchronously and get the result right away, rather than getting a promise. Since we know that introspection won’t require calling any asynchronous resources, we can safely use it here.
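A minimal sketch, assuming graphql-js v14+ (where graphqlSync and getIntrospectionQuery are exported):

```javascript
const { buildSchema, graphqlSync, getIntrospectionQuery } = require('graphql');

const schema = buildSchema('type Query { hello: String }');

// Introspection touches no async resources, so the sync API is safe here.
const result = graphqlSync({ schema, source: getIntrospectionQuery() });
console.log(result.data.__schema.queryType.name); // "Query"
```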
-
- May 2021
-
github.com github.com
-
fetch: fetcher
Personally, I don't like how the local/custom/wrapper version of fetch is called fetcher. I feel like {prefix}_fetch or fetch_{prefix} would have been better.
-
-
kit.svelte.dev kit.svelte.dev
-
This function runs on every request, for both pages and endpoints, and determines the response. It receives the request object and a function called resolve, which invokes SvelteKit's router and generates a response accordingly.
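A minimal sketch matching the quoted description (note that SvelteKit's hooks API changed shape in later releases):

```javascript
// src/hooks.js — receives the request and a resolve function, as described above.
export async function handle({ request, resolve }) {
  // inspect or modify the request here
  const response = await resolve(request); // invokes SvelteKit's router
  // inspect or modify the response here
  return response;
}
```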
-
- Apr 2021
-
en.wikipedia.org en.wikipedia.org
-
The role of the terminal emulator process is:
Shows the relationship between a "terminal emulator" and a pseudoterminal, as alluded to in the intro:
is a pair of pseudo-devices, one of which, the slave, emulates a hardware text terminal device, the other of which, the master, provides the means by which a terminal emulator process controls the slave.
-
-
en.wikipedia.org en.wikipedia.org
-
Example
This clarifies that (one of) the terminal's responsibility is:
- provides line editing
-
-
en.wikipedia.org en.wikipedia.org
-
Other physicists and mathematicians at the turn of the century came close to arriving at what is currently known as spacetime. Einstein himself noted, that with so many people unraveling separate pieces of the puzzle, "the special theory of relativity, if we regard its development in retrospect, was ripe for discovery in 1905."
Interesting. This acts as evidence for the hypothesis that environments/conditions are powerful forcing functions.
It also acts as evidence against the argument of the "lone genius".
-
- Mar 2021
-
bugs.ruby-lang.org bugs.ruby-lang.org
-
Would it be desirable to specify the new object in a block? That would make it somewhat symmetrical to how Hash.new takes a block as a default value.
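For reference, the Hash.new block behavior being compared against:

```ruby
# Hash.new with a block computes (and here stores) defaults lazily:
counts = Hash.new { |hash, key| hash[key] = 0 }
counts[:apples] += 1
counts # => {:apples=>1}
```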
-
-
trailblazer.to trailblazer.to
-
This could be an operation, a workflow, or hand-baked Ruby code completely unrelated to Trailblazer.
-
-
github.com github.com
-
Or if you need to change the way the string is assembled, you can provide a proc, for example:

```ruby
if defined?(BetterErrors)
  BetterErrors.editor = proc { |file, line|
    "vscode://file/%{file}:%{line}" % {
      file: URI.encode_www_form_component(file),
      line: line
    }
  }
end
```
-
-
psyarxiv.com psyarxiv.com
-
Davies, Catherine, Alexandra Hendry, Shannon P. Gibson, Teodora Gliga, Michelle McGillion, and Nayeli Gonzalez-Gomez. ‘Early Childhood Education and Care (ECEC) during COVID-19 Boosts Growth in Language and Executive Function’. PsyArXiv, 10 March 2021. https://doi.org/10.31234/osf.io/74gkz.
-
-
medium.com medium.com
-
There are several benefits to splitting code into multiple packages, whether it be a library, micro-services, or micro-frontends.
-
-
www.theatlantic.com www.theatlantic.com
-
Cushing, E. (2021, March 8). Late-Stage Pandemic Is Messing With Your Brain. The Atlantic. https://www.theatlantic.com/health/archive/2021/03/what-pandemic-doing-our-brains/618221/
-
-
trailblazer.to trailblazer.to
-
Suppose that the validate task was getting quite complex and bloated. When writing “normal” Ruby, you’d break up one method into several. In Trailblazer, that’s when you introduce a new, smaller activity.
-
- Feb 2021
-
www.coursera.org www.coursera.org
-
Attribution requires knowledge of two facts: who holds the asset, and who has created it and is party to the contract.
Basic functions of blockchain: Attribution
-
-
en.wikipedia.org en.wikipedia.org
-
Though rarer in computer science, one can use category theory directly, which defines a monad as a functor with two additional natural transformations. So to begin, a structure requires a higher-order function (or "functional") named map to qualify as a functor:
Using category theory directly is rare in computer science. What other areas of math can be, but rarely are, used directly in computer science?
-
-
github.com github.com
-
```ruby
# Set the model name to change the field names generated by the Rails form helpers
def self.model_name=(name)
  @_model_name = ActiveModel::Name.new(self, nil, name)
end
```
-
-
en.wikipedia.org en.wikipedia.org
-
The central ideas of this design pattern closely mirror the semantics of first-class functions and higher-order functions in functional programming languages. Specifically, the invoker object is a higher-order function of which the command object is a first-class argument.
-
-
github.com github.com
-
```ruby
def self.attribute(name, type = ActiveModel::Type::Value.new, **options)
  super

  attribute_type = attribute_types[name.to_s]

  # Add the ? method for boolean attributes
  alias_boolean(name) if attribute_type.is_a?(ActiveModel::Type::Boolean)

  # store date attribute names so we can merge the params during initialization
  date_attributes << name if attribute_type.class.in?(DATE_TYPES)
end
```
-
- Jan 2021
-
developingchild.harvard.edu developingchild.harvard.edu
-
Executive Function & Self-Regulation
article on executive function and self-reg
-
-
developer.mozilla.org developer.mozilla.org
-
While custom iterators are a useful tool, their creation requires careful programming due to the need to explicitly maintain their internal state. Generator functions provide a powerful alternative: they allow you to define an iterative algorithm by writing a single function whose execution is not continuous. Generator functions are written using the function* syntax.
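A small sketch of such a non-continuous iterative algorithm:

```javascript
function* fibonacci() {
  let [a, b] = [0, 1];
  while (true) {
    yield a;             // execution pauses here between next() calls
    [a, b] = [b, a + b];
  }
}

const fib = fibonacci();
console.log(fib.next().value); // 0
console.log(fib.next().value); // 1
console.log(fib.next().value); // 1
```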
-
-
stackoverflow.com stackoverflow.com
-
It works much like a normal AJAX request except instead of calling an anonymous function, we have to use named functions.
-
- Dec 2020
-
chem.libretexts.org chem.libretexts.org
-
Nodes

A wave function node occurs at points where the wave function is zero and changes sign. The electron has zero probability of being located at a node.
Nodes
-
-
thecodebarbarian.com thecodebarbarian.com
-
Remember that async functions always return promises. This promise rejects if any uncaught error occurs in the function. If your async function body returns a promise that rejects, the returned promise will reject too.
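A compact illustration of both halves of that claim:

```javascript
async function fails() {
  throw new Error('boom');   // uncaught error inside the async function...
}

fails()                      // ...surfaces as a rejected promise
  .catch((err) => console.log(err.message)); // "boom"
```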
-
- Nov 2020
-
github.com github.com
-
// DO NOT INLINE this variable. For backward compatibility, foundations take a Partial<MDCFooAdapter>. // To ensure we don't accidentally omit any methods, we need a separate, strongly typed adapter variable.
I wish I understood what they meant and why this is necessary
-
-
www.plymouth.edu www.plymouth.edu
-
proteases
A group of enzymes whose catalytic function is hydrolyzing the peptide bonds of proteins. Also referred to as proteolytic enzymes or proteinases.
-
-
stackoverflow.com stackoverflow.com
-
I have created a thin wrapper around fetch() with many improvements if you are using a purely json REST API:
-
-
-
stackoverflow.com stackoverflow.com
-
news.ycombinator.com news.ycombinator.com
-
Frontend frameworks are a positive sum game! Svelte has no monopoly on the compiler paradigm either. Just like I think React is worth learning for the mental model it imparts, where UI is a (pure) function of state, I think the frontend framework-as-compiler paradigm is worth understanding. We're going to see a lot more of it because the tradeoffs are fantastic, to where it'll be a boring talking point before we know it.
-
-
github.com github.com
-
```javascript
// Rewrite submit function
form.submit = () => {
  const result = originalSubmit.call(form)
  // ...
}
```
-
-
en.wiktionary.org en.wiktionary.org
-
διαδικασία

Greek Noun
διαδικασία • (diadikasía) f (plural διαδικασίες)
- procedure, process, method, protocol
- (computing) function, subroutine, procedure
-
- Oct 2020
-
stackoverflow.com stackoverflow.com
-
If you don't like to create an extra function and remove the items 'inline'
-
-
stackoverflow.com stackoverflow.com
-
Final Form makes the assumption that your validation functions are "pure" or "idempotent", i.e. will always return the same result when given the same values. This is why it doesn't run the synchronous validation again (just to double check) before allowing the submission: because it's already stored the results of the last time it ran it.
-
-
github.com github.com
-
Another example:
```javascript
const expensiveOperation = async (value) => {
  // return Promise.resolve(value)
  // console.log('value:', value)
  await sleep(1000)
  console.log('expensiveOperation: value:', value, 'finished')
  return value
}

var expensiveOperationDebounce = debounce(expensiveOperation, 100);

// for (let num of [1, 2]) {
//   expensiveOperationDebounce(num).then(value => {
//     console.log(value)
//   })
// }

(async () => { await sleep(0   ); console.log(await expensiveOperationDebounce(1)) })();
(async () => { await sleep(200 ); console.log(await expensiveOperationDebounce(2)) })();
(async () => { await sleep(1300); console.log(await expensiveOperationDebounce(3)) })();

// setTimeout(async () => {
//   console.log(await expensiveOperationDebounce(3))
// }, 1300)
```
Outputs: 1, 2, 3
Why, if I change it to:
```javascript
(async () => { await sleep(0   ); console.log(await expensiveOperationDebounce(1)) })();
(async () => { await sleep(200 ); console.log(await expensiveOperationDebounce(2)) })();
(async () => { await sleep(1100); console.log(await expensiveOperationDebounce(3)) })();
```
Does it only output 2, 3?
-
-
github.com github.com
-
Methods have fixed arities to support auto-currying.
-
// `lodash/padStart` accepts an optional `chars` param.
_.padStart('a', 3, '-') // ➜ '--a'

// `lodash/fp/padStart` does not.
fp.padStart(3)('a'); // ➜ '  a'
-
The lodash/fp module promotes a more functional programming (FP) friendly style by exporting an instance of lodash with its methods wrapped to produce immutable auto-curried iteratee-first data-last methods.
-
-
en.wikipedia.org en.wikipedia.org
-
One of the significant differences between the two is that a call to a partially applied function returns the result right away, not another function down the currying chain; this distinction can be illustrated clearly for functions whose arity is greater than two.
-
Currying and partial function application are often conflated.
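A small sketch of the distinction (my example, not the article's):

```javascript
const add3 = (a, b, c) => a + b + c;

// Partial application: fix some arguments now, get the RESULT on the next call.
const add2and3 = add3.bind(null, 2, 3);
add2and3(10); // 15

// Currying: one argument per call, each call returns another function.
const curried = (a) => (b) => (c) => a + b + c;
curried(2)(3)(10); // 15
```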
-
-
stackoverflow.com stackoverflow.com
-
It looks like you accidentally passed resolve() (immediately invoking the function) directly to setTimeout rather than passing a function to invoke it. So it was being resolved immediately instead of after a 1000 ms delay as intended.
I guess this is the "immediately invoked function" problem.
Not to be confused with: immediately invoked function expression. (Since it is a regular named function and not a function expression.)
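The bug and the fix, side by side (a sketch):

```javascript
// Buggy: resolve() is invoked immediately; setTimeout receives its return value (undefined).
new Promise((resolve) => setTimeout(resolve(), 1000));

// Intended: pass the function itself so it runs after 1000 ms.
new Promise((resolve) => setTimeout(resolve, 1000));
```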
-
You should not create a new debounce function on every render with: return new Promise(resolve => { debounce(() => resolve(this.getIsNameUnique(name)), 2000); }); Instead you should just wrap your whole function isNameUnique with the debounce (see my sandbox). By creating a new debounce function on every hit, it cannot 'remember' that is was called or that is will be called again. This will prevent the debouncing.
-
-
final-form.org final-form.org
-
If you define a variable outside of your form, you can then set the value of that variable to the handleSubmit function that 🏁 React Final Form gives you, and then you can call that function from outside of the form.
-
-
med.libretexts.org med.libretexts.org
-
The results of a DEXA scan are most often reported as T-scores. A T-score compares a person’s bone density to the average peak bone density of a healthy 30-year-old population of the same gender. A T-score of −1.0 or above indicates normal bone density. A person with a T-score between −1.0 and −2.5 has low bone density, which is a condition referred to as osteopenia. A person with a T-score of −2.5 or below is diagnosed with osteoporosis.
T score levels for bone density.
-
-
medium.com medium.com
-
reduce is a higher-order function which takes two values
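For example (the reducer's two values are the accumulator and the current element):

```javascript
// Sum an array: acc accumulates, x is the current element, 0 is the initial value.
const sum = [1, 2, 3, 4].reduce((acc, x) => acc + x, 0); // 10
```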
-
-
med.libretexts.org med.libretexts.org
-
In the third step of bone remodeling, the site is prepared for building. In this stage, sugars and proteins accumulate along the bone’s surface, forming a cement line which acts to form a strong bond between the old bone and the new bone that will be made. These first three steps take approximately two to three weeks to complete.
Bone remodeling process.
-
In adulthood, our bones stop growing and modeling, but continue to go through a process of bone remodeling.
I would challenge that fact.
-
Bone tissue cells include osteoprogenitor cells, osteoblasts, osteoclasts, and osteocytes. The osteoprogenitor cells are cells that have not matured yet. Once they are stimulated, some will become osteoblasts, the bone builders, and others will become osteoclasts, the cells that break bone down. Osteocytes are the most abundant cells in bone tissue. Osteocytes are star-shaped cells that are connected throughout the bone and exchange nutrients from bones to the blood and lymph.
The Osteo Class of Bone Tissue Cells
-
Your bones are stronger than reinforced concrete. Bone tissue is a composite of fibrous collagen strands that resemble the steel rebar in concrete and a hardened mineralized matrix that contains large amounts of calcium, just like concrete.
What Is Bone?
-
-
github.com github.com
-
In the React 0.12 time frame we did a bunch of small changes to how key, ref and defaultProps work. Particularly, they get resolved early on in the React.createElement(...) call. This made sense when everything was classes, but since then, we've introduced function components. Hooks have also made function components more prevalent. It might be time to reevaluate some of those designs to simplify things (at least for function components).
-
-
www.npmjs.com www.npmjs.com
-
Return a tagged template function hx from a hyperscript-style factory function h
-
-
www.statnews.com www.statnews.com
-
Long after a Covid-19 infection, mental and neurological effects smolder. (2020, August 12). STAT. https://www.statnews.com/2020/08/12/after-covid19-mental-neurological-effects-smolder/
-
-
github.com github.com
- Sep 2020
-
stackoverflow.com stackoverflow.com
-
stackoverflow.com stackoverflow.com
-
setContext must be called synchronously during component initialization. That is, from the root of the <script> tag
-
-
github.com github.com
-
github.com github.com
-
It looks like the issue stems from having "svelte" as a dependency instead of a devDependencies in package.json within the sapper project. This causes import 'svelte' to load the rollup excluded npm package's set_current_component instead of from within the sapper generated server.js.
-
-
svelte.dev svelte.dev
-
final-form.org final-form.org
-
By default, in order to allow inline fat-arrow validation functions, the field will not rerender if you change your validation function to an alternate function that has a different behavior. If you need your field to rerender with a new validation function, you will need to update another prop on the Field, such as key
-
-
stackoverflow.com stackoverflow.com
-
```javascript
function* enumerate(iterable) {
  let i = 0;
  for (const x of iterable) {
    yield [i, x];
    i++;
  }
}

for (const [i, obj] of enumerate(myArray)) {
  console.log(i, obj);
}
```
-
-
developer.mozilla.org developer.mozilla.org
-
www.coreycleary.me www.coreycleary.me
-
here I wrapped the function call in an IIFE - that's what that (async () => {....})() is if you've never seen it. This is simply because we need to wrap the await call in a function that uses the async keyword, and we also want to "immediately invoke" the function (IIFE = "Immediately Invoked Function Execution") in order to call it.
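The shape being described, as a sketch (fetchData is a stand-in for any promise-returning call):

```javascript
// An IIFE gives us an async context so we can use await at the top level.
(async () => {
  const data = await fetchData();
  console.log(data);
})();
```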
-
-
developer.mozilla.org developer.mozilla.org
-
Here we store the three Promise objects in variables, which has the effect of setting off their associated processes all running simultaneously. Next, we await their results — because the promises all started processing at essentially the same time, the promises will all fulfill at the same time
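The pattern described, as a sketch (fetchA/fetchB are stand-ins; this must run inside an async function):

```javascript
const promiseA = fetchA(); // starts immediately
const promiseB = fetchB(); // also starts immediately

const a = await promiseA;
const b = await promiseB;  // total wait ≈ the slower call, not the sum
```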
-
By only adding the necessary handling when the function is declared async, the JavaScript engine can optimize your program for you.
-
-
-
$: (async() => filtered = await getItems())();
-
-
reactjs.org reactjs.org
-
functions present a problem again because there is no reliable way to compare two functions to see if they are semantically equivalent.
-
-
github.com github.com
-
Viechtbauer, W. (2020). Wviechtb/forest_emojis [R]. https://github.com/wviechtb/forest_emojis (Original work published 2020)
-
-
ccforum.biomedcentral.com ccforum.biomedcentral.com
-
Hupf, J., Mustroph, J., Hanses, F., Evert, K., Maier, L. S., & Jungbauer, C. G. (2020). RNA-expression of adrenomedullin is increased in patients with severe COVID-19. Critical Care, 24(1), 527. https://doi.org/10.1186/s13054-020-03246-1
-
- Jul 2020
-
osf.io osf.io
-
Bogliacino, F., codagnone, cristiano, Montealegre, F., Folkvord, F., Gómez, C. E., Charris, R. A., Liva, G., Villanueva, F. L., & Veltri, G. A. (2020). Negative shocks predict change in cognitive function and preferences: Assessing the negative affect and stress hypothesis in the context of the COVID-19 pandemic and the lockdown mitigation strategy [Preprint]. SocArXiv. https://doi.org/10.31235/osf.io/qhkf9
-
-
www.hspsweden.eu www.hspsweden.eu
-
These entrances, or gates as he calls them, can all be used to bring us back to the present moment, where no problems exist. It is only in the present that we find joy and can embrace our true self.

Here he seems to assume that there are in fact at least four "gates", which matches Carl Jung's theory that an individual's LEAST developed function is the "gate to the subconscious" through which all human transformation happens.

You simply enter through your LEAST developed personality trait and can then reach your meditative self, which lies beneath and behind all personality traits. The different personality traits are thinking (fire), feeling (water), bodily impressions (earth), and intuition (air).

That, in turn, is the same thing as the four cardinal directions of the Native American medicine wheel, with the human being at its center.
-
These entrances, or gates as he calls them, can all be used to bring us back to the present moment, where no problems exist.

But here they write that he points to SEVERAL gates. Perhaps Eckhart Tolle starts from Jung's functions and says that there are four: thinking, feeling, intuition, and sensory impressions. Behind/beneath these is the fully alive human being, which is how we experience joy and can embrace our true self.

Exactly the same four functions of human consciousness are found in the Native American medicine wheel as the four cardinal directions, and in the ancient doctrine of the elements: water (feeling), fire (thinking), air (intuition), and earth (sensory impressions).
-
Eckhart Tolle also makes clear that the body is the key to our inner ocean of knowledge and the path to peace of mind.

Yes: for Eckhart Tolle, who is likely dominant iNtuitive and secondarily a Thinker (for example INTJ or INFJ, both of which have dominant introverted intuition), extraverted Sensing, i.e. the physical world's sensory impressions on the body, is the least developed function (or at least the fourth weakest). Carl Jung said that the gate to the subconscious, and therefore to all real personal transformation, is the least developed function.

So had Eckhart Tolle been like my brother, an ESTP, then introverted intuition (one of my strongest abilities, for example, as an INTJ) would have been the key to our "inner ocean of knowledge and the path to peace of mind".
-
- Jun 2020
-
psyarxiv.com psyarxiv.com
-
Zhang, W., Gao, F., Gross, J., Shrum, L. J., & Hayne, H. (2020). How Does Social Distancing During COVID-19 Affect Negative Moods and Memory? [Preprint]. PsyArXiv. https://doi.org/10.31234/osf.io/67rhf
-
-
psyarxiv.com psyarxiv.com
-
Law, R. (2020, June 17). The cortisol awakening response predicts a same-day index of executive function in healthy young adults. https://doi.org/10.31234/osf.io/58yeb
-
-
myronmars.to myronmars.to
-
return super(scope, &block) unless scope == :all
-
-
-
Della Rossa, F., & DeLellis, P. (2020). Stochastic master stability function for noisy complex networks. Physical Review E, 101(5), 052211. https://doi.org/10.1103/PhysRevE.101.052211
-
-
yunicsolutions.com yunicsolutions.com
-
14 Essential Functions of the Human Resource Department

According to Storey (1995), HRM is a distinctive approach to employment management which seeks to achieve competitive advantage through the strategic deployment of a highly committed and capable workforce, using an integrated array of cultural, structural and personnel techniques. An efficiently run human resources department can provide your organization with structure and the ability to meet business needs through managing your company's most valuable resources – its employees. There are several HR disciplines, but HR practitioners in each discipline may perform more than one of the more than six essential functions. In small businesses without a dedicated HR department, it's possible to achieve the same level of efficiency and workforce management through outsourcing HR functions or joining a professional employer organization. In this article, we will go over the 14 essential functions of Human Resource departments and explain how they help move the organization forward. These functions are as follows:

Human Resource Planning
The first function of HR is all about knowing the future needs of the organization. What kind of people does the organization need, and how many? Knowing this will shape the recruitment, selection, performance management, learning and development, and all other HR functions. Human resources planning is similar to workforce planning. Both focus on where the organization is today and what it needs to be successful in the future.

Recruitment and Selection
Recruitment process outsourcing is the process of attracting, screening, and selecting potential and qualified candidates based on objective criteria for a particular job. The goal of this process is to attract qualified applicants and to encourage unqualified applicants to opt themselves out. The recruitment and selection process is very important to every organization because it reduces the costs of mistakes such as engaging incompetent, unmotivated, and under-qualified employees. Firing the unqualified candidate and hiring a new employee is again an expensive process.

Training and Development
Employers must provide employees with the tools necessary for their success which, in many cases, means giving new employees extensive orientation training to help them transition into a new organizational culture. Many HR departments also provide leadership training and professional development. Corporate training may be required of newly hired and promoted supervisors and managers on topics such as performance management and how to handle employee relations matters at the department level.

Employer-Employee Relations
In a unionized work environment, the employee and labor relations functions of HR may be combined and handled by one specialist or be entirely separate functions managed by two HR specialists with specific expertise in each area. Employee relations is the HR discipline concerned with strengthening the employer-employee relationship through measuring job satisfaction and employee engagement and resolving workplace conflict. Labor relations functions may include developing management responses to union organizing campaigns, negotiating collective bargaining agreements and rendering interpretations of labor union contract issues.

Compensation and Benefits
Like employee and labor relations, the compensation and benefits functions of HR often can be handled by one HR specialist with dual expertise. On the compensation side, the HR functions include setting compensation structures and evaluating competitive pay practices. A comp and benefits specialist also may negotiate group health coverage rates with insurers and coordinate activities with the retirement savings fund administrator. Payroll can be a component of the compensation and benefits section of HR; however, in many cases, employers outsource such administrative functions as payroll.

Labor Law Compliance
Compliance with labor and employment laws is a critical HR function. Noncompliance can result in workplace complaints based on unfair employment practices, unsafe working conditions and general dissatisfaction with working conditions that can affect productivity and, ultimately, profitability. HR staff must be aware of federal and state employment laws such as Title VII of the Civil Rights Act, the Fair Labor Standards Act, the National Labor Relations Act and many other rules and regulations.

Recognition and Rewards
Rewards and recognitions are a way of making employees feel worthy of their work, as appreciation serves as the best motivation. Recognitions and rewards can be monetary or non-monetary. A task, goal, or performance is linked to rewards, and further evaluation and reward is usually the cycle. Rewards could be a sponsored vacation, a raise in salary, a bonus, or performance-based pay. Recognitions could be a higher post, job security, growth opportunities, a good work environment, accolades, and credibility across the company. Rewards and recognitions not only motivate employees but also help to retain them. Long-term advantages of rewards and recognition are:
- Rewards and recognitions have a direct impact on attrition rates.
- Performance can be enhanced through rewards and recognitions.
- Higher loyalty standards can be established.
- Better teamwork can be explored.
- Absenteeism and negative behavior can be curtailed.
- Employees are engaged and enjoy their work.

Health and Safety
Health and safety is a prime HR function in the entire landscape of Human Resource Management. Employees spend most of their time at work, and providing them a safe environment that is amicable and guarded is the prerogative of the organization. Organizations should consider:
- Safety and health policies according to industry benchmarks.
- Safety and health training from time to time.
- A sexual harassment policy protecting women employees.
- Health initiatives within the company premises.
Companies should focus not only on a plush working environment. From the health perspective, various drives, seminars and workshops are an undisputed need in the interest of the employees. Free health camps and paid medical insurance policies for employees and their families are some of the initiatives companies can engage in for their employees' health and safety.

Maintaining Good Working Conditions
It is the responsibility of human resource management to provide good working conditions to employees so that they may like the workplace and the work environment. It is the fundamental duty of the HR department to motivate the employees. Studies have found that employees don't contribute to the goals of the organization as much as they could, because of a lack of motivation. Human resource management should come up with a system to provide financial and non-financial benefits to the employees of the various departments. Employee welfare is another concept which should be managed by the HR team. Employee welfare promotes job satisfaction.

Administrative Responsibilities
Another function of HR is its administrative responsibility. This includes personnel procedures and Human Resource Information Systems. Personnel procedures involve the handling of promotions, relocations, discipline, performance improvement, illness, regulations, cultural and racial diversity, unwanted intimacies, bullying, and so on. For each of these situations, policies and procedures need to be developed and followed to successfully comply with the requests, or overcome these challenges.

Career/Succession Planning
As an HR function, succession planning is an initiative towards monitoring and working on an existing employee's growth path such that they can be promoted from within. Companies can identify bright and promising employees inside the organization and work on developing their growth path. Employees who feel assured of a promising work environment within the company will not leave. Companies should work out strategies for individual development and engage employees in grooming and challenging activities to develop them towards a higher role. Showing employees how their personal ambition can align with the future of the company helps to engage and retain them. For the organization, there are the benefits of better succession planning, higher productivity, and a stronger employer brand.

Industrial Relations
Industrial relations as an HR function is primarily practised in manufacturing and production units. Unions rule industrial units with a motive that collectively speaks for the goodwill of the employees. A company in the production and manufacturing domain should have prevalent industrial relations practices and should continuously engage in talks with unions to maintain an amicable situation. Industrial relations aims towards a continuous production process, reducing production time and resource wastage, reducing serious disputes including strikes and protests, better and safer working conditions, and meeting wage standards and expectations. Industrial relations, if handled sensitively, can avoid lawsuits, protests, walkouts, and loss of production time and money. Unionization is still very prevalent in Europe. In 2015, 92% of employees in Iceland were members of a union, followed by Sweden (67%), Belgium (55%), Italy (37%), Ireland (27%) and Canada (27%). Maintaining good relations with unions will help to spot and resolve potential conflicts quickly and will also be beneficial in more difficult economic times when layoffs or other actions are required.

Performance Management
Performance management is essential in ensuring that workers stay productive and engaged. Good performance management involves good leadership, clear goal-setting, and open feedback. Performance management tools include the (bi)annual performance review, in which the employee is reviewed by his/her manager. It also includes 360-degree feedback tools in which peers, managers, subordinates, and sometimes even customers review the employee's performance. These kinds of tools can be very helpful in providing feedback. Ideally, employees should be reviewed on a 360-degree scale, where peers, subordinates, seniors and even customers offer feedback on an employee's performance. The performance management system is effective in identifying the gaps in performance that can be filled with training and skill enhancement. A performance management system, in the long run, is a profitable affair. Clear accountabilities, better productivity, reduced conflicts, job satisfaction and enhanced productivity levels all, in the long run, materialize into lower attrition.

Function Evaluation
Function evaluation is a more technical role of HR that involves comparing various functions in terms of qualification, the quality and availability of workers, job location, working times, the economic situation, job responsibility, and how much value the job adds to the organization. The idea behind function evaluation is that similar jobs should be rewarded similarly. There are different ways of internally ranking functions:
- Ranking method: subject matter experts rank functions in terms of how much they contribute to the organization as a whole. Functions are paired, and raters have to decide which one is more valuable. This is done with all functions and, based on the outcome, a ranking is established.
- Classification method: jobs are categorized and then ranked within these categories to come up with a ranking. Categorizations can include education, experience, the degree of specialized skills needed to do the job, the degree to which these skills are in demand, and so on.
- Points method: jobs are categorized according to the factors the organization believes contribute most to its success. Points are then awarded to each category for every job. These categories can include key competencies like problem-solving, technical knowledge, communication and influencing skills, innovative capability, business acumen, and so on. These competencies will differ per organization.
- Personal method: in this method, the job itself is not evaluated, but the person doing the job is. Here, employees are rewarded based on their personal skills and competencies.

Apart from the above, the HR function involves managing change, technology, innovation, and diversity. It is no longer confined to the culture or ethos of any single organization; its keynote is a cross-fertilization of ideas from different organizations. Periodic social audits of HR functions are considered essential. HR professionals have an all-encompassing role. They are required to have a thorough knowledge of the organization and its intricacies and complexities. The ultimate goal of every HR manager should be to develop a linkage between the employee and the organization, because the employee's commitment to the organization is crucial. The first and foremost role of the HR functionary is to impart continuous education to employees about the changes and challenges facing the country in general and their organization in particular. The employees should know about their balance sheet, sales progress, diversification plans, restructuring plans, sharp price movements, turnover, and all such details. HR professionals should impart this education to all employees through small booklets, video films, and lectures. I hope this article helped you find out the main purposes of HR and ways to accomplish them. All the best!
The Human Resource department performs some of the most important functions in a business; it helps the business manage its employees. These are the 14 essential functions of the Human Resource department.
-