author    luxagraf <sng@luxagraf.net>  2020-02-16 15:57:40 -0500
committer luxagraf <sng@luxagraf.net>  2020-02-16 15:57:40 -0500
commit    2a9538f7711dcea71ba5702ca94a9d9c00287ea4 (patch)
tree      e0fe5ea7c909b01e10bf6a8dd4abcb619b6924c9 /bin
parent    3165ef5abdb298108efa2ba3d339f70ca966315a (diff)
added the rest of bin
Diffstat (limited to 'bin')
-rwxr-xr-x  bin/cal.sh | 39
-rwxr-xr-x  bin/csvtommd.rb | 18
-rw-r--r--  bin/daily-rsync.sh | 19
-rwxr-xr-x  bin/deflac.sh | 5
-rw-r--r--  bin/devil.dict.dz | bin 0 -> 165369 bytes
-rw-r--r--  bin/devil.index | 1008
-rw-r--r--  bin/dictionary.zip | bin 0 -> 29607228 bytes
-rwxr-xr-x  bin/download-images.py | 30
-rw-r--r--  bin/download-videos.py | 29
-rwxr-xr-x  bin/dropkick.sh | 66
-rwxr-xr-x  bin/facebook.sh | 2
-rwxr-xr-x  bin/fastmail.sh | 2
-rwxr-xr-x  bin/generate_video_poster.sh | 7
-rwxr-xr-x  bin/get_links.sh | 6
-rwxr-xr-x  bin/gmail.sh | 2
-rwxr-xr-x  bin/gvim | 2
-rwxr-xr-x  bin/hangouts.sh | 2
-rwxr-xr-x  bin/havedone.sh | 11
-rwxr-xr-x  bin/havedonegit.sh | 10
-rw-r--r--  bin/html2text.py | 914
-rw-r--r--  bin/importer.py | 351
-rwxr-xr-x  bin/instagram.sh | 2
-rwxr-xr-x  bin/kanbanscripts/addtask.sh | 2
-rwxr-xr-x  bin/kanbanscripts/alltasks.py | 62
-rwxr-xr-x  bin/kanbanscripts/alltasks_by_type.py | 56
-rwxr-xr-x  bin/kanbanscripts/havedone.sh | 11
-rwxr-xr-x  bin/kanbanscripts/havedonegit.sh | 10
-rwxr-xr-x  bin/kanbanscripts/review.py | 51
-rwxr-xr-x  bin/kanbanscripts/showall.sh | 4
-rwxr-xr-x  bin/kanbanscripts/showalltype.sh | 11
-rwxr-xr-x  bin/kanbanscripts/showdone.sh | 4
-rwxr-xr-x  bin/kanbanscripts/showinprogress.sh | 3
-rwxr-xr-x  bin/kanbanscripts/workon.sh | 2
-rwxr-xr-x  bin/lux-video-compress.sh | 5
-rw-r--r--  bin/maildir2mbox.py | 78
-rwxr-xr-x  bin/messages.sh | 2
-rwxr-xr-x  bin/moonphase.py | 50
-rwxr-xr-x  bin/mpd-playlist-export.py | 82
-rwxr-xr-x  bin/mqq.sh | 6
-rwxr-xr-x  bin/mts-convert.sh | 3
-rwxr-xr-x  bin/mutt-notmuch-py.py | 101
-rwxr-xr-x  bin/natlangdate.py | 33
-rwxr-xr-x  bin/open-in-pane | 23
-rwxr-xr-x  bin/opener.sh | 3
-rw-r--r--  bin/parse_subids.py | 19
-rw-r--r--  bin/parse_vivaldi_notes.py | 8
-rw-r--r--  bin/pass-completion.bash | 130
-rwxr-xr-x  bin/passmenu.sh | 25
-rwxr-xr-x  bin/pinboard_links_to_markdown_files.py | 39
-rwxr-xr-x  bin/pomodoro.sh | 139
-rwxr-xr-x  bin/qutebrowser-cookiecleaner | 212
-rwxr-xr-x  bin/rename_avi_files.sh | 1
-rwxr-xr-x  bin/rename_iphone_files.sh | 3
-rwxr-xr-x  bin/rename_mov_files.sh | 1
-rwxr-xr-x  bin/rename_mp4_files.sh | 1
-rwxr-xr-x  bin/rename_mts_files.sh | 1
-rwxr-xr-x  bin/rename_phone_files.sh | 3
-rwxr-xr-x  bin/rename_raw_files.sh | 3
-rwxr-xr-x  bin/review.py | 51
-rwxr-xr-x  bin/run_pandoc.sh | 4
-rwxr-xr-x  bin/showall.sh | 3
-rwxr-xr-x  bin/showalltype.sh | 8
-rwxr-xr-x  bin/showdone.sh | 4
-rwxr-xr-x  bin/showinprogress.sh | 3
-rwxr-xr-x  bin/smartresize.sh | 3
-rw-r--r--  bin/stardict-dictd-web1913-2.4.2.tar.bz2 | bin 0 -> 28773233 bytes
-rw-r--r--  bin/temp.js | 13
-rwxr-xr-x  bin/tweet_archive_tools.py | 191
-rwxr-xr-x  bin/upgrad_pg.sh | 8
-rwxr-xr-x  bin/vcs_query.py | 358
-rw-r--r--  bin/web1913.dict.dz | bin 0 -> 27330569 bytes
-rw-r--r--  bin/web1913.idx | bin 0 -> 3024036 bytes
-rwxr-xr-x  bin/weekly-rsync.sh | 20
-rwxr-xr-x  bin/wired-count-deals-items.py | 24
-rwxr-xr-x  bin/wired-count-h4-items.py | 10
-rw-r--r--  bin/wired-dedup-h4-items.py | 13
-rwxr-xr-x  bin/xchromium | 2
-rwxr-xr-x  bin/xslack | 2
-rwxr-xr-x  bin/xvirtualbox | 2
-rwxr-xr-x  bin/xvivaldi | 2
-rwxr-xr-x  bin/xzoom | 3
81 files changed, 4436 insertions, 0 deletions
diff --git a/bin/cal.sh b/bin/cal.sh
new file mode 100755
index 0000000..e6a38fb
--- /dev/null
+++ b/bin/cal.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+
+width=200
+height=200
+datefmt="+%a %m-%d %H:%M:%S"
+
+OPTIND=1
+while getopts ":f:W:H:" opt; do
+ case $opt in
+ f) datefmt="$OPTARG" ;;
+ W) width="$OPTARG" ;;
+ H) height="$OPTARG" ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ exit 1
+ ;;
+ :)
+ echo "Option -$OPTARG requires an argument." >&2
+ exit 1
+ ;;
+ esac
+done
+
+case "$BLOCK_BUTTON" in
+ 1|2|3)
+
+ # the position of the upper left corner of the popup
+ posX=$(($BLOCK_X - $width / 2))
+ posY=$(($BLOCK_Y - $height))
+
+ swaymsg -q "exec gsimplecal \
+ --width=$width --height=$height \
+ --undecorated --fixed \
+ --close-on-unfocus --no-buttons \
+ --posx=$posX --posy=$posY \
+ > /dev/null"
+esac
+date "$datefmt"
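
cal.sh follows the i3blocks click protocol: BLOCK_BUTTON, BLOCK_X and BLOCK_Y
arrive as environment variables when a bar block is clicked. A quick way to
exercise the popup by hand, with made-up click coordinates:

    BLOCK_BUTTON=1 BLOCK_X=1880 BLOCK_Y=24 ./cal.sh -W 220 -H 180 -f "+%a %d %b"
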
diff --git a/bin/csvtommd.rb b/bin/csvtommd.rb
new file mode 100755
index 0000000..355b0f9
--- /dev/null
+++ b/bin/csvtommd.rb
@@ -0,0 +1,18 @@
+#!/usr/bin/ruby
+
+input = STDIN.read
+
+# find quoted cells and replace commas inside quotes with placeholder
+input.gsub!(/"([^,].*?)"/m) { |quoted|
+ quoted.gsub(/[\n\r]*/,'').gsub(/,/,'zXzX')
+}
+# replace remaining commas with table divider (pipe)
+input.gsub!(/,/,"| ")
+# remove quotes from quoted cells
+input.gsub!(/(\| |^)"(.*?)"/,"\\1\\2")
+# replace placeholders with commas
+input.gsub!(/zXzX/,",")
+
+input.each_line { |l|
+ puts "| #{l.strip} |"
+}
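
The filter reads CSV on stdin and emits one MultiMarkdown table row per line,
using the zXzX placeholder to protect commas inside quoted cells. A minimal
sketch (input is hypothetical):

    $ echo 'name,"hello, world"' | ./csvtommd.rb
    | name| hello, world |
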
diff --git a/bin/daily-rsync.sh b/bin/daily-rsync.sh
new file mode 100644
index 0000000..f594ef1
--- /dev/null
+++ b/bin/daily-rsync.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# ----------------------------------------------------------------------
+# rotating-filesystem-snapshot utility
+# essentially, rotate backup snapshots of /Users/sng
+# on a daily basis using rsync and cron
+# ----------------------------------------------------------------------
+
+
+rm -rf /Volumes/MiniRSync/daily.4
+mv /Volumes/MiniRSync/daily.3 /Volumes/MiniRSync/daily.4
+mv /Volumes/MiniRSync/daily.2 /Volumes/MiniRSync/daily.3
+mv /Volumes/MiniRSync/daily.1 /Volumes/MiniRSync/daily.2
+mv /Volumes/MiniRSync/daily.0 /Volumes/MiniRSync/daily.1
+
+rsync -avvz \
+--delete-after \
+--exclude-from '/Users/sng/.rsync-exclude-daily' \
+--link-dest=/Volumes/MiniRSync/daily.1 \
+/Users/sng/ /Volumes/MiniRSync/daily.0/
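
Each run rotates five hard-linked snapshots under /Volumes/MiniRSync, with
--link-dest letting unchanged files share storage with the previous day. A
sketch of the crontab line that would drive it (the hour is an assumption):

    0 3 * * * /Users/sng/bin/daily-rsync.sh
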
diff --git a/bin/deflac.sh b/bin/deflac.sh
new file mode 100755
index 0000000..24d9780
--- /dev/null
+++ b/bin/deflac.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+for a in ./*.flac; do
+    ffmpeg -i "$a" -qscale:a 0 "${a/%flac/mp3}"
+done
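
ffmpeg's -qscale:a 0 selects libmp3lame's highest-quality VBR setting (roughly
LAME -V0). Run it from inside an album directory, e.g. (path hypothetical):

    cd ~/music/some-album && ~/bin/deflac.sh
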
diff --git a/bin/devil.dict.dz b/bin/devil.dict.dz
new file mode 100644
index 0000000..f22d4f5
--- /dev/null
+++ b/bin/devil.dict.dz
Binary files differ
diff --git a/bin/devil.index b/bin/devil.index
new file mode 100644
index 0000000..13732c1
--- /dev/null
+++ b/bin/devil.index
@@ -0,0 +1,1008 @@
+00databasealphabet Bdrn c
+00databasedictfmt1120 A b
+00databaseinfo CP tM
+00databaseshort BV 6
+00databaseurl b 6
+abasement vb Cf
+abatis x6 Br
+abdication zl HW
+abdomen 67 Gd
+ability BBY FF
+abnormal BGd Fy
+aboriginies BMP CO
+abracadabra BOd cu
+abridge BrL FQ
+abrupt Bwb EG
+abscond B0h ED
+absent B4k Hq
+absentee CAO By
+absolute CCA F0
+abstainer CH0 HY
+absurdity CPM BW
+academe CQi BL
+academy CRt BL
+accident CS4 BV
+accomplice CUN E0
+accord CZB W
+accordion CZX BN
+accountability Cak EY
+accuse Ce8 B7
+acephalous Cg3 DX
+achievement CkO BC
+acknowledge ClQ B7
+acquaintance CnL DX
+actually Cqi k
+adage CrG p
+adamant Crv Bb
+adder CtK B1
+adherent Cu/ BP
+administration CwO DA
+admiral CzO Bm
+admiration C00 BP
+admonition C2D DX
+adore C5a n
+advice C6B Fn
+affianced C/o BD
+affliction DAr Bb
+african DCG r
+age DCx Ca
+agitator DFL Bh
+aim DGs EJ
+air DK1 Bj
+alderman DMY Bn
+alien DN/ 9
+allah DO8 Fl
+allegiance DUh EP
+alliance DYw Cw
+alligator Dbg Fh
+alone DhB EC
+altar DlD Iw
+ambidextrous Dtz BS
+ambition DvF B4
+amnesty Dw9 Bj
+anoint Dyg EH
+antipathy D2n BB
+aphorism D3o D9
+apologize D7l +
+apostate D8j C2
+apothecary D/Z HC
+appeal EGb BH
+appetite EHi Bl
+applause EJH o
+april fool EJv BH
+archbishop EK2 Ep
+architect EPf BW
+ardor EQ1 BD
+arena ER4 Bf
+aristocracy ETX Da
+armor EWx BN
+arrayed EX+ Bc
+arrest EZa DK
+arsenic Eck FB
+art Ehl Nr
+artlessness EvQ DG
+asperse EyW CC
+ass E0Y Ug
+auctioneer FI4 Bi
+australia FKa DW
+avernus FNw Im
+baal FWW J9
+babe FgT ND
+baby FgT ND
+bacchus FtW E/
+back FyV Bh
+backbite Fz2 BL
+bait F1B BZ
+baptism F2a IE
+barometer F+e Bc
+barrack F/6 Bx
+basilisk GBr IJ
+bastinado GJ0 9
+bath GKx Gv
+battle GRg Bo
+beard GTI B6
+beauty GVC BQ
+befriend GWS l
+beg GW3 ai
+beggar GxZ BC
+behavior Gyb Gf
+belladonna G46 CM
+benedictines G7G Fh
+benefactor HAn CZ
+berenices hair HDA IC
+bigamy HLC Bt
+bigot HMv Bj
+billingsgate HOS x
+birth HPD Ih
+blackguard HXk C2
+blankverse Haa C/
+bodysnatcher HdZ K2
+bondsman HoP Hb
+bore Hvq 7
+botany Hwl DW
+bottlenosed Hz7 BG
+boundary H1B Cd
+bounty H3e GV
+brahma H9z KU
+brain IIH Hg
+brandy IPn Ex
+bride IUY BC
+brute IVa b
+caaba IV1 Cp
+cabbage IYe IK
+calamity Igo DT
+callous Ij7 Fd
+calumnus IpY 1
+camel IqN DR
+cannibal Ite CG
+cannon Ivk BR
+canonicals Iw1 BE
+capital Ix5 F8
+carmelite I31 gu
+carnivorous JYj Bp
+cartesian JaM GR
+cat Jgd FI
+caviler Jll o
+cemetery JmN Kg
+centaur Jwt G6
+cerberus J3n KE
+childhood KBr C1
+christian KEg R4
+circus KWY B2
+clairvoyant KYO CR
+clarionet Kaf Cl
+clergyman KdE B2
+clio Ke6 EW
+clock KjQ HR
+closefisted Kqh HY
+coenobite Kx5 G/
+comfort K44 BU
+commendation K6M Bm
+commerce K7y CW
+commonwealth K+I VT
+compromise LTb DT
+compulsion LWu p
+condole LXX BK
+confidant LYh Be
+confidante LYh Be
+congratulation LZ/ r
+congress Laq 2
+connoisseur Lbg EB
+conservative Lfh CN
+consolation Lhu BV
+consul LjD Cu
+consult Llx BO
+contempt Lm/ Bl
+controversy Lok QE
+convent L4o Bq
+conversation L6S Cy
+coronation L9E CY
+corporal L/c FF
+corporation MEh Bp
+corsair MGK o
+court fool MGy g
+coward MHS BD
+crayfish MIV IW
+creditor MQr B8
+cremona MSn 4
+critic MTf HJ
+cross Mao TU
+cui bono Mt8 z
+cunning Muv D6
+cupid Myp JI
+curiosity M7x DK
+curse M+7 Eg
+cynic NDb C7
+damn NGW Im
+dance NO8 E9
+danger NT5 Do
+daring NXh BJ
+datary NYq DV
+dawn Nb/ H5
+day Nj4 ES
+dead NoK Dz
+debauchee Nr9 Bp
+debt Ntm Ip
+decalogue N2P RK
+decide OHZ L/
+defame OTY BH
+defenceless OUf m
+degenerate OVF IA
+degradation OdF Bv
+deinotherium Oe0 EN
+dejeuner OjB Ba
+delegation Okb BV
+deliberation Olw Bf
+deluge OnP Bq
+delusion Oo5 Gt
+dentist Ovm Bk
+dependent OxK B9
+deputy OzH Z1
+destiny PM8 BM
+diagnosis POI BY
+diaphragm PPg Bl
+diary PRF P8
+dictator PhB Bo
+dictionary Pip Co
+die PlR GX
+digestion Pro D5
+diplomacy Pvh +
+disabuse Pwf CG
+discriminate Pyl B8
+discussion P0h BA
+disobedience P1h BA
+disobey P2h E5
+dissemble P7a CZ
+distance P9z Bf
+distress P/S BN
+divination QAf Cj
+dog QDC JR
+dragoon QMT CP
+dramatist QOi 2
+druids QPY Mq
+duckbill QcC BP
+duel QdR Pq
+dullard Qs7 S4
+duty Q/z GI
+eat RF7 G+
+eavesdrop RM5 JS
+eccentricity RWL Bp
+economy RX0 B1
+edible RZp CU
+editor Rb9 aP
+education R2M Bs
+effect R34 Et
+egotist R8l JW
+ejection SF7 Bz
+elector SHu Bh
+electricity SJP QH
+elegy SZW Gj
+eloquence Sf5 Ca
+elysium SiT EK
+emancipation Smd Fh
+embalm Sr+ Kj
+emotion S2h C6
+encomiast S5b 9
+end S6Y G5
+enough TBR Do
+entertainment TE5 Ba
+enthusiasm TGT D3
+envelope TKK B6
+envy TME 2
+epaulet TM6 C2
+epicure TPw Ct
+epigram TSd Nn
+epitaph TgE E1
+erudition Tk5 E2
+esoteric Tpv Fk
+ethnology TvT CR
+eucharist Txk EU
+eulogy T14 B0
+evangelist T3s Ca
+everlasting T6G Hx
+exception UB3 JC
+excess UK5 Lr
+excommunication UWk Fs
+executive UcQ gp
+exhort U85 B7
+exile U+0 IE
+existence VG4 DN
+experience VKF GN
+expostulation VQS BZ
+extinction VRr BS
+fairy VS9 WB
+faith Vo+ B1
+famous Vqz Eh
+fashion VvU OV
+feast V9p Lm
+felon WJP CC
+female WLR dB
+fib WoS Nx
+fickleness W2D BE
+fiddle W3H Fx
+fidelity W84 BI
+finance W+A D/
+flag XB/ DK
+flesh XFJ 2
+flop XF/ Da
+flyspeck XJZ lN
+folly Xum OH
+fool X8t QN
+force YM6 DS
+forefinger YQM BL
+foreordination YRX M7
+forgetfulness YeS Bt
+fork Yf/ HG
+forma pauperis YnF KA
+frankalmoigne YxF MA
+freebooter Y9F B3
+freedom Y+8 Pq
+freemasons ZOm Mk
+friendless ZbK B4
+friendship ZdC GT
+frog ZjV Pp
+fryingpan Zy+ Vr
+funeral aIp HC
+future aPr By
+gallows aRd Gs
+gargoyle aYJ It
+garther ag2 B0
+genealogy alb Bt
+generous aiq Cx
+genteel anI FN
+geographer asV II
+geology a0d J3
+ghost a+U XV
+ghoul bVp ZY
+glutton bvB BU
+gnome bwV Jp
+gnostics b5+ Dq
+gnu b9o K0
+good cIc B4
+goose cKU I8
+gorgon cTQ Dv
+gout cW/ BE
+graces cYD Eg
+grammar ccj CR
+grape ce0 K1
+grapeshot cpp Bq
+grave crT Mz
+gravitation c4G Ft
+great c9z LS
+guillotine dJF NK
+gunpowder dWP hQ
+habeas corpus d3f Bj
+habit d5C k
+hades d5m Ri
+hag eLI JR
+half eUZ Jn
+halo eeA Lb
+hand epb Bu
+handkerchief erJ Id
+hangman ezm G8
+happiness e6i BZ
+harangue e77 BO
+harbor e9J Bk
+harmonists e+t DK
+hash fB3 BO
+hatchet fDF FM
+hatred fIR BP
+headmoney fJg hW
+hearse fq2 k
+heart fra UQ
+heat f/q GB
+heathen gFr Rw
+heaven gXb Cj
+hebrew gZ+ Bd
+helpmate gbb IK
+hemp gjl C1
+hermit gma BA
+hers gna T
+hibernate gnt RD
+hippogriff g4w Ev
+historian g9f m
+history g+F GZ
+hog hEe Ij
+homicide hRQ Em
+homiletics hV2 Og
+homoeopathist hNB 8
+homoeopathy hN9 DT
+honorable hkW C6
+hope hnQ Hz
+hospitality hvD B5
+hostility hw8 EK
+houri h1G Eb
+house h5h Jx
+houseless iDS 8
+hovel iEO Jh
+humanity iNv BQ
+humorist iO/ II
+hurricane iXH F1
+hurry ic8 m
+husband idi BM
+hybrid ieu d
+hydra ifL BM
+hyena igX Cp
+hypochondriasis ijA E5
+hypocrite in5 B+
+i ip3 JA
+ichor iy3 E8
+iconoclast i3z IK
+idiot i/9 G1
+idleness jGy B4
+ignoramus jIq JC
+illuminati jRs Ch
+illustrious jUN BT
+imagination jVg BP
+imbecility jWv Bv
+immigrant jYe BU
+immodest jZy Vi
+immoral jvU H8
+immortality j3Q EL
+impale j7b TG
+impartial kOh Cj
+impenitence kRE Bc
+impiety kSg w
+imposition kTQ H3
+impostor kbH x
+improbability kb4 PG
+improvidence kq+ BV
+impunity ksT X
+inadmissible ksq aV
+inards oce Jp
+inauspiciously lG/ lT
+income lsS Qx
+incompatibility l9D Dr
+incompossible mAu II
+incubus mI2 PF
+incumbent mX7 BH
+indecision mZC OW
+indifferent mnY FT
+indigestion msr Er
+indiscretion mxW n
+inexpedient mx9 /
+infalapsarian nce eM
+infancy my8 CV
+inferiae m1R S2
+infidel nIH S8
+influence nbD Bb
+ingrate n6q Jm
+injury oEQ BA
+injustice oFQ CO
+ink oHe LT
+innate oSx Jt
+inscription omH Tt
+insectivora o50 Er
+insurance o+f zJ
+insurrection pxo Bx
+intention pzZ DF
+interpreter p2e DG
+interregnum p5k EW
+intimacy p96 OT
+introduction qMN Qc
+inventor qcp B4
+irreligion qeh BF
+itch qfm q
+j qgQ KY
+jealous qqo Bt
+jester qsV Zq
+jewsharp rF/ B/
+josssticks rH+ CL
+justice rKJ Cj
+k rMs Sh
+keep rfN FK
+kill rkX BB
+kilt rlY BX
+kindness rmv 7
+king rnq Jc
+kings evil rxG hf
+kiss sSl D2
+kleptomaniac sWb h
+knight sW8 HV
+koran seR DB
+labor shS BF
+land siX PW
+language sxt BX
+laocoon szE F6
+lap s4+ Fp
+last s+n E+
+laughter tDl MS
+laureate tP3 Ht
+laurel tP3 Ht
+laurel tXk Cv
+law taT I7
+lawful tjO BI
+lawyer tkW 2
+laziness tlM BH
+lead tmT RE
+learning t3X BC
+lecturer t4Z Bo
+legacy t6B BK
+leonine t7L J5
+lettuce uFE Mf
+leviathan uRj G3
+lexicographer uYa iI
+liar u6i u
+liberty u7Q FQ
+lickspittle vAg JV
+life vJ1 MP
+lighthouse vWE B6
+limb vX+ VE
+linen vtC B+
+litigant vvA BX
+litigation vwX BU
+liver vxr Ic
+lld v6H LD
+lockandkey wFK BQ
+lodger wGa CF
+logic wIf Jw
+logomachy wSP GY
+longanimity wYn Br
+longevity waS 5
+lookingglass wbL cG
+loquacity w3R Bn
+lord w44 k9
+lore xd1 PF
+loss xs6 Hc
+love x0W G8
+lowbred x7S x
+luminary x8D Ba
+lunarian x9d FW
+lyre yCz J5
+mace yMs CX
+machination yPD Ha
+macrobian yWd rz
+mad zCQ MG
+magdalene zOW Ie
+magic zW0 Ch
+magnet zZV w
+magnetism zaF EP
+magnificent zeU C2
+magnitude zhK NI
+magpie zuS Bl
+maiden zv3 Ok
+majesty z+b Dm
+male 0CB DI
+malefactor 0FJ BF
+malthusian 0GO E5
+mammalia 0LH C1
+mammon 0N8 EZ
+man 0SV Tu
+manes 0mD Dl
+manicheism 0po Cp
+manna 0sR Dg
+marriage 0vx B8
+martyr 0xt BS
+material 0y/ D3
+mausoleum 022 6
+mayonnaise 03w BY
+me 05I Ck
+meander 07s D/
+medal 0/r FA
+medicine 1Er BI
+meekness 1Fz Ft
+meerschaum 1Lg T5
+mendacious 1fZ p
+merchant 1gC B5
+mercy 1h7 4
+mesmerism 1iz Bo
+metropolis 1kb w
+millennium 1lL B6
+mind 1nF Hw
+mine 1u1 4
+minister 1vt EY
+minor 10F i
+minstrel 10n CU
+miracle 127 CZ
+miscreant 15U Dc
+misdemeanor 18w JM
+misericorde 2F8 CE
+misfortune 2IA 4
+miss 2I4 H5
+molecule 2Qx L/
+monad 2cw Lf
+monarch 2oP HS
+monarchical government 2vh p
+monday 2wK BG
+money 2xQ Cm
+monkey 2z2 BS
+monosyllabic 21I HV
+monsignor 28d Bt
+monument 2+K G6
+moral 3FE H9
+more 3NB x
+mouse 3Ny Ol
+mousquetaire 3cX CN
+mouth 3ek BQ
+mugwump 3f0 B5
+mulatto 3ht 1
+multitude 3ii JR
+mummy 3rz L1
+mustang 33o B2
+myrmidon 35e BL
+mythology 36p C9
+nectar 39m Gh
+negro 4EH D2
+neighbor 4H9 B1
+nepotism 4Jy BQ
+newtonian 4LC Dw
+nihilist 4Oy Bx
+nirvana 4Qj CV
+nobleman 4S4 B5
+noise 4Ux B0
+nominate 4Wl Ch
+nominee 4ZG CR
+noncombatant 4bX j
+nonsense 4b6 BQ
+nose 4dK Nw
+notoriety 4q6 Db
+noumenon 4uV G2
+novel 41L Py
+november 5E9 0
+oath 5Fx Bs
+oblivion 5Hd Ev
+observatory 5MM Bf
+obsessed 5Nr NR
+obsolete 5a8 K0
+obstinate 5lw DB
+occasional 5ox F7
+occident 5us E7
+ocean 5zn Bk
+offensive 51L ER
+old 55c HE
+oleaginous 6Ag FN
+olympian 6Ft G3
+omen 6Mk BB
+once 6Nl V
+opera 6N6 Gw
+opiate 6Uq BX
+opportunity 6WB BG
+oppose 6XH EU
+opposition 6bb Y+
+optimism 60Z IH
+optimist 68g F+
+oratory 7Ce B1
+orphan 7ET Hk
+orthodox 7L3 5
+orthography 7Mw Hn
+ostrich 7UX Eu
+otherwise 7ZF d
+outcome 7Zi Eu
+outdo 7eQ g
+outofdoors 7ew SC
+ovation 7wy Kr
+overeat 77d EI
+overwork 7/l Bg
+owe 8BF Db
+oyster 8Eg Ck
+pain 8HE C2
+painting 8J6 Ev
+palace 8Op D6
+palm 8Sj Hz
+palmistry 8aW Gb
+pandemonium 8gx Ev
+pantaloons 8lg EB
+pantheism 8ph By
+pantomime 8rT CF
+pardon 8tY B+
+passport 8vW Ci
+past 8x4 Mb
+pastime 8+T Bc
+patience 8/v /
+patriot 9Au CK
+patriotism 9C4 Et
+peace 9Hl JS
+pedestrian 9Q3 BS
+pedigree 9SJ CC
+penitent 9UL 0
+perfection 9U/ GE
+peripatetic 9bD Du
+peroration 9ex DZ
+perseverance 9iK Lf
+pessimism 9tp Cq
+philanthropist 9wT CJ
+philistine 9yc DQ
+philosophy 91s BI
+phoenix 920 BG
+phonograph 936 BG
+photograph 95A Cq
+phrenology 97q CR
+physician 997 BQ
+physiognomy 9/L Ic
+piano +Hn CX
+pickaninny +J+ CJ
+picture +MH Ez
+pie +Q6 HM
+piety +YG EF
+pig +cL C0
+pigmy +e/ Dp
+pilgrim +io EH
+pillory +mv Co
+piracy +pX BH
+pitiful +qe Bd
+pity +r7 /
+plagiarism +s6 Bt
+plagiarize +un Bg
+plague +wH Eh
+plan +0o BU
+platitude +18 Hn
+platonic +9j CP
+plaudits +/y BS
+please /BE BH
+pleasure /CL 0
+plebeian /C/ Cg
+plebiscite /Ff BI
+plenipotentiary /Gn Cb
+pleonasm /JC BB
+plow /KD BK
+plunder /LN EN
+pocket /Pa DS
+poetry /Ss BN
+poker /T5 Bg
+police /VZ +
+politeness /WX w
+politician /ZL EU
+politics /XH CE
+polygamy /df CX
+populist /f2 H/
+portable /n1 Ef
+portuguese /sU CY
+positive /us 1
+positivism /vh C7
+posterity /yc CR
+potable /0t Jf
+poverty /+M GE
+pray BAEQ Bz
+preadamite BAGD F9
+precedent BAMA IP
+precedent BAXJ IP
+precipitate BAUP C6
+precipitate BAfY C6
+predestination BAiS If
+predicament BAqx r
+predilection BArc 5
+preexistence BAsV z
+preference BAtI FN
+prehistoric BAyV Hi
+prejudice BA53 BE
+prelate BA67 CN
+prerogative BA9I z
+presbyterian BA97 B7
+prescription BA/2 Bv
+present BBBl Bi
+presentable BBDH Fj
+preside BBIq Kf
+presidency BBTJ BJ
+president BBUS Jo
+prevaricator BBd6 1
+price BBev Be
+primate BBgN EY
+prison BBkl Dl
+private BBoK Bv
+proboscis BBp5 LE
+projectile BB09 Hu
+proof BB8r CY
+proofreader BB/D CE
+property BCBH ED
+prophecy BCFK BW
+prospect BCGg Eo
+providential BCLI Bf
+prude BCMn 7
+publish BCNi Bb
+push BCO9 Bq
+pyrrhonism BCQn Ct
+queen BCTU B1
+quill BCVJ DW
+quiver BCYf GN
+quixotic BCes GV
+quorum BClB E5
+quotation BCp6 Fd
+quotient BCvX Cz
+rabble BCyK FH
+rack BC3R Dx
+radicalism BDfp BU
+radium BDg9 Bl
+railroad BDii EE
+ramshackle BDmm Gh
+rank BC7C Eg
+ransom BC/i CL
+rapacity BDBt BC
+rarebit BDCv E6
+rascal BDHp 1
+rascality BDIe BK
+rash BDJo Ei
+rational BDOK Bf
+rattlesnake BDPp /
+razor BDQo Cg
+reach BDTI GG
+reading BDZO Gb
+realism BDtH Ce
+reality BDvl CN
+really BDxy b
+rear BDyN Bk
+reason BD0x o
+reason BDzx BA
+reasonable BD1Z B3
+rebel BD3Q BJ
+recollect BD4Z BJ
+reconciliation BD5i Bq
+reconsider BD7M BG
+recount BD8S B0
+recreation BD+G BO
+recruit BD/U Fh
+rector BEE1 CA
+redemption BEG1 LJ
+redress BER+ GE
+redskin BEYC Be
+redundant BEZg Gz
+referendum BEgT B6
+reflection BEiN C3
+reform BElE BN
+refuge BEmR Hw
+refusal BEuB HD
+regalia BE1E Y5
+religion BFN9 GO
+reliquary BFUL PH
+renown BFjS I+
+reparation BFsQ Bx
+repartee BFuB DP
+repentance BFxQ Gt
+replica BF39 Eo
+reporter BF8l Fm
+repose BGCL o
+representative BGCz CN
+reprobation BGFA Ek
+republic BGJk HY
+requiem BGQ8 DA
+resident BGT8 i
+resign BGUe IF
+resolute BGcj 4
+respectability BGdb Ba
+respirator BGe1 Cd
+respite BGhS TT
+resplendent BG0l F0
+respond BG6Z GK
+responsibility BHAj I5
+restitutions BHJc Bl
+restitutor BHLB t
+retaliation BHLu BL
+retribution BHM5 L4
+reveille BHYx Ep
+revelation BHda CW
+reverence BHfw BN
+review BHg9 Do
+revolution BHkl Ko
+rhadomancer BHvN Bq
+ribaldry BHw3 BC
+ribroaster BHx5 Ez
+ricewater BH2s EQ
+rich BH68 Fa
+riches BIAW ID
+ridicule BIIZ Hv
+right BIQI QF
+righteousness BIgN St
+rime BIy6 Ct
+rimer BI1n GW
+riot BI79 BR
+rip BI9O Da
+rite BJAo CV
+ritualism BJC9 Bn
+road BJEk Ex
+robber BJJV F3
+romance BJPM Pf
+rope BJer FQ
+rostrum BJj7 C8
+roundhead BJm3 MS
+rubbish BJzJ Co
+ruin BJ1x BZ
+rum BJ3K BP
+rumor BJ4Z Jo
+russian BKCB BV
+sabbath BKDW Pb
+sacerdotalist BKSx Dd
+sacrament BKWO F1
+sacred BKcD IV
+safetyclutch BKuC gR
+saint BLOT Gd
+salacity BLUw F9
+salamander BLat Er
+sandlotter BKkY Jq
+sarcophagus BLfY ED
+satan BLjb Kz
+satiety BLuO Bd
+satire BLvr QC
+satyr BL/t Gp
+sauce BMGW EA
+saw BMKW P/
+scarabaeus BMaV Ha
+scarabee BMhv Pv
+scarification BMxe Ke
+scepter BM78 Di
+scimetar BM/e uq
+scrapbook BNuI Nx
+scribbler BN75 BR
+scriptures BN9K CS
+seal BN/c aT
+seine BOZv Ju
+selfesteem BOjd t
+selfevident BOkK /
+selfish BOlJ BH
+senate BOmQ BU
+serial BOnk Pm
+severalty BO3K Mg
+sheriff BPDq dc
+siren BPhG Da
+slang BPkg E+
+smithareen BPpe Qo
+sophistry BP6G LJ
+sorcery BQFP HU
+soul BQMj l8
+spooker BQyf Hf
+story BQ5+ Bd2
+success BSX0 I/
+suffrage BSgz Ny
+sycophant BSul aM
+syllogism BTIx B0
+sylph BTKl IH
+symbol BTSs Gn
+symbolic BTZT KB
+t BTjU FO
+table dhote BToi HJ
+tail BTvr Lr
+take BT7W BI
+talk BT8e Bc
+tariff BT96 du
+technicality BUbo IE
+tedium BUjs Fb
+teetotaler BUpH Bl
+telephone BUqs CE
+telescope BUsw D0
+tenacity BUwk M4
+theosophy BU9c M4
+tights BVKU US
+tomb BVem H2
+tope BVmc Ph
+tortoise BV19 e0
+tree BWUx Uc
+trial BWpN hA
+trichinosis BXKN Ia
+trinity BXSn LR
+troglodyte BXd4 Fi
+truce BXja Y
+trust BXoZ Dh
+truth BXjy EB
+truthful BXnz m
+tsetse fly BXxq Dt
+turkey BXr6 C5
+twice BXuz e
+type BXvR CZ
+tzetze fly BXxq Dt
+ubiquity BX1X ME
+ugliness BYBb BX
+ultimatum BYCy RD
+unamerican BYT1 1
+unction BYUq H4
+understanding BYci Ku
+unitarian BYnQ +
+universalist BYoO BZ
+urbanity BYpn KI
+usage BYzv D4
+uxoriousness BY3n BN
+valor BY40 F8
+vanity BY+w Mx
+virtues BZLh m
+vituperation BZMH Bq
+vote BZNx Bw
+w BZPh KR
+wall street BZZy QM
+war BZp+ R3
+washingtonian BZ71 HM
+weaknesses BaDB DE
+weather BaGF RH
+wedding BaXM CX
+werewolf BaZj LL
+whangdepootenawah Baku bO
+wheat Ba/8 Eb
+white BbEX c
+widow BbEz C4
+wine BbHr CV
+wit BbKA Bn
+witch BbLn Ct
+witticism BbOU B+
+woman BbQS Oi
+wormsmeat Bbe0 Ts
+worship Bbyg Cc
+wrath Bb08 Mw
+x BcBs Ko
+yankee BcMU Ca
+year BcOu BF
+yesterday BcPz OU
+yoke BceH D4
+youth Bch/ Jg
+zany Bcrf Im
+zanzibari Bc0F NH
+zeal BdBM GB
+zenith BdHN O5
+zeus BdWG IF
+zigzag BdeL GG
+zoology BdkR HW
diff --git a/bin/dictionary.zip b/bin/dictionary.zip
new file mode 100644
index 0000000..18cf68c
--- /dev/null
+++ b/bin/dictionary.zip
Binary files differ
diff --git a/bin/download-images.py b/bin/download-images.py
new file mode 100755
index 0000000..5ad7e73
--- /dev/null
+++ b/bin/download-images.py
@@ -0,0 +1,30 @@
+#! /bin/python
+import os
+from shutil import copyfile
+import sys
+if os.path.exists(sys.argv[1]):
+ path = os.path.abspath(sys.argv[1])
+else:
+ print("Cannot find " + sys.argv[1])
+ exit()
+with open('/home/lxf/.photocopyrc', 'r') as f:
+ lastfile = str(f.readline().rstrip())
+sorter = []
+for (dirname, dirs, files) in os.walk(path):
+    dirn = os.path.abspath(dirname)
+    for filename in files:
+        if filename.endswith('.ARW'):
+            if int(filename.split('DSC')[1].split(".ARW")[0]) > int(lastfile.split('DSC')[1].split(".ARW")[0]):
+                # remember the full source path; dirn alone would point at the last directory walked
+                sorter.append([int(filename.split('DSC')[1].split(".ARW")[0]), filename, os.path.join(dirn, filename)])
+for f in sorted(sorter):
+    dest = '/home/lxf/pictures/inbox/'
+    if not os.path.exists(dest):
+        os.makedirs(dest)
+    print("copying:", f[2], "--->", dest + f[1])
+    copyfile(f[2], dest + f[1])
+try:
+    out = sorted(sorter)[-1]
+    with open('/home/lxf/.photocopyrc', 'w') as f:
+        f.write(out[1])
+except IndexError:
+    pass
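
The script takes the camera card's mount point as its only argument, copies
every .ARW numbered above the marker stored in ~/.photocopyrc, then advances
the marker. A hypothetical invocation:

    python download-images.py /run/media/lxf/SONY/DCIM
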
diff --git a/bin/download-videos.py b/bin/download-videos.py
new file mode 100644
index 0000000..194edf1
--- /dev/null
+++ b/bin/download-videos.py
@@ -0,0 +1,29 @@
+#! /bin/python
+import os
+from shutil import copyfile
+import sys
+if os.path.exists(sys.argv[1]):
+ path = os.path.abspath(sys.argv[1])
+else:
+ print("Cannot find " + sys.argv[1])
+ exit()
+with open('/home/lxf/.videocopyrc', 'r') as f:
+ lastfile = str(f.readline().rstrip())
+sorter = []
+for (dirname, dirs, files) in os.walk(path):
+    dirn = os.path.abspath(dirname)
+    for filename in files:
+        # skip anything that isn't an .MTS clip, and remember the full source path
+        if filename.endswith('.MTS') and int(filename.split('.MTS')[0]) > int(lastfile.split('.MTS')[0]):
+            sorter.append([int(filename.split('.MTS')[0]), filename, os.path.join(dirn, filename)])
+for f in sorted(sorter):
+    dest = '/home/lxf/videos/inbox/'
+    if not os.path.exists(dest):
+        os.makedirs(dest)
+    print("copying:", f[2], "--->", dest + f[1])
+    copyfile(f[2], dest + f[1])
+try:
+    out = sorted(sorter)[-1]
+    with open('/home/lxf/.videocopyrc', 'w') as f:
+        f.write(out[1])
+except IndexError:
+    pass
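
Same bookkeeping as download-images.py, keyed on ~/.videocopyrc and .MTS clip
numbers instead; e.g. (mount point hypothetical):

    python download-videos.py /run/media/lxf/CAM/AVCHD/BDMV/STREAM
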
diff --git a/bin/dropkick.sh b/bin/dropkick.sh
new file mode 100755
index 0000000..ddda1bd
--- /dev/null
+++ b/bin/dropkick.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+#
+# DROPKICK.SH
+#
+# Detect and Disconnect the DropCam and Withings devices some people are using to
+# spy on guests in their home, especially in AirBnB rentals. Based on Glasshole.sh:
+#
+# http://julianoliver.com/output/log_2014-05-30_20-52
+#
+# This script was named by Adam Harvey (http://ahprojects.com), who also
+# encouraged me to write it. It requires a GNU/Linux host (laptop, Raspberry Pi,
+# etc) and the aircrack-ng suite. I put 'beep' in there for a little audio
+# notification. Comment it out if you don't need it.
+#
+# See also http://plugunplug.net, for a plug-and-play device that does this
+# based on OpenWrt. Code here:
+#
+# https://github.com/JulianOliver/CyborgUnplug
+#
+# Save as dropkick.sh, 'chmod +x dropkick.sh' and exec as follows:
+#
+# sudo ./dropkick.sh <WIRELESS NIC> <BSSID OF ACCESS POINT>
+
+shopt -s nocasematch # Set shell to ignore case
+shopt -s extglob # For non-interactive shell.
+
+readonly NIC=$1 # Your wireless NIC
+readonly BSSID=$2 # Network BSSID (AirBnB WiFi network)
+readonly MAC=$(/sbin/ifconfig | grep $NIC | head -n 1 | awk '{ print $5 }')
+# MAC=$(ip link show "$NIC" | awk '/ether/ {print $2}') # If 'ifconfig' not
+# present.
+readonly GGMAC='@(30:8C:FB*|00:24:E4*)' # Match against DropCam and Withings
+readonly POLL=30 # Check every 30 seconds
+readonly LOG=/var/log/dropkick.log
+
+airmon-ng stop mon0 # Pull down any lingering monitor devices
+airmon-ng start $NIC # Start a monitor device
+
+while true;
+ do
+ for TARGET in $(arp-scan -I $NIC --localnet | grep -o -E \
+ '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}')
+ do
+      if [[ "$TARGET" == $GGMAC ]] # pattern must be unquoted so extglob matching applies
+ then
+ # Audio alert
+ beep -f 1000 -l 500 -n 200 -r 2
+ echo "WiFi camera discovered: "$TARGET >> $LOG
+ aireplay-ng -0 1 -a $BSSID -c $TARGET mon0
+ echo "De-authed: "$TARGET " from network: " $BSSID >> $LOG
+ echo '
+ __ __ _ __ __
+ ___/ /______ ___ / /__ (_)___/ /_____ ___/ /
+ / _ / __/ _ \/ _ \/ _// / __/ _/ -_) _ /
+ \_,_/_/ \___/ .__/_/\_\/_/\__/_/\_\\__/\_,_/
+ /_/
+
+ '
+ else
+ echo $TARGET": is not a DropCam or Withings device. Leaving alone.."
+ fi
+ done
+ echo "None found this round."
+ sleep $POLL
+done
+airmon-ng stop mon0
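
GGMAC is an extglob alternation of OUI prefixes, so covering more camera
vendors is a one-line change; a sketch with a deliberately fake third prefix:

    readonly GGMAC='@(30:8C:FB*|00:24:E4*|AA:BB:CC*)'
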
diff --git a/bin/facebook.sh b/bin/facebook.sh
new file mode 100755
index 0000000..8baae86
--- /dev/null
+++ b/bin/facebook.sh
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+GDK_BACKEND=x11 exo-open ~/bin/apps/book.desktop
diff --git a/bin/fastmail.sh b/bin/fastmail.sh
new file mode 100755
index 0000000..815afe4
--- /dev/null
+++ b/bin/fastmail.sh
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+GDK_BACKEND=x11 exo-open ~/bin/apps/mail.desktop
diff --git a/bin/generate_video_poster.sh b/bin/generate_video_poster.sh
new file mode 100755
index 0000000..0755ad9
--- /dev/null
+++ b/bin/generate_video_poster.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# take an mp4, grab one frame at the given timestamp, and write it out under the same name with a .jpg extension
+FILE="$1"
+NEWFILE="${FILE%.mp4}.jpg"
+INTERVAL=$2
+echo "file - $FILE"
+ffmpeg -y -i "$FILE" -f mjpeg -vframes 1 -ss "$INTERVAL" "$NEWFILE"
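
For example, grabbing a poster frame five seconds in (file name hypothetical):

    ./generate_video_poster.sh clip.mp4 00:00:05   # writes clip.jpg
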
diff --git a/bin/get_links.sh b/bin/get_links.sh
new file mode 100755
index 0000000..d14cee0
--- /dev/null
+++ b/bin/get_links.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+cd ~/bin
+source venv/bin/activate
+python pinboard_links_to_markdown_files.py
+deactivate
diff --git a/bin/gmail.sh b/bin/gmail.sh
new file mode 100755
index 0000000..1684967
--- /dev/null
+++ b/bin/gmail.sh
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+GDK_BACKEND=x11 exo-open ~/bin/apps/gmailer.desktop
diff --git a/bin/gvim b/bin/gvim
new file mode 100755
index 0000000..9922ba6
--- /dev/null
+++ b/bin/gvim
@@ -0,0 +1,2 @@
+#!/bin/bash
+urxvt -e vim "$@"
diff --git a/bin/hangouts.sh b/bin/hangouts.sh
new file mode 100755
index 0000000..5579dca
--- /dev/null
+++ b/bin/hangouts.sh
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+GDK_BACKEND=x11 exo-open ~/bin/apps/outs.desktop
diff --git a/bin/havedone.sh b/bin/havedone.sh
new file mode 100755
index 0000000..7b71aca
--- /dev/null
+++ b/bin/havedone.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+cd ~/gtd
+mkdir -p ~/gtd/done/$(date '+%Y-%m-%d')
+OIFS="$IFS"
+IFS=$'\n'
+for f in $(find . -maxdepth 1 -type f -print0 | xargs -0 grep -li @done)
+ do
+    echo "$f"
+ mv $f ~/gtd/done/$(date '+%Y-%m-%d')
+ done
+IFS="$OIFS"
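
Run at day's end, this files every note in ~/gtd whose body contains @done
into a dated folder; havedonegit.sh below appears meant to run right after,
adding a stub file per git commit made since 5am:

    ~/bin/havedone.sh && ~/bin/havedonegit.sh
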
diff --git a/bin/havedonegit.sh b/bin/havedonegit.sh
new file mode 100755
index 0000000..59cb066
--- /dev/null
+++ b/bin/havedonegit.sh
@@ -0,0 +1,10 @@
+OIFS="$IFS"
+IFS=$'\n'
+for f in $(git log --branches --pretty=format:"%s" --since="5am")
+ do
+ #watch out for / in commit messages
+ nf=${f//\//:}
+ echo $nf
+ touch ~/gtd/done/$(date '+%Y-%m-%d')/"$nf".txt
+ done
+IFS="$OIFS"
diff --git a/bin/html2text.py b/bin/html2text.py
new file mode 100644
index 0000000..1752890
--- /dev/null
+++ b/bin/html2text.py
@@ -0,0 +1,914 @@
+#!/usr/bin/env python
+"""html2text: Turn HTML into equivalent Markdown-structured text."""
+__version__ = "3.200.3"
+__author__ = "Aaron Swartz (me@aaronsw.com)"
+__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
+__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
+
+# TODO:
+# Support decoded entities with unifiable.
+
+try:
+ True
+except NameError:
+ setattr(__builtins__, 'True', 1)
+ setattr(__builtins__, 'False', 0)
+
+def has_key(x, y):
+ if hasattr(x, 'has_key'): return x.has_key(y)
+ else: return y in x
+
+try:
+ import htmlentitydefs
+ import urlparse
+ import HTMLParser
+except ImportError: #Python3
+ import html.entities as htmlentitydefs
+ import urllib.parse as urlparse
+ import html.parser as HTMLParser
+try: #Python3
+ import urllib.request as urllib
+except:
+ import urllib
+import optparse, re, sys, codecs, types
+
+try: from textwrap import wrap
+except: pass
+
+# Use Unicode characters instead of their ascii psuedo-replacements
+UNICODE_SNOB = 0
+
+# Escape all special characters. Output is less readable, but avoids corner case formatting issues.
+ESCAPE_SNOB = 0
+
+# Put the links after each paragraph instead of at the end.
+LINKS_EACH_PARAGRAPH = 0
+
+# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
+BODY_WIDTH = 78
+
+# Don't show internal links (href="#local-anchor") -- corresponding link targets
+# won't be visible in the plain text file anyway.
+SKIP_INTERNAL_LINKS = True
+
+# Use inline, rather than reference, formatting for images and links
+INLINE_LINKS = True
+
+# Number of pixels Google indents nested lists
+GOOGLE_LIST_INDENT = 36
+
+IGNORE_ANCHORS = False
+IGNORE_IMAGES = False
+IGNORE_EMPHASIS = False
+
+### Entity Nonsense ###
+
+def name2cp(k):
+ if k == 'apos': return ord("'")
+ if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
+ return htmlentitydefs.name2codepoint[k]
+ else:
+ k = htmlentitydefs.entitydefs[k]
+ if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
+ return ord(codecs.latin_1_decode(k)[0])
+
+unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
+'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
+'ndash':'-', 'oelig':'oe', 'aelig':'ae',
+'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
+'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
+'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
+'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
+'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u',
+'lrm':'', 'rlm':''}
+
+unifiable_n = {}
+
+for k in unifiable.keys():
+ unifiable_n[name2cp(k)] = unifiable[k]
+
+### End Entity Nonsense ###
+
+def onlywhite(line):
+ """Return true if the line does only consist of whitespace characters."""
+ for c in line:
+        if c != ' ' and c != '\t':
+            return c == ' '
+ return line
+
+def hn(tag):
+ if tag[0] == 'h' and len(tag) == 2:
+ try:
+ n = int(tag[1])
+ if n in range(1, 10): return n
+ except ValueError: return 0
+
+def dumb_property_dict(style):
+ """returns a hash of css attributes"""
+ return dict([(x.strip(), y.strip()) for x, y in [z.split(':', 1) for z in style.split(';') if ':' in z]]);
+
+def dumb_css_parser(data):
+ """returns a hash of css selectors, each of which contains a hash of css attributes"""
+ # remove @import sentences
+ data += ';'
+ importIndex = data.find('@import')
+ while importIndex != -1:
+ data = data[0:importIndex] + data[data.find(';', importIndex) + 1:]
+ importIndex = data.find('@import')
+
+ # parse the css. reverted from dictionary compehension in order to support older pythons
+ elements = [x.split('{') for x in data.split('}') if '{' in x.strip()]
+ try:
+ elements = dict([(a.strip(), dumb_property_dict(b)) for a, b in elements])
+ except ValueError:
+ elements = {} # not that important
+
+ return elements
+
+def element_style(attrs, style_def, parent_style):
+ """returns a hash of the 'final' style attributes of the element"""
+ style = parent_style.copy()
+ if 'class' in attrs:
+ for css_class in attrs['class'].split():
+ css_style = style_def['.' + css_class]
+ style.update(css_style)
+ if 'style' in attrs:
+ immediate_style = dumb_property_dict(attrs['style'])
+ style.update(immediate_style)
+ return style
+
+def google_list_style(style):
+ """finds out whether this is an ordered or unordered list"""
+ if 'list-style-type' in style:
+ list_style = style['list-style-type']
+ if list_style in ['disc', 'circle', 'square', 'none']:
+ return 'ul'
+ return 'ol'
+
+def google_has_height(style):
+ """check if the style of the element has the 'height' attribute explicitly defined"""
+ if 'height' in style:
+ return True
+ return False
+
+def google_text_emphasis(style):
+ """return a list of all emphasis modifiers of the element"""
+ emphasis = []
+ if 'text-decoration' in style:
+ emphasis.append(style['text-decoration'])
+ if 'font-style' in style:
+ emphasis.append(style['font-style'])
+ if 'font-weight' in style:
+ emphasis.append(style['font-weight'])
+ return emphasis
+
+def google_fixed_width_font(style):
+ """check if the css of the current element defines a fixed width font"""
+ font_family = ''
+ if 'font-family' in style:
+ font_family = style['font-family']
+ if 'Courier New' == font_family or 'Consolas' == font_family:
+ return True
+ return False
+
+def list_numbering_start(attrs):
+ """extract numbering from list element attributes"""
+ if 'start' in attrs:
+ return int(attrs['start']) - 1
+ else:
+ return 0
+
+class HTML2Text(HTMLParser.HTMLParser):
+ def __init__(self, out=None, baseurl=''):
+ HTMLParser.HTMLParser.__init__(self)
+
+ # Config options
+ self.unicode_snob = UNICODE_SNOB
+ self.escape_snob = ESCAPE_SNOB
+ self.links_each_paragraph = LINKS_EACH_PARAGRAPH
+ self.body_width = BODY_WIDTH
+ self.skip_internal_links = SKIP_INTERNAL_LINKS
+ self.inline_links = INLINE_LINKS
+ self.google_list_indent = GOOGLE_LIST_INDENT
+ self.ignore_links = IGNORE_ANCHORS
+ self.ignore_images = IGNORE_IMAGES
+ self.ignore_emphasis = IGNORE_EMPHASIS
+ self.google_doc = False
+ self.ul_item_mark = '*'
+ self.emphasis_mark = '_'
+ self.strong_mark = '**'
+
+ if out is None:
+ self.out = self.outtextf
+ else:
+ self.out = out
+
+ self.outtextlist = [] # empty list to store output characters before they are "joined"
+
+ try:
+ self.outtext = unicode()
+ except NameError: # Python3
+ self.outtext = str()
+
+ self.quiet = 0
+ self.p_p = 0 # number of newline character to print before next output
+ self.outcount = 0
+ self.start = 1
+ self.space = 0
+ self.a = []
+ self.astack = []
+ self.maybe_automatic_link = None
+ self.absolute_url_matcher = re.compile(r'^[a-zA-Z+]+://')
+ self.acount = 0
+ self.list = []
+ self.blockquote = 0
+ self.pre = 0
+ self.startpre = 0
+ self.code = False
+ self.br_toggle = ''
+ self.lastWasNL = 0
+ self.lastWasList = False
+ self.style = 0
+ self.style_def = {}
+ self.tag_stack = []
+ self.emphasis = 0
+ self.drop_white_space = 0
+ self.inheader = False
+ self.abbr_title = None # current abbreviation definition
+ self.abbr_data = None # last inner HTML (for abbr being defined)
+ self.abbr_list = {} # stack of abbreviations to write later
+ self.baseurl = baseurl
+
+ try: del unifiable_n[name2cp('nbsp')]
+ except KeyError: pass
+ unifiable['nbsp'] = '&nbsp_place_holder;'
+
+
+ def feed(self, data):
+ data = data.replace("</' + 'script>", "</ignore>")
+ HTMLParser.HTMLParser.feed(self, data)
+
+ def handle(self, data):
+ self.feed(data)
+ self.feed("")
+ return self.optwrap(self.close())
+
+ def outtextf(self, s):
+ self.outtextlist.append(s)
+ if s: self.lastWasNL = s[-1] == '\n'
+
+ def close(self):
+ HTMLParser.HTMLParser.close(self)
+
+ self.pbr()
+ self.o('', 0, 'end')
+
+ self.outtext = self.outtext.join(self.outtextlist)
+ if self.unicode_snob:
+ nbsp = unichr(name2cp('nbsp'))
+ else:
+ nbsp = u' '
+ self.outtext = self.outtext.replace(u'&nbsp_place_holder;', nbsp)
+
+ return self.outtext
+
+ def handle_charref(self, c):
+ self.o(self.charref(c), 1)
+
+ def handle_entityref(self, c):
+ self.o(self.entityref(c), 1)
+
+ def handle_starttag(self, tag, attrs):
+ self.handle_tag(tag, attrs, 1)
+
+ def handle_endtag(self, tag):
+ self.handle_tag(tag, None, 0)
+
+ def previousIndex(self, attrs):
+ """ returns the index of certain set of attributes (of a link) in the
+ self.a list
+
+ If the set of attributes is not found, returns None
+ """
+ if not has_key(attrs, 'href'): return None
+
+ i = -1
+ for a in self.a:
+ i += 1
+ match = 0
+
+ if has_key(a, 'href') and a['href'] == attrs['href']:
+ if has_key(a, 'title') or has_key(attrs, 'title'):
+ if (has_key(a, 'title') and has_key(attrs, 'title') and
+ a['title'] == attrs['title']):
+ match = True
+ else:
+ match = True
+
+ if match: return i
+
+ def drop_last(self, nLetters):
+ if not self.quiet:
+ self.outtext = self.outtext[:-nLetters]
+
+ def handle_emphasis(self, start, tag_style, parent_style):
+ """handles various text emphases"""
+ tag_emphasis = google_text_emphasis(tag_style)
+ parent_emphasis = google_text_emphasis(parent_style)
+
+ # handle Google's text emphasis
+ strikethrough = 'line-through' in tag_emphasis and self.hide_strikethrough
+ bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
+ italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
+ fixed = google_fixed_width_font(tag_style) and not \
+ google_fixed_width_font(parent_style) and not self.pre
+
+ if start:
+ # crossed-out text must be handled before other attributes
+ # in order not to output qualifiers unnecessarily
+ if bold or italic or fixed:
+ self.emphasis += 1
+ if strikethrough:
+ self.quiet += 1
+ if italic:
+ self.o(self.emphasis_mark)
+ self.drop_white_space += 1
+ if bold:
+ self.o(self.strong_mark)
+ self.drop_white_space += 1
+ if fixed:
+ self.o('`')
+ self.drop_white_space += 1
+ self.code = True
+ else:
+ if bold or italic or fixed:
+ # there must not be whitespace before closing emphasis mark
+ self.emphasis -= 1
+ self.space = 0
+ self.outtext = self.outtext.rstrip()
+ if fixed:
+ if self.drop_white_space:
+ # empty emphasis, drop it
+ self.drop_last(1)
+ self.drop_white_space -= 1
+ else:
+ self.o('`')
+ self.code = False
+ if bold:
+ if self.drop_white_space:
+ # empty emphasis, drop it
+ self.drop_last(2)
+ self.drop_white_space -= 1
+ else:
+ self.o(self.strong_mark)
+ if italic:
+ if self.drop_white_space:
+ # empty emphasis, drop it
+ self.drop_last(1)
+ self.drop_white_space -= 1
+ else:
+ self.o(self.emphasis_mark)
+ # space is only allowed after *all* emphasis marks
+ if (bold or italic) and not self.emphasis:
+ self.o(" ")
+ if strikethrough:
+ self.quiet -= 1
+
+ def handle_tag(self, tag, attrs, start):
+ #attrs = fixattrs(attrs)
+ if attrs is None:
+ attrs = {}
+ else:
+ attrs = dict(attrs)
+
+ if self.google_doc:
+ # the attrs parameter is empty for a closing tag. in addition, we
+ # need the attributes of the parent nodes in order to get a
+ # complete style description for the current element. we assume
+ # that google docs export well formed html.
+ parent_style = {}
+ if start:
+ if self.tag_stack:
+ parent_style = self.tag_stack[-1][2]
+ tag_style = element_style(attrs, self.style_def, parent_style)
+ self.tag_stack.append((tag, attrs, tag_style))
+ else:
+ dummy, attrs, tag_style = self.tag_stack.pop()
+ if self.tag_stack:
+ parent_style = self.tag_stack[-1][2]
+
+ if hn(tag):
+ self.p()
+ if start:
+ self.inheader = True
+ self.o(hn(tag)*"#" + ' ')
+ else:
+ self.inheader = False
+ return # prevent redundant emphasis marks on headers
+
+ if tag in ['p', 'div']:
+ if self.google_doc:
+ if start and google_has_height(tag_style):
+ self.p()
+ else:
+ self.soft_br()
+ else:
+ self.p()
+
+ if tag == "br" and start: self.o(" \n")
+
+ if tag == "hr" and start:
+ self.p()
+ self.o("* * *")
+ self.p()
+
+ if tag in ["head", "style", 'script']:
+ if start: self.quiet += 1
+ else: self.quiet -= 1
+
+ if tag == "style":
+ if start: self.style += 1
+ else: self.style -= 1
+
+ if tag in ["body"]:
+ self.quiet = 0 # sites like 9rules.com never close <head>
+
+ if tag == "blockquote":
+ if start:
+ self.p(); self.o('> ', 0, 1); self.start = 1
+ self.blockquote += 1
+ else:
+ self.blockquote -= 1
+ self.p()
+
+ if tag in ['em', 'i', 'u'] and not self.ignore_emphasis: self.o(self.emphasis_mark)
+ if tag in ['strong', 'b'] and not self.ignore_emphasis: self.o(self.strong_mark)
+ if tag in ['del', 'strike', 's']:
+ if start:
+ self.o("<"+tag+">")
+ else:
+ self.o("</"+tag+">")
+
+ if self.google_doc:
+ if not self.inheader:
+ # handle some font attributes, but leave headers clean
+ self.handle_emphasis(start, tag_style, parent_style)
+
+ if tag in ["code", "tt"] and not self.pre: self.o('`') #TODO: `` `this` ``
+ if tag == "abbr":
+ if start:
+ self.abbr_title = None
+ self.abbr_data = ''
+ if has_key(attrs, 'title'):
+ self.abbr_title = attrs['title']
+ else:
+ if self.abbr_title != None:
+ self.abbr_list[self.abbr_data] = self.abbr_title
+ self.abbr_title = None
+ self.abbr_data = ''
+
+ if tag == "a" and not self.ignore_links:
+ if start:
+ if has_key(attrs, 'href') and not (self.skip_internal_links and attrs['href'].startswith('#')):
+ self.astack.append(attrs)
+ self.maybe_automatic_link = attrs['href']
+ else:
+ self.astack.append(None)
+ else:
+ if self.astack:
+ a = self.astack.pop()
+ if self.maybe_automatic_link:
+ self.maybe_automatic_link = None
+ elif a:
+ if self.inline_links:
+ self.o("](" + escape_md(a['href']) + ")")
+ else:
+ i = self.previousIndex(a)
+ if i is not None:
+ a = self.a[i]
+ else:
+ self.acount += 1
+ a['count'] = self.acount
+ a['outcount'] = self.outcount
+ self.a.append(a)
+ self.o("][" + str(a['count']) + "]")
+
+ if tag == "img" and start and not self.ignore_images:
+ if has_key(attrs, 'src'):
+ attrs['href'] = attrs['src']
+ alt = attrs.get('alt', '')
+ self.o("![" + escape_md(alt) + "]")
+
+ if self.inline_links:
+ self.o("(" + escape_md(attrs['href']) + ")")
+ else:
+ i = self.previousIndex(attrs)
+ if i is not None:
+ attrs = self.a[i]
+ else:
+ self.acount += 1
+ attrs['count'] = self.acount
+ attrs['outcount'] = self.outcount
+ self.a.append(attrs)
+ self.o("[" + str(attrs['count']) + "]")
+
+ if tag == 'dl' and start: self.p()
+ if tag == 'dt' and not start: self.pbr()
+ if tag == 'dd' and start: self.o(' ')
+ if tag == 'dd' and not start: self.pbr()
+
+ if tag in ["ol", "ul"]:
+ # Google Docs create sub lists as top level lists
+ if (not self.list) and (not self.lastWasList):
+ self.p()
+ if start:
+ if self.google_doc:
+ list_style = google_list_style(tag_style)
+ else:
+ list_style = tag
+ numbering_start = list_numbering_start(attrs)
+ self.list.append({'name':list_style, 'num':numbering_start})
+ else:
+ if self.list: self.list.pop()
+ self.lastWasList = True
+ else:
+ self.lastWasList = False
+
+ if tag == 'li':
+ self.pbr()
+ if start:
+ if self.list: li = self.list[-1]
+ else: li = {'name':'ul', 'num':0}
+ if self.google_doc:
+ nest_count = self.google_nest_count(tag_style)
+ else:
+ nest_count = len(self.list)
+ self.o(" " * nest_count) #TODO: line up <ol><li>s > 9 correctly.
+ if li['name'] == "ul": self.o(self.ul_item_mark + " ")
+ elif li['name'] == "ol":
+ li['num'] += 1
+ self.o(str(li['num'])+". ")
+ self.start = 1
+
+ if tag in ["table", "tr"] and start: self.p()
+ if tag == 'td': self.pbr()
+
+ if tag == "pre":
+ if start:
+ self.startpre = 1
+ self.pre = 1
+ else:
+ self.pre = 0
+ self.p()
+
+ def pbr(self):
+ if self.p_p == 0:
+ self.p_p = 1
+
+ def p(self):
+ self.p_p = 2
+
+ def soft_br(self):
+ self.pbr()
+ self.br_toggle = ' '
+
+ def o(self, data, puredata=0, force=0):
+ if self.abbr_data is not None:
+ self.abbr_data += data
+
+ if not self.quiet:
+ if self.google_doc:
+ # prevent white space immediately after 'begin emphasis' marks ('**' and '_')
+ lstripped_data = data.lstrip()
+ if self.drop_white_space and not (self.pre or self.code):
+ data = lstripped_data
+ if lstripped_data != '':
+ self.drop_white_space = 0
+
+ if puredata and not self.pre:
+            data = re.sub(r'\s+', ' ', data)
+ if data and data[0] == ' ':
+ self.space = 1
+ data = data[1:]
+ if not data and not force: return
+
+ if self.startpre:
+ #self.out(" :") #TODO: not output when already one there
+ if not data.startswith("\n"): # <pre>stuff...
+ data = "\n" + data
+
+ bq = (">" * self.blockquote)
+ if not (force and data and data[0] == ">") and self.blockquote: bq += " "
+
+ if self.pre:
+ if not self.list:
+ bq += " "
+ #else: list content is already partially indented
+            for i in range(len(self.list)):
+ bq += " "
+ data = data.replace("\n", "\n"+bq)
+
+ if self.startpre:
+ self.startpre = 0
+ if self.list:
+ data = data.lstrip("\n") # use existing initial indentation
+
+ if self.start:
+ self.space = 0
+ self.p_p = 0
+ self.start = 0
+
+ if force == 'end':
+ # It's the end.
+ self.p_p = 0
+ self.out("\n")
+ self.space = 0
+
+ if self.p_p:
+ self.out((self.br_toggle+'\n'+bq)*self.p_p)
+ self.space = 0
+ self.br_toggle = ''
+
+ if self.space:
+ if not self.lastWasNL: self.out(' ')
+ self.space = 0
+
+ if self.a and ((self.p_p == 2 and self.links_each_paragraph) or force == "end"):
+ if force == "end": self.out("\n")
+
+ newa = []
+ for link in self.a:
+ if self.outcount > link['outcount']:
+ self.out(" ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
+ if has_key(link, 'title'): self.out(" ("+link['title']+")")
+ self.out("\n")
+ else:
+ newa.append(link)
+
+ if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
+
+ self.a = newa
+
+ if self.abbr_list and force == "end":
+ for abbr, definition in self.abbr_list.items():
+ self.out(" *[" + abbr + "]: " + definition + "\n")
+
+ self.p_p = 0
+ self.out(data)
+ self.outcount += 1
+
+ def handle_data(self, data):
+ if r'\/script>' in data: self.quiet -= 1
+
+ if self.style:
+ self.style_def.update(dumb_css_parser(data))
+
+ if not self.maybe_automatic_link is None:
+ href = self.maybe_automatic_link
+ if href == data and self.absolute_url_matcher.match(href):
+ self.o("<" + data + ">")
+ return
+ else:
+ self.o("[")
+ self.maybe_automatic_link = None
+
+ if not self.code and not self.pre:
+ data = escape_md_section(data, snob=self.escape_snob)
+ self.o(data, 1)
+
+ def unknown_decl(self, data): pass
+
+ def charref(self, name):
+ if name[0] in ['x','X']:
+ c = int(name[1:], 16)
+ else:
+ c = int(name)
+
+ if not self.unicode_snob and c in unifiable_n.keys():
+ return unifiable_n[c]
+ else:
+ try:
+ return unichr(c)
+ except NameError: #Python3
+ return chr(c)
+
+ def entityref(self, c):
+ if not self.unicode_snob and c in unifiable.keys():
+ return unifiable[c]
+ else:
+ try: name2cp(c)
+ except KeyError: return "&" + c + ';'
+ else:
+ try:
+ return unichr(name2cp(c))
+ except NameError: #Python3
+ return chr(name2cp(c))
+
+ def replaceEntities(self, s):
+ s = s.group(1)
+ if s[0] == "#":
+ return self.charref(s[1:])
+ else: return self.entityref(s)
+
+ r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
+ def unescape(self, s):
+ return self.r_unescape.sub(self.replaceEntities, s)
+
+ def google_nest_count(self, style):
+ """calculate the nesting count of google doc lists"""
+ nest_count = 0
+ if 'margin-left' in style:
+ nest_count = int(style['margin-left'][:-2]) / self.google_list_indent
+ return nest_count
+
+
+ def optwrap(self, text):
+ """Wrap all paragraphs in the provided text."""
+ if not self.body_width:
+ return text
+
+ assert wrap, "Requires Python 2.3."
+ result = ''
+ newlines = 0
+ for para in text.split("\n"):
+ if len(para) > 0:
+ if not skipwrap(para):
+ result += "\n".join(wrap(para, self.body_width))
+ if para.endswith(' '):
+ result += " \n"
+ newlines = 1
+ else:
+ result += "\n\n"
+ newlines = 2
+ else:
+ if not onlywhite(para):
+ result += para + "\n"
+ newlines = 1
+ else:
+ if newlines < 2:
+ result += "\n"
+ newlines += 1
+ return result
+
+ordered_list_matcher = re.compile(r'\d+\.\s')
+unordered_list_matcher = re.compile(r'[-\*\+]\s')
+md_chars_matcher = re.compile(r"([\\\[\]\(\)])")
+md_chars_matcher_all = re.compile(r"([`\*_{}\[\]\(\)#!])")
+md_dot_matcher = re.compile(r"""
+ ^ # start of line
+ (\s*\d+) # optional whitespace and a number
+ (\.) # dot
+ (?=\s) # lookahead assert whitespace
+ """, re.MULTILINE | re.VERBOSE)
+md_plus_matcher = re.compile(r"""
+ ^
+ (\s*)
+ (\+)
+ (?=\s)
+ """, flags=re.MULTILINE | re.VERBOSE)
+md_dash_matcher = re.compile(r"""
+ ^
+ (\s*)
+ (-)
+ (?=\s|\-) # followed by whitespace (bullet list, or spaced out hr)
+ # or another dash (header or hr)
+ """, flags=re.MULTILINE | re.VERBOSE)
+slash_chars = r'\`*_{}[]()#+-.!'
+md_backslash_matcher = re.compile(r'''
+ (\\) # match one slash
+ (?=[%s]) # followed by a char that requires escaping
+ ''' % re.escape(slash_chars),
+ flags=re.VERBOSE)
+
+def skipwrap(para):
+ # If the text begins with four spaces or one tab, it's a code block; don't wrap
+ if para[0:4] == ' ' or para[0] == '\t':
+ return True
+ # If the text begins with only two "--", possibly preceded by whitespace, that's
+ # an emdash; so wrap.
+ stripped = para.lstrip()
+ if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
+ return False
+ # I'm not sure what this is for; I thought it was to detect lists, but there's
+ # a <br>-inside-<span> case in one of the tests that also depends upon it.
+ if stripped[0:1] == '-' or stripped[0:1] == '*':
+ return True
+ # If the text begins with a single -, *, or +, followed by a space, or an integer,
+ # followed by a ., followed by a space (in either case optionally preceeded by
+ # whitespace), it's a list; don't wrap.
+ if ordered_list_matcher.match(stripped) or unordered_list_matcher.match(stripped):
+ return True
+ return False
+
+def wrapwrite(text):
+ text = text.encode('utf-8')
+ try: #Python3
+ sys.stdout.buffer.write(text)
+ except AttributeError:
+ sys.stdout.write(text)
+
+def html2text(html, baseurl=''):
+ h = HTML2Text(baseurl=baseurl)
+ return h.handle(html)
+
+def unescape(s, unicode_snob=False):
+ h = HTML2Text()
+ h.unicode_snob = unicode_snob
+ return h.unescape(s)
+
+def escape_md(text):
+ """Escapes markdown-sensitive characters within other markdown constructs."""
+ return md_chars_matcher.sub(r"\\\1", text)
+
+def escape_md_section(text, snob=False):
+ """Escapes markdown-sensitive characters across whole document sections."""
+ text = md_backslash_matcher.sub(r"\\\1", text)
+ if snob:
+ text = md_chars_matcher_all.sub(r"\\\1", text)
+ text = md_dot_matcher.sub(r"\1\\\2", text)
+ text = md_plus_matcher.sub(r"\1\\\2", text)
+ text = md_dash_matcher.sub(r"\1\\\2", text)
+ return text
+
+
+def main():
+ baseurl = ''
+
+ p = optparse.OptionParser('%prog [(filename|url) [encoding]]',
+ version='%prog ' + __version__)
+ p.add_option("--ignore-emphasis", dest="ignore_emphasis", action="store_true",
+ default=IGNORE_EMPHASIS, help="don't include any formatting for emphasis")
+ p.add_option("--ignore-links", dest="ignore_links", action="store_true",
+ default=IGNORE_ANCHORS, help="don't include any formatting for links")
+ p.add_option("--ignore-images", dest="ignore_images", action="store_true",
+ default=IGNORE_IMAGES, help="don't include any formatting for images")
+ p.add_option("-g", "--google-doc", action="store_true", dest="google_doc",
+ default=False, help="convert an html-exported Google Document")
+ p.add_option("-d", "--dash-unordered-list", action="store_true", dest="ul_style_dash",
+ default=False, help="use a dash rather than a star for unordered list items")
+ p.add_option("-e", "--asterisk-emphasis", action="store_true", dest="em_style_asterisk",
+ default=False, help="use an asterisk rather than an underscore for emphasized text")
+ p.add_option("-b", "--body-width", dest="body_width", action="store", type="int",
+ default=BODY_WIDTH, help="number of characters per output line, 0 for no wrap")
+ p.add_option("-i", "--google-list-indent", dest="list_indent", action="store", type="int",
+ default=GOOGLE_LIST_INDENT, help="number of pixels Google indents nested lists")
+ p.add_option("-s", "--hide-strikethrough", action="store_true", dest="hide_strikethrough",
+ default=False, help="hide strike-through text. only relevant when -g is specified as well")
+ p.add_option("--escape-all", action="store_true", dest="escape_snob",
+ default=False, help="Escape all special characters. Output is less readable, but avoids corner case formatting issues.")
+ (options, args) = p.parse_args()
+
+ # process input
+ encoding = "utf-8"
+ if len(args) > 0:
+ file_ = args[0]
+ if len(args) == 2:
+ encoding = args[1]
+ if len(args) > 2:
+ p.error('Too many arguments')
+
+ if file_.startswith('http://') or file_.startswith('https://'):
+ baseurl = file_
+ j = urllib.urlopen(baseurl)
+ data = j.read()
+ if encoding is None:
+ try:
+ from feedparser import _getCharacterEncoding as enc
+ except ImportError:
+ enc = lambda x, y: ('utf-8', 1)
+ encoding = enc(j.headers, data)[0]
+ if encoding == 'us-ascii':
+ encoding = 'utf-8'
+ else:
+ data = open(file_, 'rb').read()
+ if encoding is None:
+ try:
+ from chardet import detect
+ except ImportError:
+ detect = lambda x: {'encoding': 'utf-8'}
+ encoding = detect(data)['encoding']
+ else:
+ data = sys.stdin.read()
+
+ data = data.decode(encoding)
+ h = HTML2Text(baseurl=baseurl)
+ # handle options
+ if options.ul_style_dash: h.ul_item_mark = '-'
+ if options.em_style_asterisk:
+ h.emphasis_mark = '*'
+ h.strong_mark = '__'
+
+ h.body_width = options.body_width
+ h.list_indent = options.list_indent
+ h.ignore_emphasis = options.ignore_emphasis
+ h.ignore_links = options.ignore_links
+ h.ignore_images = options.ignore_images
+ h.google_doc = options.google_doc
+ h.hide_strikethrough = options.hide_strikethrough
+ h.escape_snob = options.escape_snob
+
+ wrapwrite(h.handle(data))
+
+
+if __name__ == "__main__":
+ main()
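
The converter accepts a file or URL argument with an optional encoding, or
reads stdin when given no arguments; typical runs (names hypothetical):

    python html2text.py page.html utf-8 > page.md
    python html2text.py http://example.com/ > example.md
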
diff --git a/bin/importer.py b/bin/importer.py
new file mode 100644
index 0000000..592b84d
--- /dev/null
+++ b/bin/importer.py
@@ -0,0 +1,351 @@
+#!/usr/bin/env python3
+# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
+
+# Copyright 2014-2019 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
+# Copyright 2014-2018 Claude (longneck) <longneck@scratchbook.ch>
+
+# This file is part of qutebrowser.
+#
+# qutebrowser is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# qutebrowser is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
+
+
+"""Tool to import data from other browsers.
+
+Currently importing bookmarks from Netscape Bookmark files and Mozilla
+profiles is supported.
+"""
+
+
+import argparse
+import sqlite3
+import os
+import urllib.parse
+import json
+import string
+
+browser_default_input_format = {
+ 'chromium': 'chrome',
+ 'chrome': 'chrome',
+ 'ie': 'netscape',
+ 'firefox': 'mozilla',
+ 'seamonkey': 'mozilla',
+ 'palemoon': 'mozilla',
+}
+
+
+def main():
+ args = get_args()
+ bookmark_types = []
+ output_format = None
+ input_format = args.input_format
+ if args.search_output:
+ bookmark_types = ['search']
+ if args.oldconfig:
+ output_format = 'oldsearch'
+ else:
+ output_format = 'search'
+ else:
+ if args.bookmark_output:
+ output_format = 'bookmark'
+ elif args.quickmark_output:
+ output_format = 'quickmark'
+ if args.import_bookmarks:
+ bookmark_types.append('bookmark')
+ if args.import_keywords:
+ bookmark_types.append('keyword')
+ if not bookmark_types:
+ bookmark_types = ['bookmark', 'keyword']
+ if not output_format:
+ output_format = 'quickmark'
+ if not input_format:
+ if args.browser:
+ input_format = browser_default_input_format[args.browser]
+ else:
+            # default to netscape
+ input_format = 'netscape'
+
+ import_function = {
+ 'netscape': import_netscape_bookmarks,
+ 'mozilla': import_moz_places,
+ 'chrome': import_chrome,
+ }
+ import_function[input_format](args.bookmarks, bookmark_types,
+ output_format)
+
+
+def get_args():
+ """Get the argparse parser."""
+ parser = argparse.ArgumentParser(
+ epilog="To import bookmarks from Chromium, Firefox or IE, "
+ "export them to HTML in your browsers bookmark manager. ")
+ parser.add_argument(
+ 'browser',
+ help="Which browser? {%(choices)s}",
+ choices=browser_default_input_format.keys(),
+ nargs='?',
+ metavar='browser')
+ parser.add_argument(
+ '-i',
+ '--input-format',
+ help='Which input format? (overrides browser default; "netscape" if '
+ 'neither given)',
+ choices=set(browser_default_input_format.values()),
+ required=False)
+ parser.add_argument(
+ '-b',
+ '--bookmark-output',
+ help="Output in bookmark format.",
+ action='store_true',
+ default=False,
+ required=False)
+ parser.add_argument(
+ '-q',
+ '--quickmark-output',
+ help="Output in quickmark format (default).",
+ action='store_true',
+ default=False,
+ required=False)
+ parser.add_argument(
+ '-s',
+ '--search-output',
+ help="Output config.py search engine format (negates -B and -K)",
+ action='store_true',
+ default=False,
+ required=False)
+ parser.add_argument(
+ '--oldconfig',
+ help="Output search engine format for old qutebrowser.conf format",
+ default=False,
+ action='store_true',
+ required=False)
+ parser.add_argument(
+ '-B',
+ '--import-bookmarks',
+ help="Import plain bookmarks (can be combiend with -K)",
+ action='store_true',
+ default=False,
+ required=False)
+ parser.add_argument(
+ '-K',
+ '--import-keywords',
+ help="Import keywords (can be combined with -B)",
+ action='store_true',
+ default=False,
+ required=False)
+ parser.add_argument(
+ 'bookmarks',
+ help="Bookmarks file (html format) or "
+ "profile folder (Mozilla format)")
+ args = parser.parse_args()
+ return args
+
+
+def search_escape(url):
+ """Escape URLs such that preexisting { and } are handled properly.
+
+ Will obviously trash a properly-formatted qutebrowser URL.
+ """
+ return url.replace('{', '{{').replace('}', '}}')
+
+
+def opensearch_convert(url):
+ """Convert a basic OpenSearch URL into something qutebrowser can use.
+
+ Exceptions:
+ KeyError:
+ An unknown and required parameter is present in the URL. This
+ usually means there's browser/addon specific functionality needed
+ to build the URL (I'm looking at you and your browser, Google) that
+ obviously won't be present here.
+ """
+ subst = {
+ 'searchTerms': '%s', # for proper escaping later
+ 'language': '*',
+ 'inputEncoding': 'UTF-8',
+ 'outputEncoding': 'UTF-8'
+ }
+
+ # remove optional parameters (even those we don't support)
+ for param in string.Formatter().parse(url):
+ if param[1]:
+ if param[1].endswith('?'):
+ url = url.replace('{' + param[1] + '}', '')
+ elif param[2] and param[2].endswith('?'):
+ url = url.replace('{' + param[1] + ':' + param[2] + '}', '')
+ return search_escape(url.format(**subst)).replace('%s', '{}')
+
+
+def import_netscape_bookmarks(bookmarks_file, bookmark_types, output_format):
+ """Import bookmarks from a NETSCAPE-Bookmark-file v1.
+
+    Generated by Chromium, Firefox, IE and possibly more browsers. Not all
+    of them export every possible bookmark type:
+ - Firefox mostly works with everything
+ - Chrome doesn't support keywords at all; searches are a separate
+ database
+ """
+ import bs4
+ with open(bookmarks_file, encoding='utf-8') as f:
+ soup = bs4.BeautifulSoup(f, 'html.parser')
+ bookmark_query = {
+ 'search': lambda tag: (
+ (tag.name == 'a') and
+ ('shortcuturl' in tag.attrs) and
+ ('%s' in tag['href'])),
+ 'keyword': lambda tag: (
+ (tag.name == 'a') and
+ ('shortcuturl' in tag.attrs) and
+ ('%s' not in tag['href'])),
+ 'bookmark': lambda tag: (
+ (tag.name == 'a') and
+ ('shortcuturl' not in tag.attrs) and
+ (tag.string)),
+ }
+ output_template = {
+ 'search': {
+ 'search':
+ "c.url.searchengines['{tag[shortcuturl]}'] = "
+ "'{tag[href]}' #{tag.string}"
+ },
+ 'oldsearch': {
+ 'search': '{tag[shortcuturl]} = {tag[href]} #{tag.string}',
+ },
+ 'bookmark': {
+ 'bookmark': '{tag[href]} {tag.string}',
+ 'keyword': '{tag[href]} {tag.string}'
+ },
+ 'quickmark': {
+ 'bookmark': '{tag.string} {tag[href]}',
+ 'keyword': '{tag[shortcuturl]} {tag[href]}'
+ }
+ }
+ bookmarks = []
+ for typ in bookmark_types:
+ tags = soup.findAll(bookmark_query[typ])
+ for tag in tags:
+ if typ == 'search':
+ tag['href'] = search_escape(tag['href']).replace('%s', '{}')
+ if tag['href'] not in bookmarks:
+ bookmarks.append(
+ output_template[output_format][typ].format(tag=tag))
+ for bookmark in bookmarks:
+ print(bookmark)
+
+
+def import_moz_places(profile, bookmark_types, output_format):
+ """Import bookmarks from a Mozilla profile's places.sqlite database."""
+ place_query = {
+ 'bookmark': (
+ "SELECT DISTINCT moz_bookmarks.title,moz_places.url "
+ "FROM moz_bookmarks,moz_places "
+ "WHERE moz_places.id=moz_bookmarks.fk "
+ "AND moz_places.id NOT IN (SELECT place_id FROM moz_keywords) "
+ "AND moz_places.url NOT LIKE 'place:%';"
+ ), # Bookmarks with no keywords assigned
+ 'keyword': (
+ "SELECT moz_keywords.keyword,moz_places.url "
+ "FROM moz_keywords,moz_places,moz_bookmarks "
+ "WHERE moz_places.id=moz_bookmarks.fk "
+ "AND moz_places.id=moz_keywords.place_id "
+ "AND moz_places.url NOT LIKE '%!%s%' ESCAPE '!';"
+ ), # Bookmarks with keywords assigned but no %s substitution
+ 'search': (
+ "SELECT moz_keywords.keyword, "
+ " moz_bookmarks.title, "
+ " search_conv(moz_places.url) AS url "
+ "FROM moz_keywords,moz_places,moz_bookmarks "
+ "WHERE moz_places.id=moz_bookmarks.fk "
+ "AND moz_places.id=moz_keywords.place_id "
+ "AND moz_places.url LIKE '%!%s%' ESCAPE '!';"
+ ) # bookmarks with keyword and %s substitution
+ }
+ out_template = {
+ 'bookmark': {
+ 'bookmark': '{url} {title}',
+ 'keyword': '{url} {keyword}'
+ },
+ 'quickmark': {
+ 'bookmark': '{title} {url}',
+ 'keyword': '{keyword} {url}'
+ },
+ 'oldsearch': {
+ 'search': '{keyword} {url} #{title}'
+ },
+ 'search': {
+ 'search': "c.url.searchengines['{keyword}'] = '{url}' #{title}"
+ }
+ }
+
+ def search_conv(url):
+ return search_escape(url).replace('%s', '{}')
+
+ places = sqlite3.connect(os.path.join(profile, "places.sqlite"))
+ places.create_function('search_conv', 1, search_conv)
+ places.row_factory = sqlite3.Row
+ c = places.cursor()
+ for typ in bookmark_types:
+ c.execute(place_query[typ])
+ for row in c:
+ print(out_template[output_format][typ].format(**row))
+
+
+def import_chrome(profile, bookmark_types, output_format):
+ """Import bookmarks and search keywords from Chrome-type profiles.
+
+ On Chrome, keywords and search engines are the same thing and handled in
+ their own database table; bookmarks cannot have associated keywords. This
+ is why the dictionary lookups here are much simpler.
+ """
+ out_template = {
+ 'bookmark': '{url} {name}',
+ 'quickmark': '{name} {url}',
+ 'search': "c.url.searchengines['{keyword}'] = '{url}'",
+ 'oldsearch': '{keyword} {url}'
+ }
+
+ if 'search' in bookmark_types:
+ webdata = sqlite3.connect(os.path.join(profile, 'Web Data'))
+ c = webdata.cursor()
+ c.execute('SELECT keyword,url FROM keywords;')
+ for keyword, url in c:
+ try:
+ url = opensearch_convert(url)
+ print(out_template[output_format].format(
+ keyword=keyword, url=url))
+ except KeyError:
+ print('# Unsupported parameter in url for {}; skipping....'.
+ format(keyword))
+
+ else:
+ with open(os.path.join(profile, 'Bookmarks'), encoding='utf-8') as f:
+ bookmarks = json.load(f)
+
+ def bm_tree_walk(bm, template):
+ """Recursive function to walk through bookmarks."""
+ if not isinstance(bm, dict):
+ return
+ assert 'type' in bm, bm
+ if bm['type'] == 'url':
+ if urllib.parse.urlparse(bm['url']).scheme != 'chrome':
+ print(template.format(**bm))
+ elif bm['type'] == 'folder':
+ for child in bm['children']:
+ bm_tree_walk(child, template)
+
+ for root in bookmarks['roots'].values():
+ bm_tree_walk(root, out_template[output_format])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/instagram.sh b/bin/instagram.sh
new file mode 100755
index 0000000..0ff8cec
--- /dev/null
+++ b/bin/instagram.sh
@@ -0,0 +1,2 @@
+#! /bin/sh
+GDK_BACKEND=x11 exo-open ~/bin/apps/gram.desktop
diff --git a/bin/kanbanscripts/addtask.sh b/bin/kanbanscripts/addtask.sh
new file mode 100755
index 0000000..6099772
--- /dev/null
+++ b/bin/kanbanscripts/addtask.sh
@@ -0,0 +1,2 @@
+#! /bin/sh
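+# Usage (assumed): addtask.sh "task description" [CODE]
+# Creates ~/gtd/"CODE - task description.txt"; the project code defaults to FLW.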
+touch ~/gtd/"${2:-FLW} - $1".txt
diff --git a/bin/kanbanscripts/alltasks.py b/bin/kanbanscripts/alltasks.py
new file mode 100755
index 0000000..197c4ba
--- /dev/null
+++ b/bin/kanbanscripts/alltasks.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python
+import os
+from os.path import abspath, dirname
+from operator import itemgetter
+
+
+class bcolors:
+ HEADER = '\033[95m'
+ OKBLUE = '\033[94m'
+ OKGREEN = '\033[92m'
+ WARNING = '\033[93m'
+ FAIL = '\033[91m'
+ ENDC = '\033[0m'
+
+
+# assuming that this script is in ~/bin/kanbanscripts this will work:
+BASE_DIR = abspath(dirname(dirname(dirname(__file__)))) + '/'
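+# Task files are assumed to be named "CODE - task description.txt" (see
+# addtask.sh); a "qq " prefix added by workon.sh marks a task as in progress.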
+project_list = []
+waiting_list = []
+for root, dirnames, filenames in os.walk('%sgtd' % (BASE_DIR)):
+ for f in filenames:
+ waiting = False
+ inprogress = False
+ if not f.startswith('@') and not f.startswith('.') and not f.startswith('Notes & Settings') and not f.startswith('projx') and not f.startswith("errands"):
+
+ task = f.split("-")[0].strip()
+ name = f.split("-")[1].strip()
+ if len(task) > 3:
+ inprogress = True
+ with open('%sgtd/%s' % (BASE_DIR, f), "r") as fl:
+ for line in fl:
+ for part in line.split():
+ if "@waiting" in part:
+ waiting = True
+ project_list.append({
+ "file": f,
+ "name": name,
+ "task": task,
+ "inprogress": inprogress,
+ "waiting": waiting,
+ })
+ break
+
+newlist = sorted(project_list, key=itemgetter('name', 'task'))
+
+# projects_file = open("/Users/sng/gtd/@projects.txt", 'w')
+l_name = ''
+for i, p in enumerate(newlist):
+ if not p['inprogress'] and not p['waiting']:
+ if l_name == p['name']:
+ print(p['file'][:-4])
+ else:
+ print(p['file'][:-4])
+ l_name = p['name']
+
+"""
+for i, p in enumerate(newlist):
+ if i == 0:
+ print(bcolors.WARNING + "\n------ TASKS WAITING -----" + bcolors.ENDC)
+ if p['waiting'] is True:
+ print(p['file'][:-4])
+"""
diff --git a/bin/kanbanscripts/alltasks_by_type.py b/bin/kanbanscripts/alltasks_by_type.py
new file mode 100755
index 0000000..e2affd8
--- /dev/null
+++ b/bin/kanbanscripts/alltasks_by_type.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python
+import os
+import sys
+from os.path import abspath, dirname
+from operator import itemgetter
+
+ARG = sys.argv[1]
+
+class bcolors:
+ HEADER = '\033[95m'
+ OKBLUE = '\033[94m'
+ OKGREEN = '\033[92m'
+ WARNING = '\033[93m'
+ FAIL = '\033[91m'
+ ENDC = '\033[0m'
+# assuming that this script is in ~/bin this will work:
+BASE_DIR = abspath(dirname(dirname(__file__))) + '/'
+project_list = []
+waiting_list = []
+for root, dirnames, filenames in os.walk('%sgtd' % (BASE_DIR)):
+ for f in filenames:
+ waiting = False
+ inprogress = False
+ if not f.startswith('@') and not f.startswith('.') and not f.startswith('Notes & Settings') and not f.startswith('projx') and not f.startswith("errands"):
+ name = f[f.find("[") + 1:f.find("]")]
+ task = f[f.find("]") + 2:-4]
+ with open('%sgtd/%s' % (BASE_DIR, f), "r") as fl:
+ for line in fl:
+ for part in line.split():
+ if "@waiting" in part:
+ waiting = True
+ if name == ARG:
+ project_list.append({
+ "file": f,
+ "name": name,
+ "task": task,
+ "inprogress": inprogress,
+ "waiting": waiting,
+ })
+ break
+
+newlist = sorted(project_list, key=itemgetter('name', 'task'))
+
+#projects_file = open("/Users/sng/gtd/@projects.txt", 'w')
+l_name = ''
+for i, p in enumerate(newlist):
+    if i == 0:
+        print bcolors.WARNING + "------ %s TASKS -----" % (ARG) + bcolors.ENDC
+ if not p['inprogress'] and not p['waiting']:
+ if l_name == p['name']:
+ print p['file'][:-4]
+ else:
+ if i != 0:
+ print " "
+ print p['file'][:-4]
+ l_name = p['name']
diff --git a/bin/kanbanscripts/havedone.sh b/bin/kanbanscripts/havedone.sh
new file mode 100755
index 0000000..fb47287
--- /dev/null
+++ b/bin/kanbanscripts/havedone.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+cd ~/gtd
+mkdir -p ~/gtd/done/$(date '+%Y-%m-%d')
+OIFS="$IFS"
+IFS=$'\n'
+for f in $(find . -maxdepth 1 -type f -print0 | xargs -0 grep -li @done)
+ do
+    printf '%s\n' "$f"
+    mv "$f" ~/gtd/done/$(date '+%Y-%m-%d')
+ done
+IFS="$OIFS"
diff --git a/bin/kanbanscripts/havedonegit.sh b/bin/kanbanscripts/havedonegit.sh
new file mode 100755
index 0000000..59cb066
--- /dev/null
+++ b/bin/kanbanscripts/havedonegit.sh
@@ -0,0 +1,10 @@
+OIFS="$IFS"
+IFS=$'\n'
+for f in $(git log --branches --pretty=format:"%s" --since="5am")
+ do
+    # watch out for / in commit messages
+    nf=${f//\//:}
+    echo "$nf"
+ touch ~/gtd/done/$(date '+%Y-%m-%d')/"$nf".txt
+ done
+IFS="$OIFS"
diff --git a/bin/kanbanscripts/review.py b/bin/kanbanscripts/review.py
new file mode 100755
index 0000000..a17cccc
--- /dev/null
+++ b/bin/kanbanscripts/review.py
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+import os
+from os.path import abspath, dirname
+from operator import itemgetter
+
+"""
+ parse my gtd folder
+ pull out files that start with the project name which is in []
+ loop through and grab the project and then all the associated
+ tasks (rest of the file name)
+    print the results out to a file called @projects.txt
+"""
+# assuming that this script is in ~/bin this will work:
+BASE_DIR = abspath(dirname(dirname(__file__))) + '/'
+project_list = []
+inprogress = False
+for root, dirnames, filenames in os.walk('%sgtd' % (BASE_DIR)):
+ for f in filenames:
+ if not f.startswith('@') and not f.startswith('.') and not f.startswith('Notes & Settings') and not f.startswith('projx') and not f.startswith("errands"):
+
+ name = f[f.find("[") + 1:f.find("]")]
+ task = f[f.find("]") + 2:-4]
+
+ if f.find("]") > 5:
+ inprogress = True
+ task = "* " + task
+ with open('%sgtd/%s' % (BASE_DIR, f), "r") as fl:
+ for line in fl:
+ for part in line.split():
+ if "@waiting" in part:
+ task = task + " -- " + line.rstrip()
+ project_list.append({
+ "file": f,
+ "name": name,
+ "task": task,
+ "inprogress": inprogress,
+ })
+ break
+
+newlist = sorted(project_list, key=itemgetter('name', 'task'))
+
+projects_file = open("/Users/sng/gtd/@projects.txt", 'w')
+l_name = ''
+
+for p in newlist:
+ if l_name == p['name']:
+ print >> projects_file, "\t" + p['task']
+ else:
+ print >> projects_file, "\n" + p['name']
+ print >> projects_file, "\t" + p['task']
+ l_name = p['name']
diff --git a/bin/kanbanscripts/showall.sh b/bin/kanbanscripts/showall.sh
new file mode 100755
index 0000000..968e6ce
--- /dev/null
+++ b/bin/kanbanscripts/showall.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+clear
+echo '---- ALL TASKS -----'
+python ~/bin/kanbanscripts/alltasks.py
+echo '----'
diff --git a/bin/kanbanscripts/showalltype.sh b/bin/kanbanscripts/showalltype.sh
new file mode 100755
index 0000000..149fc5c
--- /dev/null
+++ b/bin/kanbanscripts/showalltype.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+GREEN=$(tput setaf 2; tput bold)
+NORMAL=$(tput sgr0)
+function green() {
+ echo "$GREEN$*$NORMAL"
+}
+
+clear
+echo '---- TODO -----'
+python ~/bin/alltasks_by_type.py $1
+echo '.'
+echo '.'
diff --git a/bin/kanbanscripts/showdone.sh b/bin/kanbanscripts/showdone.sh
new file mode 100755
index 0000000..7e3d621
--- /dev/null
+++ b/bin/kanbanscripts/showdone.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+clear
+echo '----- TASKS COMPLETED TODAY -----'
+cd ~/gtd/done/$(date '+%Y-%m-%d')
+ls -1
diff --git a/bin/kanbanscripts/showinprogress.sh b/bin/kanbanscripts/showinprogress.sh
new file mode 100755
index 0000000..9b446dc
--- /dev/null
+++ b/bin/kanbanscripts/showinprogress.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+clear
+echo '----- TASKS IN PROGRESS -----'
+cd ~/gtd && ls | grep ^qq | cut -c 4- | cut -f 1 -d '.'
diff --git a/bin/kanbanscripts/workon.sh b/bin/kanbanscripts/workon.sh
new file mode 100755
index 0000000..90a6e1f
--- /dev/null
+++ b/bin/kanbanscripts/workon.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
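+# Usage (assumed): workon.sh "CODE - task description.txt"
+# Prefixes the file with "qq ", which the other kanban scripts read as in progress.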
+mv {,qq\ }"$1"
diff --git a/bin/lux-video-compress.sh b/bin/lux-video-compress.sh
new file mode 100755
index 0000000..0ff4d9d
--- /dev/null
+++ b/bin/lux-video-compress.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
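+# Usage (assumed): lux-video-compress.sh input.mov [bitrate_kbps]
+# Encodes 720p *-web.mp4 (H.264/AAC) and *-web.webm (VP8/Vorbis) copies;
+# the bitrate defaults to 2000 kbps.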
+BITRATE=$2
+FILE=$1
+ffmpeg -i "$FILE" -codec:v libx264 -profile:v high -preset slower -b:v ${BITRATE:=2000}k -vf scale=-1:720 -threads 0 -codec:a aac -strict experimental "${FILE%%.*}"-web.mp4
+ffmpeg -i "$FILE" -c:v libvpx -quality good -cpu-used 0 -b:v ${BITRATE:=2000}k -qmin 10 -qmax 42 -maxrate 500k -bufsize 1500k -threads 8 -vf scale=-1:720 -c:a libvorbis -b:a 192k -f webm "${FILE%%.*}"-web.webm
diff --git a/bin/maildir2mbox.py b/bin/maildir2mbox.py
new file mode 100644
index 0000000..2efc7b5
--- /dev/null
+++ b/bin/maildir2mbox.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Frédéric Grosshans, 19 January 2012
+Nathan R. Yergler, 6 June 2010
+
+This file does not contain sufficient creative expression to invoke
+assertion of copyright. No warranty is expressed or implied; use at
+your own risk.
+
+---
+
+Uses Python's included mailbox library to convert mail archives from
+maildir [http://en.wikipedia.org/wiki/Maildir] to
+mbox [http://en.wikipedia.org/wiki/Mbox] format, including subfolders.
+
+See http://docs.python.org/library/mailbox.html#mailbox.Mailbox for
+full documentation on this library.
+
+---
+
+To run, save as md2mb.py and run:
+
+$ python md2mb.py [maildir_path] [mbox_filename]
+
+[maildir_path] should be the path to the actual maildir (containing new,
+cur, tmp, and the subfolders, which are hidden directories with names like
+.subfolder.subsubfolder.subsubsubfolder);
+
+[mbox_filename] will be newly created, as well as a [mbox_filename].sbd
+directory.
+"""
+
+import mailbox
+import sys
+import email
+import os
+
+def maildir2mailbox(maildirname, mboxfilename):
+ """
+ slightly adapted from maildir2mbox.py,
+ Nathan R. Yergler, 6 June 2010
+ http://yergler.net/blog/2010/06/06/batteries-included-or-maildir-to-mbox-again/
+
+
+ """
+ # open the existing maildir and the target mbox file
+ maildir = mailbox.Maildir(maildirname, email.message_from_file)
+ mbox = mailbox.mbox(mboxfilename)
+
+ # lock the mbox
+ mbox.lock()
+
+ # iterate over messages in the maildir and add to the mbox
+ for msg in maildir:
+ mbox.add(msg)
+
+ # close and unlock
+ mbox.close()
+ maildir.close()
+
+
+dirname=sys.argv[-2]
+mboxname=sys.argv[-1]
+print(dirname +' -> ' +mboxname)
+mboxdirname=mboxname+'.sbd'
+maildir2mailbox(dirname,mboxname)
+if not os.path.exists(mboxdirname): os.makedirs(mboxdirname)
+
+listofdirs=[dn for dn in os.walk(dirname).next()[1] if dn not in ['new', 'cur', 'tmp']]
+for curfold in listofdirs:
+ curlist=[mboxname]+curfold.split('.')
+ curpath=os.path.join(*[dn+'.sbd' for dn in curlist if dn])
+ if not os.path.exists(curpath): os.makedirs(curpath)
+ print('| ' +curfold +' -> '+curpath[:-4])
+ maildir2mailbox(os.path.join(dirname,curfold),curpath[:-4])
+
+print('Done') \ No newline at end of file
diff --git a/bin/messages.sh b/bin/messages.sh
new file mode 100755
index 0000000..6798742
--- /dev/null
+++ b/bin/messages.sh
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+exo-open ~/bin/apps/messages.desktop
diff --git a/bin/moonphase.py b/bin/moonphase.py
new file mode 100755
index 0000000..1bee795
--- /dev/null
+++ b/bin/moonphase.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+"""
+moonphase.py - Calculate Lunar Phase
+Author: Sean B. Palmer, inamidst.com
+Cf. http://en.wikipedia.org/wiki/Lunar_phase#Lunar_phase_calculation
+"""
+
+import math
+import decimal
+import datetime
+
+dec = decimal.Decimal
+
+
+def position(now=None):
+ if now is None:
+ now = datetime.datetime.now()
+
+ diff = now - datetime.datetime(2001, 1, 1)
+ days = dec(diff.days) + (dec(diff.seconds) / dec(86400))
+ lunations = dec("0.20439731") + (days * dec("0.03386319269"))
+
+ return lunations % dec(1)
+
+
+def phase(pos):
+ index = (pos * dec(8)) + dec("0.5")
+ index = math.floor(index)
+ return {
+ 0: "New Moon",
+ 1: "Waxing Crescent",
+ 2: "First Quarter",
+ 3: "Waxing Gibbous",
+ 4: "Full Moon",
+ 5: "Waning Gibbous",
+ 6: "Last Quarter",
+ 7: "Waning Crescent"
+ }[int(index) & 7]
+
+
+def main():
+ pos = position()
+ phasename = phase(pos)
+
+ roundedpos = round(float(pos), 3)
+ num_days = float(roundedpos * 29.6)
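+    # For example, a position of 0.42 would print:
+    #   Waxing Gibbous (0.42) 12.432 days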
+ print("%s (%s) %s days" % (phasename, roundedpos, num_days))
+
+if __name__=="__main__":
+ main()
diff --git a/bin/mpd-playlist-export.py b/bin/mpd-playlist-export.py
new file mode 100755
index 0000000..0e1e393
--- /dev/null
+++ b/bin/mpd-playlist-export.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+"""
+Simple tool that copies current mpd playlist's files to an external directory.
+It also creates an m3u playlist file there (e.g. for Android devices).
+"""
+
+__author__ = 'Marcin Rataj (http://lidel.org)'
+__license__ = 'CC0 (public domain)'
+
+
+# requires: python-mpd >=0.3.0 (http://jatreuman.indefero.net/p/python-mpd/)
+from mpd import (MPDClient, CommandError)
+from random import choice
+from socket import error as SocketError
+from shutil import copy
+import argparse
+import os
+import sys
+
+
+## SETTINGS
+HOST = 'localhost'
+PORT = '6600'
+PASSWORD = False
+LIBRARY_DIR = '/home/lxf/music/library'
+
+
+def get_paths(root):
+ client = MPDClient()
+ client.connect(host=HOST, port=PORT)
+
+ if PASSWORD:
+ client.password(PASSWORD)
+
+ playlist = client.playlist()
+ client.disconnect()
+
+ return [entry.replace('file: ', root) for entry in playlist if entry.startswith('file: ')]
+
+
+def copy_files(files, args):
+ dest = args.output
+ if not os.path.exists(dest):
+ os.makedirs(dest)
+ for file in files:
+ if not args.quiet:
+ print("copying '{0}' to '{1}'".format(os.path.basename(file), dest))
+ copy(file, dest)
+
+
+def create_playlist(files, args):
+ dest = args.output
+ name = os.path.basename(os.path.normpath(args.output))
+ playlist_file = open("{0}/{1}.m3u".format(dest, name), 'w')
+ for song in files:
+ playlist_file.write(os.path.basename(song) + '\n')
+ playlist_file.close()
+
+
+def main(parser):
+ try:
+ args = parser.parse_args()
+ playlist_files = get_paths(args.library+"/")
+ copy_files(playlist_files, args)
+ create_playlist(playlist_files, args)
+ except Exception as e:
+ print("Error occured:\n{0}\nAborted.".format(e))
+ return 1
+ else:
+ return 0
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('-quiet', default=False, action='store_true', help='Be quiet.')
+ parser.add_argument('-output', required=True, help='Output directory. (required)')
+ parser.add_argument('-library', default=LIBRARY_DIR, help="MPD library path. Default: {0}".format(LIBRARY_DIR))
+ sys.exit(main(parser))
+
+# vim: ai ts=4 sw=4 sts=4 expandtab
diff --git a/bin/mqq.sh b/bin/mqq.sh
new file mode 100755
index 0000000..6543d8e
--- /dev/null
+++ b/bin/mqq.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+# $HOME/Scripts/mqq
+
+#message_id=$(grep -E -i "^Message-id:" | sed -e 's/.*<\(.*\)>/\1/')
+BODY=$(sed -n '/^Date/,$ p' | grep -E -i '^[^X]+' | sed -En '/^Date/,/application\// p')
+echo "${BODY}" | sed -En '/^Date/,/text\/html/ p' > /Users/sng/Notes/"$1".txt
diff --git a/bin/mts-convert.sh b/bin/mts-convert.sh
new file mode 100755
index 0000000..8780436
--- /dev/null
+++ b/bin/mts-convert.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+for f in *.MTS; do
+    ffmpeg -i "$f" -vcodec copy -acodec copy "${f%.MTS}.mp4"
+done
diff --git a/bin/mutt-notmuch-py.py b/bin/mutt-notmuch-py.py
new file mode 100755
index 0000000..60708c5
--- /dev/null
+++ b/bin/mutt-notmuch-py.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+"""
+mutt-notmuch-py
+
+This is a Gmail-only version of the original mutt-notmuch script.
+
+It will interactively ask you for a search query and then symlink the matching
+messages to $HOME/.cache/mutt_results.
+
+Add this to your muttrc.
+
+macro index / "<enter-command>unset wait_key<enter><shell-escape>mutt-notmuch-py<enter><change-folder-readonly>~/.cache/mutt_results<enter>" \
+ "search mail (using notmuch)"
+
+This script overwrites $HOME/.cache/mutt_results each time you run a query.
+
+Install this by adding this file somewhere on your PATH.
+
+Only tested on OSX Lion.
+
+(c) 2012 - Honza Pokorny
+Licensed under BSD
+"""
+import hashlib, sys
+from subprocess import getoutput
+from mailbox import Maildir
+from optparse import OptionParser
+
+
+def digest(filename):
+ with open(filename, "r", encoding='utf-8') as f:
+        return hashlib.sha1(f.read().encode('utf-8')).hexdigest()
+
+
+def pick_all_mail(messages):
+ for m in messages:
+ if 'All Mail' in m:
+ return m
+
+
+def empty_dir(directory):
+ box = Maildir(directory)
+ box.clear()
+
+
+def command(cmd):
+ return getoutput(cmd)
+
+
+def main(dest_box):
+    query = input('Query: ')
+
+ command('mkdir -p %s' % dest_box)
+ command('mkdir -p %s/cur' % dest_box)
+ command('mkdir -p %s/new' % dest_box)
+
+ empty_dir(dest_box)
+
+ files = command('notmuch search --output=files %s' % query).split('\n')
+ files = filter(None, files)
+
+ data = {}
+ messages = []
+
+ for f in files:
+ sha = digest(f)
+ if sha not in data.keys():
+ data[sha] = [f]
+ else:
+ data[sha].append(f)
+
+ for sha in data.keys():
+ if is_gmail and len(data[sha]) > 1:
+ messages.append(pick_all_mail(data[sha]))
+ else:
+ messages.append(data[sha][0])
+
+ for m in messages:
+ command('ln -s "%s" %s/cur/' % (m, dest_box))
+
+
+if __name__ == '__main__':
+ global is_gmail
+
+ p = OptionParser("usage: %prog [OPTIONS] [RESULTDIR]")
+ p.add_option('-g', '--gmail', dest='gmail',
+ action='store_true', default=True,
+ help='gmail-specific behavior')
+ p.add_option('-G', '--not-gmail', dest='gmail',
+ action='store_false',
+ help='gmail-specific behavior')
+ (options, args) = p.parse_args()
+
+ is_gmail = options.gmail
+
+ if args:
+ dest = args[0]
+ else:
+ dest = '~/.cache/mutt_results'
+
+ main(dest.rstrip('/'))
diff --git a/bin/natlangdate.py b/bin/natlangdate.py
new file mode 100755
index 0000000..4fe7528
--- /dev/null
+++ b/bin/natlangdate.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+import os
+import sys
+import re
+import parsedatetime.parsedatetime as pdt
+#import parsedatetime.parsedatetime_consts as pdc
+import datetime
+
+# Define Globals
+pyDate = os.getenv('KMVAR_myDate')
+
+# Function for parsing a string and returning a datetime value
+def datetimeFromString( s ):
+ #c = pdc.Constants()
+ p = pdt.Calendar()
+ result, what = p.parse( s )
+ dt = 0
+ # See: http://stackoverflow.com/questions/1810432/handling-the-different-results-from-parsedatetime
+ # what was returned (see http://code-bear.com/code/parsedatetime/docs/)
+ # 0 = failed to parse
+ # 1 = date (with current time, as a struct_time)
+ # 2 = time (with current date, as a struct_time)
+ # 3 = datetime
+ if what in (1,2,3):
+ # result is struct_time
+ dt = datetime.datetime( *result[:6] )
+    if what == 0:
+        # Failed to parse
+        raise ValueError, ("Don't understand date '"+s+"'")
+ return dt
+NLPDate = datetimeFromString(pyDate)
+print NLPDate
diff --git a/bin/open-in-pane b/bin/open-in-pane
new file mode 100755
index 0000000..f5f07ab
--- /dev/null
+++ b/bin/open-in-pane
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+W3M='/usr/bin/w3m'
+
+# If the window has only one pane, create one by splitting.
+pane_count=`tmux list-panes -F '#{line}' | wc -l`
+if [ $pane_count -lt 2 ]; then
+ tmux split-window -h
+fi
+
+# Start my reader if it ain't running already, and send it the URL to open.
+
+w3m_process_count=`ps auxw | grep "$W3M" | grep -cv grep`
+
+if [ $w3m_process_count = '1' ]; then
+ tmux send-keys -t 2 "TU" "C-u" "$1" enter
+ tmux select-pane -t 2
+else
+ tmux send-keys -t 2 "$W3M \"$1\"" enter
+ tmux select-pane -t 2
+fi
diff --git a/bin/opener.sh b/bin/opener.sh
new file mode 100755
index 0000000..e873b0d
--- /dev/null
+++ b/bin/opener.sh
@@ -0,0 +1,3 @@
+#! /bin/bash
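+# Focus the window titled "Ranger", open a new tab (ranger's default C-n
+# binding is assumed), then type a "cd" into the directory passed as $1.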
+
+wmctrl -a Ranger; sleep 1 && xdotool key Control_L+n && xdotool key Shift+\: && xdotool type "cd ${1}" && xdotool key Return
diff --git a/bin/parse_subids.py b/bin/parse_subids.py
new file mode 100644
index 0000000..5e8b8f2
--- /dev/null
+++ b/bin/parse_subids.py
@@ -0,0 +1,19 @@
+from bs4 import BeautifulSoup
+import markdown
+
+
+with open('/home/lxf/writing/wired/bf-photo-deals.txt', 'r') as f:
+ data = f.read()
+result = open('/home/lxf/writing/wired/subid-links.txt', 'a')
+soup = BeautifulSoup(markdown.markdown(data), "lxml")
+subid = "blackfridayphotodeals2019"
+page_url = "https://www.wired.com/story/best-black-friday-photography-deals-2019/"
+for a in soup.find_all('a'):
+ start = a['href'].split('//')[1][:4]
+ if str(start) == 'best' or start == 'goto':
+ l = "%s,,%s,Impact,%s\n" % (page_url, subid, a['href'])
+ result.write(l)
+result.close()
+
+def parse_links(f):
+    # TODO: stub; link parsing not yet implemented
+    pass
diff --git a/bin/parse_vivaldi_notes.py b/bin/parse_vivaldi_notes.py
new file mode 100644
index 0000000..078a780
--- /dev/null
+++ b/bin/parse_vivaldi_notes.py
@@ -0,0 +1,8 @@
+import json
+with open("/home/lxf/.config/vivaldi-snapshot/Default/Notes", 'r') as data_file:
+ data = json.load(data_file)
+
+for d in data['children']:
+    if d['subject'] == 'lbh notes':
+        print(d['subject'])
diff --git a/bin/pass-completion.bash b/bin/pass-completion.bash
new file mode 100644
index 0000000..456485b
--- /dev/null
+++ b/bin/pass-completion.bash
@@ -0,0 +1,130 @@
+# completion file for bash
+
+# Copyright (C) 2012 - 2014 Jason A. Donenfeld <Jason@zx2c4.com> and
+# Brian Mattern <rephorm@rephorm.com>. All Rights Reserved.
+# This file is licensed under the GPLv2+. Please see COPYING for more information.
+
+_pass_complete_entries () {
+ prefix="${PASSWORD_STORE_DIR:-$HOME/.password-store/}"
+ prefix="${prefix%/}/"
+ suffix=".gpg"
+ autoexpand=${1:-0}
+
+ local IFS=$'\n'
+ local items=($(compgen -f $prefix$cur))
+
+ # Remember the value of the first item, to see if it is a directory. If
+ # it is a directory, then don't add a space to the completion
+ local firstitem=""
+ # Use counter, can't use ${#items[@]} as we skip hidden directories
+ local i=0
+
+ for item in ${items[@]}; do
+ [[ $item =~ /\.[^/]*$ ]] && continue
+
+ # if there is a unique match, and it is a directory with one entry
+ # autocomplete the subentry as well (recursively)
+ if [[ ${#items[@]} -eq 1 && $autoexpand -eq 1 ]]; then
+ while [[ -d $item ]]; do
+ local subitems=($(compgen -f "$item/"))
+ local filtereditems=( )
+ for item2 in "${subitems[@]}"; do
+ [[ $item2 =~ /\.[^/]*$ ]] && continue
+ filtereditems+=( "$item2" )
+ done
+ if [[ ${#filtereditems[@]} -eq 1 ]]; then
+ item="${filtereditems[0]}"
+ else
+ break
+ fi
+ done
+ fi
+
+ # append / to directories
+ [[ -d $item ]] && item="$item/"
+
+ item="${item%$suffix}"
+ COMPREPLY+=("${item#$prefix}")
+ if [[ $i -eq 0 ]]; then
+ firstitem=$item
+ fi
+ let i+=1
+ done
+
+ # The only time we want to add a space to the end is if there is only
+ # one match, and it is not a directory
+ if [[ $i -gt 1 || ( $i -eq 1 && -d $firstitem ) ]]; then
+ compopt -o nospace
+ fi
+}
+
+_pass_complete_folders () {
+ prefix="${PASSWORD_STORE_DIR:-$HOME/.password-store/}"
+ prefix="${prefix%/}/"
+
+ local IFS=$'\n'
+ local items=($(compgen -d $prefix$cur))
+ for item in ${items[@]}; do
+ [[ $item == $prefix.* ]] && continue
+ COMPREPLY+=("${item#$prefix}/")
+ done
+}
+
+_pass_complete_keys () {
+ local IFS=$'\n'
+    # Extract names and email addresses from gpg2 --list-secret-keys
+ local keys="$(gpg2 --list-secret-keys --with-colons | cut -d : -f 10 | sort -u | sed '/^$/d')"
+ COMPREPLY+=($(compgen -W "${keys}" -- ${cur}))
+}
+
+_pass()
+{
+ COMPREPLY=()
+ local cur="${COMP_WORDS[COMP_CWORD]}"
+ local commands="init ls find grep show insert generate edit rm mv cp git help version"
+ if [[ $COMP_CWORD -gt 1 ]]; then
+ local lastarg="${COMP_WORDS[$COMP_CWORD-1]}"
+ case "${COMP_WORDS[1]}" in
+ init)
+ if [[ $lastarg == "-p" || $lastarg == "--path" ]]; then
+ _pass_complete_folders
+ compopt -o nospace
+ else
+ COMPREPLY+=($(compgen -W "-p --path" -- ${cur}))
+ _pass_complete_keys
+ fi
+ ;;
+ ls|list|edit)
+ _pass_complete_entries
+ ;;
+ show|-*)
+ COMPREPLY+=($(compgen -W "-c --clip" -- ${cur}))
+ _pass_complete_entries 1
+ ;;
+ insert)
+ COMPREPLY+=($(compgen -W "-e --echo -m --multiline -f --force" -- ${cur}))
+ _pass_complete_entries
+ ;;
+ generate)
+ COMPREPLY+=($(compgen -W "-n --no-symbols -c --clip -f --force -i --in-place" -- ${cur}))
+ _pass_complete_entries
+ ;;
+ cp|copy|mv|rename)
+ COMPREPLY+=($(compgen -W "-f --force" -- ${cur}))
+ _pass_complete_entries
+ ;;
+ rm|remove|delete)
+ COMPREPLY+=($(compgen -W "-r --recursive -f --force" -- ${cur}))
+ _pass_complete_entries
+ ;;
+ git)
+ COMPREPLY+=($(compgen -W "init push pull config log reflog rebase" -- ${cur}))
+ ;;
+ esac
+ else
+ COMPREPLY+=($(compgen -W "${commands}" -- ${cur}))
+ _pass_complete_entries 1
+ fi
+}
+
+complete -o filenames -F _pass pass
diff --git a/bin/passmenu.sh b/bin/passmenu.sh
new file mode 100755
index 0000000..7a9c517
--- /dev/null
+++ b/bin/passmenu.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+shopt -s nullglob globstar
+
+typeit=0
+if [[ $1 == "--type" ]]; then
+ typeit=1
+ shift
+fi
+
+prefix=${PASSWORD_STORE_DIR-~/.password-store}
+password_files=( "$prefix"/**/*.gpg )
+password_files=( "${password_files[@]#"$prefix"/}" )
+password_files=( "${password_files[@]%.gpg}" )
+
+password=$(printf '%s\n' "${password_files[@]}" | dmenu "$@")
+
+[[ -n $password ]] || exit
+
+if [[ $typeit -eq 0 ]]; then
+ pass show -c "$password" 2>/dev/null
+else
+ pass show "$password" | { read -r pass; printf %s "$pass"; } |
+ xdotool type --clearmodifiers --file -
+fi
diff --git a/bin/pinboard_links_to_markdown_files.py b/bin/pinboard_links_to_markdown_files.py
new file mode 100755
index 0000000..700d1aa
--- /dev/null
+++ b/bin/pinboard_links_to_markdown_files.py
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+import codecs
+import os
+import requests
+
+
+def get_pinboard_links():
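+    # ~/.pinboard-credentials is assumed to hold a single "username:API_token" line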
+ with open(os.environ['HOME'] + '/.pinboard-credentials') as credentials:
+ for line in credentials:
+            me, token = line.strip().split(':')
+ url = 'https://api.pinboard.in/v1/posts/all?results=80&format=json&auth_token=' + me + ':' + token
+ r = requests.get(url, timeout=9.001)
+ print(url)
+ data = r.json()
+    print(data)
+ for item in data:
+ print(item['description'])
+ md = get_markdown(item['href'])
+ body = md['markdown']
+ file_name = str(item['description']).replace("/", "")
+ file_path = "%s/Documents/bookmarks-tmp/%s.txt" % (os.environ['HOME'], file_name.lower())
+ date = item['time']
+ tags = ", ".join(t for t in str(item['tags']).split(' '))
+ preamble = "---\ntitle: %s\ndate: %s\nsource: %s\ntags: %s\n\n---\n\n" % (md['title'], date, item['href'], tags)
+        f = codecs.open(file_path, "w", 'utf8')
+ f.write(preamble)
+ f.write(body)
+ f.close()
+
+
+def get_markdown(source):
+ url = "http://heckyesmarkdown.com/go/?read=1&preview=0&showframe=0&output=json&u=%s" % (source)
+ print(url)
+ r = requests.get(url, timeout=15.001)
+ data = r.json()
+ return data
+
+get_pinboard_links()
diff --git a/bin/pomodoro.sh b/bin/pomodoro.sh
new file mode 100755
index 0000000..3ab09ac
--- /dev/null
+++ b/bin/pomodoro.sh
@@ -0,0 +1,139 @@
+#!/usr/bin/env bash
+
+###################
+# Morgan Reece (Phillips)
+# mrrrgn.com
+# wiki.mrrrgn.com
+#
+# This script will notify the user when to work and when to rest
+# in intervals as specified in http://en.wikipedia.org/wiki/Pomodoro_Technique
+# Notifications are sent out via terminal-notifier http://rubygems.org/gems/terminal-notifier
+# on OS X or the notify2 library on Linux. The script will also blacklist any domains listed
+# in a file named .pomodoro.urls.blacklist stored in the user's home directory.
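+#
+# Usage (assumed; must be run as root since it rewrites /etc/hosts):
+#   sudo pomodoro.sh start
+#   sudo pomodoro.sh stop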
+####################
+
+set -e
+
+SUDO_USER=${SUDO_USER}; # So we can run commands as non-root
+
+### INTERVAL CONSTANTS ###
+
+WORK_INTERVAL_SEC=1500; # 25 minutes of work
+WORK_MSG="begin-working....";
+
+REST_INTERVAL_SEC=300; # 5 minutes of rest
+REST_MSG="take-a-rest....";
+
+SET_SIZE=4; # Take a long rest after this many iterations
+SET_INTERVAL_SEC=1200; # 20 minute post-set rest
+SET_MSG="set-has-ended....";
+
+### Set up OS Specific commands and variables ###
+
+if [ `uname` = "Darwin" ]; then
+ ### OS X ###
+
+ # Make sure terminal-notifier is installed
+ if ! which terminal-notifier
+ then gem install terminal-notifier;
+ fi
+
+ CMD="terminal-notifier -title -sound default -message";
+
+ # Because we can't count on ~
+ USER_HOME="/Users/$SUDO_USER/";
+ CMD_AS_USER="sudo -u $SUDO_USER"; # We need this for notifications to work
+else
+ ### Debian/Linux ###
+ USER_HOME="/home/$SUDO_USER/";
+ CMD_AS_USER="";
+
+ # Because Linux can run headless, a more complex
+ # notification command is needed, that takes args,
+ # so a function holds all of the logic.
+ function linux_notify() {
+ # A text only notification
+ echo $1 | wall;
+ }
+ CMD="linux_notify";
+    if [ -n "$DISPLAY" ]; then
+ if python -m notify2 2>/dev/null; then
+ function linux_notify() {
+ python -c "import notify2;notify2.init('psh');m=notify2.Notification('pomodoro.sh', $1);m.show();";
+ }
+ fi
+ fi
+fi
+
+
+### FILE LOCATIONS ###
+
+BACKUPHOSTSFILE=.pomodoro.etc.hosts.bak; # store the unhampered hosts file here
+BLACKLIST=.pomodoro.urls.blacklist; # urls to blacklist during work
+PIDFILE=.pomodoro.pid;
+
+### OS agnostic commands ###
+CMD_WORK_START="$CMD_AS_USER $CMD '$WORK_MSG'";
+CMD_REST_START="$CMD_AS_USER $CMD '$REST_MSG'";
+CMD_SET_END="$CMD_AS_USER $CMD '$SET_MSG'";
+
+### FUNCTIONS ###
+
+function root_check {
+ if [ "$(id -u)" != "0" ]; then
+ echo "This script must be run as root" 1>&2;
+ exit 1;
+ fi
+}
+
+function blacklist_hosts_file {
+ if test -f "$USER_HOME$BLACKLIST"; then
+ cp /etc/hosts "$USER_HOME$BACKUPHOSTSFILE";
+ # A simple checksum for our backup
+ if [ $(wc -l < /etc/hosts) = $(wc -l < "$USER_HOME$BACKUPHOSTSFILE") ]; then
+ # Append our blacklist values
+ cat "$USER_HOME$BLACKLIST" | awk '{print "127.0.0.1 "$1}' >> /etc/hosts;
+ fi
+ fi
+}
+
+function unblacklist_hosts_file {
+ if test -f "$USER_HOME$BACKUPHOSTSFILE"; then
+ # Overwrite the current hosts file with our backup
+ cp "$USER_HOME$BACKUPHOSTSFILE" /etc/hosts;
+ # rm the old backup file after a checksum
+ if [ $(wc -l < /etc/hosts) = $(wc -l < "$USER_HOME$BACKUPHOSTSFILE") ]; then
+ rm "$USER_HOME$BACKUPHOSTSFILE";
+ fi
+ fi
+}
+
+### Only attempt to run if we're root ###
+root_check;
+
+### Start Working! ###
+if [ x"$1" = xstart ]; then
+ if test -f "$USER_HOME$PIDFILE"; then exit; fi
+ echo $$ > "$USER_HOME$PIDFILE"
+ while true; do
+ for i in $(seq 1 $SET_SIZE); do
+ # Work starts
+ blacklist_hosts_file;
+ $CMD_WORK_START;
+ $CMD_AS_USER sleep $WORK_INTERVAL_SEC;
+ # Rest starts
+ unblacklist_hosts_file;
+ $CMD_REST_START;
+ $CMD_AS_USER sleep $REST_INTERVAL_SEC;
+ done
+ # Set interval ends here
+ $CMD_SET_END;
+ $CMD_AS_USER sleep $SET_INTERVAL_SEC;
+ done
+elif [ x"$1" = xstop ]; then
+ # Cleanup hosts file and die.
+ unblacklist_hosts_file;
+ PID=$(cat $USER_HOME$PIDFILE);
+ rm -f $USER_HOME$PIDFILE
+ kill -9 $PID;
+fi
diff --git a/bin/qutebrowser-cookiecleaner b/bin/qutebrowser-cookiecleaner
new file mode 100755
index 0000000..0aa06c1
--- /dev/null
+++ b/bin/qutebrowser-cookiecleaner
@@ -0,0 +1,212 @@
+#!/usr/bin/perl
+# qutebrowser-cookiecleaner
+# Copyright (C) Eskild Hustvedt 2018
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+use 5.014;
+use warnings;
+use DBI;
+use List::Util 1.33 qw(all);
+use File::Basename qw(basename);
+use Getopt::Long;
+use File::Glob qw(bsd_glob);
+use constant {
+ V_INFO => 1,
+};
+
+my $VERSION = 0.1;
+my $verbosity = 0;
+my $XDG_DATA_HOME = $ENV{XDG_DATA_HOME} ? $ENV{XDG_DATA_HOME} : $ENV{HOME}.'/.local/share';
+my $XDG_CONFIG_HOME = $ENV{XDG_CONFIG_HOME} ? $ENV{XDG_CONFIG_HOME} : $ENV{HOME}.'/.config';
+
+# Output something in verbose mode
+sub sayv
+{
+ my $level = shift;
+ if ($verbosity >= $level)
+ {
+ say(@_);
+ }
+}
+
+# Retrieve a database handle
+sub DBH
+{
+ my $dbfile = $XDG_DATA_HOME.'/qutebrowser/webengine/Cookies';
+ if (!-e $dbfile)
+ {
+ die($dbfile.': does not exist'."\n");
+ }
+ my $dbh = DBI->connect("dbi:SQLite:dbname=$dbfile",'','');
+
+ return $dbh;
+}
+
+# Expire cookies, local storage, IndexedDB and databases, except those whitelisted
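+# The whitelist is read from $XDG_CONFIG_HOME/qutebrowser-cookiecleaner.list,
+# one hostname fragment per line, "#" starting a comment, e.g. (made-up):
+#   example.com
+#   mybank.example.org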
+sub expireContent
+{
+ my $dbh = DBH();
+
+ my @statements;
+ my @exceptions;
+ my @whitelist;
+
+    open(my $i,'<',$XDG_CONFIG_HOME.'/qutebrowser-cookiecleaner.list')
+        or die('Failed to open '.$XDG_CONFIG_HOME.'/qutebrowser-cookiecleaner.list: '.$!."\n");
+ my $no = 0;
+ while (my $line = <$i>)
+ {
+ $no++;
+ next if $line =~ /^\s*#/;
+ chomp($line);
+ if (index($line,'%') != -1)
+ {
+ die('Line '.$no.' contains forbidden character "%"'."\n");
+ }
+ push(@exceptions,'%'.$line);
+ push(@statements,'host_key NOT LIKE ?');
+ push(@whitelist,$line);
+ }
+ close($i);
+
+ if(scalar @exceptions == 0)
+ {
+ die('Your qutebrowser-cookiecleaner.list file has no exceptions. Refusing to continue.'."\n");
+ }
+
+ # --
+ # Cookies
+ # --
+
+ my $sth;
+
+ my $deleteStatement = 'DELETE FROM cookies WHERE '.join(' AND ',@statements);
+ my $deletedEntriesStatement = 'SELECT * FROM cookies WHERE '.join(' AND ',@statements);
+
+ # If verbose mode is on, we need more information, so fetch and output the
+ # entries we will delete.
+ if ($verbosity > 0)
+ {
+ $sth = $dbh->prepare($deletedEntriesStatement);
+ $sth->execute(@exceptions);
+
+ while(my $entry = $sth->fetchrow_hashref)
+ {
+ sayv(V_INFO,'Deleting "'.$entry->{name}.'" for "'.$entry->{host_key}.'"');
+ }
+ }
+
+ $sth = $dbh->prepare($deleteStatement);
+ $sth->execute(@exceptions);
+
+ $sth = $dbh->prepare('VACUUM;');
+ $sth->execute();
+
+ $dbh->disconnect;
+
+ # --
+ # Localstorage
+ # --
+
+ my $localstorageDir = $XDG_DATA_HOME.'/qutebrowser/webengine/Local Storage/*.localstorage';
+
+ foreach my $file (bsd_glob($localstorageDir))
+ {
+ if(all { index(basename($file),$_) == -1 } @whitelist )
+ {
+ sayv(V_INFO,'Deleting Local Storage file '.basename($file));
+ unlink($file);
+ if (-e $file.'-journal')
+ {
+ unlink($file.'-journal');
+ }
+ }
+ }
+
+ foreach my $dirEntry (qw(IndexedDB databases))
+ {
+ my $dirsGlob = $XDG_DATA_HOME.'/qutebrowser/webengine/'.$dirEntry.'/*';
+
+ foreach my $dir (bsd_glob($dirsGlob))
+ {
+ next if ! -d $dir;
+ if(all { index(basename($dir),$_) == -1 } @whitelist )
+ {
+ sayv(V_INFO,'Deleting '.$dirEntry.' for '.basename($dir));
+ unlink(bsd_glob($dir.'/*')) or die("Unlink error for subtree: $dir: $!\n");
+ rmdir($dir) or die("Failed to rmdir $dir: $!\n");
+ }
+ }
+ }
+}
+
+# Main function. Command-line parsing.
+sub main
+{
+ my $wrap = 0;
+
+ GetOptions(
+ 'help' => sub {
+ say 'qutebrowser-cookiecleaner version '.$VERSION;
+ say "";
+ say "Usage: qutebrowser-cookiecleaner [OPTIONS]";
+ say "";
+ say "Options:";
+ say " -v, --verbose Increase verbosity";
+ say " --help Show this help screen";
+ say " --wrap Wrap another command, cleaning on start and";
+ say " exit. See README.md for details.";
+ exit(0);
+ },
+ 'wrap' => \$wrap,
+ 'version' => sub
+ {
+ print 'qutebrowser-cookiecleaner version '.$VERSION."\n";
+ exit(0);
+ },
+ 'v|verbose+' => \$verbosity,
+ ) or do
+ {
+ if ($wrap)
+ {
+ warn('Maybe you need to add -- after --wrap (--wrap --)?'."\n")
+ }
+ die('See --help for more information'."\n");
+ };
+
+
+ if ($wrap)
+ {
+ if(scalar @ARGV == 0)
+ {
+ die('--wrap requires additional parameters: the command to run'."\n");
+ }
+ $0 = 'qutebrowser-cookiecleaner: cleaning';
+ expireContent();
+ $0 = 'qutebrowser-cookiecleaner: waiting for '.join(' ',@ARGV);
+ system(@ARGV);
+ if ($? == -1)
+ {
+ die('Failed to execute subprocess '.join(' ',@ARGV).': '.$!."\n");
+ }
+ $0 = 'qutebrowser-cookiecleaner: cleaning';
+ }
+ elsif(scalar @ARGV != 0)
+ {
+ die('Unknown parameters: '.join(' ',@ARGV)."\nDid you mean to use --wrap?\n");
+ }
+
+ expireContent();
+}
+
+main();
diff --git a/bin/rename_avi_files.sh b/bin/rename_avi_files.sh
new file mode 100755
index 0000000..f268647
--- /dev/null
+++ b/bin/rename_avi_files.sh
@@ -0,0 +1 @@
+exiftool -ext avi -d %Y-%m-%d_%H%M%S_$1 '-filename<${DateTimeOriginal}.%e' $2
diff --git a/bin/rename_iphone_files.sh b/bin/rename_iphone_files.sh
new file mode 100755
index 0000000..ecb132f
--- /dev/null
+++ b/bin/rename_iphone_files.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+exiftool -d $1_%Y-%m-%d_%H%M%S '-filename<${CreateDate}.%e' $2
+#exiftool -filename=%-.6f%.3nC.%e `ls | sort`
diff --git a/bin/rename_mov_files.sh b/bin/rename_mov_files.sh
new file mode 100755
index 0000000..f1cd2d5
--- /dev/null
+++ b/bin/rename_mov_files.sh
@@ -0,0 +1 @@
+exiftool -ext mov -d %Y-%m-%d_%H%M%S_$1 '-filename<${CreateDate}.%e' $2
diff --git a/bin/rename_mp4_files.sh b/bin/rename_mp4_files.sh
new file mode 100755
index 0000000..c778460
--- /dev/null
+++ b/bin/rename_mp4_files.sh
@@ -0,0 +1 @@
+exiftool -ext mp4 -d %Y-%m-%d_%H%M%S_$1 '-filename<${CreateDate}.%e' $2
diff --git a/bin/rename_mts_files.sh b/bin/rename_mts_files.sh
new file mode 100755
index 0000000..6847a5b
--- /dev/null
+++ b/bin/rename_mts_files.sh
@@ -0,0 +1 @@
+exiftool -ext mts -d %Y-%m-%d_%H%M%S_$1 '-filename<${DateTimeOriginal}.%e' $2
diff --git a/bin/rename_phone_files.sh b/bin/rename_phone_files.sh
new file mode 100755
index 0000000..e1d585f
--- /dev/null
+++ b/bin/rename_phone_files.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+exiftool -d $1_%Y-%m-%d_%H%M%S '-filename<${Exif:CreateDate}_$MyModel.%e' $2
+#exiftool -filename=%-.6f%.3nC.%e `ls | sort`
diff --git a/bin/rename_raw_files.sh b/bin/rename_raw_files.sh
new file mode 100755
index 0000000..6d488b1
--- /dev/null
+++ b/bin/rename_raw_files.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+exiftool -d %Y-%m-%d_%H%M%S%%-c_$1 '-filename<${Exif:CreateDate}.%e' $2
+#exiftool -filename=%-.6f%.3nC.%e `ls | sort`
diff --git a/bin/review.py b/bin/review.py
new file mode 100755
index 0000000..a17cccc
--- /dev/null
+++ b/bin/review.py
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+import os
+from os.path import abspath, dirname
+from operator import itemgetter
+
+"""
+ parse my gtd folder
+ pull out files that start with the project name which is in []
+ loop through and grab the project and then all the associated
+ tasks (rest of the file name)
+    print the results out to a file called @projects.txt
+"""
+# assuming that this script is in ~/bin this will work:
+BASE_DIR = abspath(dirname(dirname(__file__))) + '/'
+project_list = []
+inprogress = False
+for root, dirnames, filenames in os.walk('%sgtd' % (BASE_DIR)):
+ for f in filenames:
+ if not f.startswith('@') and not f.startswith('.') and not f.startswith('Notes & Settings') and not f.startswith('projx') and not f.startswith("errands"):
+
+ name = f[f.find("[") + 1:f.find("]")]
+ task = f[f.find("]") + 2:-4]
+
+ if f.find("]") > 5:
+ inprogress = True
+ task = "* " + task
+ with open('%sgtd/%s' % (BASE_DIR, f), "r") as fl:
+ for line in fl:
+ for part in line.split():
+ if "@waiting" in part:
+ task = task + " -- " + line.rstrip()
+ project_list.append({
+ "file": f,
+ "name": name,
+ "task": task,
+ "inprogress": inprogress,
+ })
+ break
+
+newlist = sorted(project_list, key=itemgetter('name', 'task'))
+
+projects_file = open("/Users/sng/gtd/@projects.txt", 'w')
+l_name = ''
+
+for p in newlist:
+ if l_name == p['name']:
+ print >> projects_file, "\t" + p['task']
+ else:
+ print >> projects_file, "\n" + p['name']
+ print >> projects_file, "\t" + p['task']
+ l_name = p['name']
diff --git a/bin/run_pandoc.sh b/bin/run_pandoc.sh
new file mode 100755
index 0000000..1874c01
--- /dev/null
+++ b/bin/run_pandoc.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+args=("$@")
+echo "${args[0]}"
+pandoc -s -smart -c "${args[0]}" "${args[1]}" -t html5 -o "${args[2]}"
diff --git a/bin/showall.sh b/bin/showall.sh
new file mode 100755
index 0000000..bd5fcca
--- /dev/null
+++ b/bin/showall.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+clear
+echo '---- ALL TASKS -----'
+python ~/bin/alltasks.py
diff --git a/bin/showalltype.sh b/bin/showalltype.sh
new file mode 100755
index 0000000..5a6c606
--- /dev/null
+++ b/bin/showalltype.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+GREEN=$(tput setaf 2; tput bold)
+NORMAL=$(tput sgr0)
+function green() {
+ echo "$GREEN$*$NORMAL"
+}
+
+clear
+python ~/bin/alltasks_by_type.py $1
diff --git a/bin/showdone.sh b/bin/showdone.sh
new file mode 100755
index 0000000..4ee3c13
--- /dev/null
+++ b/bin/showdone.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+clear
+echo '----- TASKS COMPLETED TODAY -----'
+cd ~/gtd/done/$(date '+%Y-%m-%d')
+ls
diff --git a/bin/showinprogress.sh b/bin/showinprogress.sh
new file mode 100755
index 0000000..3da0736
--- /dev/null
+++ b/bin/showinprogress.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+clear
+echo '----- TASKS IN PROGRESS -----'
+cd ~/gtd && ls | grep ^qq
diff --git a/bin/smartresize.sh b/bin/smartresize.sh
new file mode 100755
index 0000000..b38771e
--- /dev/null
+++ b/bin/smartresize.sh
@@ -0,0 +1,3 @@
+smartresize() {
+    mogrify -path "$3" -filter Triangle -define filter:support=2 -thumbnail "$2" -unsharp 0.25x0.08+8.3+0.045 -dither None -posterize 136 -quality 82 -define jpeg:fancy-upsampling=off -define png:compression-filter=5 -define png:compression-level=9 -define png:compression-strategy=1 -define png:exclude-chunk=all -interlace none -colorspace sRGB "$1"
+}
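+# Usage sketch (argument order assumed from the body above):
+#   smartresize photo.jpg 710x output_dir/
+# i.e. $1 = input image, $2 = -thumbnail geometry, $3 = destination directory.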
diff --git a/bin/stardict-dictd-web1913-2.4.2.tar.bz2 b/bin/stardict-dictd-web1913-2.4.2.tar.bz2
new file mode 100644
index 0000000..b73d88b
--- /dev/null
+++ b/bin/stardict-dictd-web1913-2.4.2.tar.bz2
Binary files differ
diff --git a/bin/temp.js b/bin/temp.js
new file mode 100644
index 0000000..51585b4
--- /dev/null
+++ b/bin/temp.js
@@ -0,0 +1,13 @@
+var targetDiv = document.getElementById("tag_cloud_header");
+var ul = document.createElement("ul");
+var links = ["/u:luxagraf/t:luxagraf/t:@post/", "/u:luxagraf/t:%2523lhp/t:@post/", "/u:luxagraf/t:%2523flw/", "/u:luxagraf/t:%2523rei/t:@post/"];
+var text = ["Luxagraf Post Research", "LongHandPixels Post Research", "Freelance Writing Ideas", "REI Post Research"];
+for (var i=0; i<links.length; i++) {
+ var li = document.createElement("li");
+ var link = document.createElement("a");
+ link.setAttribute('href',links[i]);
+ link.innerHTML = text[i];
+ li.appendChild(link);
+ ul.appendChild(li);
+}
+targetDiv.appendChild(ul);
diff --git a/bin/tweet_archive_tools.py b/bin/tweet_archive_tools.py
new file mode 100755
index 0000000..2737250
--- /dev/null
+++ b/bin/tweet_archive_tools.py
@@ -0,0 +1,191 @@
+## Last updated 8 Dec 2013
+##
+## This program takes data from a locally downloaded Twitter archive
+## and outputs HTML, Text, JSON, geo-coords in CSV, and best friends in csv.
+## See http://blog.twitter.com/2012/12/your-twitter-archive.html
+##
+## It can run either as a dedicated program or as a module.
+##
+## Please visit https://github.com/mshea/Parse-Twitter-Archive
+## for more information.
+##
+## This work is licensed under the Creative Commons Attribution
+## NonCommercial-ShareAlike 3.0 License. You are free to share, copy,
+## distribute, transmit, remix, and adapt the work as long as you attribute
+## it to Michael E. Shea at http://mikeshea.net/, share the work under
+## the same license, and do so for non-commercial purposes. To view a copy
+## of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/.
+##
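+## To run standalone (assumed), adjust the params dict below, place the
+## archive's data/js/tweets/*.js files accordingly, and run:
+##   python2 tweet_archive_tools.py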
+
+import glob
+import json
+import csv
+import datetime
+import collections
+import re
+import sqlite3
+from datetime import datetime
+from datetime import timedelta
+from itertools import islice, izip
+from collections import Counter
+
+params = {
+ 'data_files': './data/js/tweets/*.js',
+ 'geo_output': 'mshea_tweets_geo.csv',
+ 'text_output': 'mshea_tweets.txt',
+ 'json_output': 'mshea_tweets.json',
+ 'bff_output': 'mshea_bffs.csv',
+ 'csv_output': 'mshea_tweets.csv',
+ 'sqlite3_output': 'mshea_tweets.sqlite3',
+ 'html_output': 'mshea_tweets.html',
+ 'twitter_user_id': 'mshea',
+}
+
+
+def load_data(files):
+ items = []
+ files = glob.glob(files)
+ for file in files:
+ with open(file) as f:
+ d = f.readlines()[1:] # Twitter's JSON first line is bogus
+ d = "".join(d)
+ j = json.loads(d)
+ for tweet in j:
+ items.append(tweet)
+ return sorted(items, key=lambda k: k['id'])
+
+
+def get_bffs(d):
+ words = []
+ for item in d:
+ item_words = item['text'].split()
+ for word in item_words:
+ if '@' in word:
+ words.append(word.replace(':', '').lower().encode('utf-8'))
+ return collections.Counter(words).most_common(50)
+
+
+def get_bigrams(d):
+ words = []
+ for item in d:
+ item_words = re.findall('\w+', item['text'])
+ words += item_words
+ output = (Counter(zip(words, words[1:])).most_common(100))
+ for item in output:
+ print item
+
+def get_csv_output(d):
+ output = [('id', 'date', 'tweet')]
+ for item in d:
+ output.append((
+ item['id_str'],
+ item['created_at'],
+ item['text'].encode('utf-8')
+ ))
+ return output
+
+
+def get_geo(d):
+ output = [('date', 'tweet', 'lat', 'long')]
+ for item in d:
+ try:
+ lat = item['geo']['coordinates'][0]
+ long = item['geo']['coordinates'][1]
+ date = item['created_at']
+ text = item['text'].encode('utf-8')
+ output.append((date, text, lat, long))
+ except:
+ error = "no coordinates"
+ return output
+
+
+def link_https_in_text(text):
+ parsed_text = re.sub('http://[^ ,]*',
+ lambda t: "<a href='%s'>%s</a>" %
+ (t.group(0), t.group(0)), text)
+ return parsed_text
+
+
+def write_html(tweets, output_file):
+ html_output = ""
+ for item in tweets:
+        d = datetime.strptime(item['created_at'],
+                              '%Y-%m-%d %H:%M:%S +0000') - timedelta(hours=5)
+ day_string = d.strftime('%d %b %Y %I:%M %p')
+ true_time_object = d + timedelta(hours=5)
+ time_element = true_time_object.isoformat("T")
+ text = link_https_in_text(item['text'])
+ tweet_link = 'http://twitter.com/%s/status/%s'\
+ % (params['twitter_user_id'], item['id'])
+ html_output += '<li id=%s>%s - <a href="%s">'\
+ '<time datetime="%s">%s</time></a></li>\n' \
+ % (item['id'],
+ text,
+ tweet_link,
+ time_element,
+ day_string)
+ with open(output_file, "w") as f:
+ f.write('<!DOCTYPE html>\n'
+ '<title>Twitter Archive Output</title>\n'
+ '<ul>\n')
+ f.write(html_output.encode('utf-8'))
+ f.write('</ul>')
+
+
+def write_sqlite3(json_input, output_file):
+ conn = sqlite3.connect(output_file)
+ c = conn.cursor()
+ try:
+ c.execute('select count(*) from tweets')
+ except:
+ c.execute('CREATE TABLE tweets'
+ '(id int not null primary key, '
+ 'created_at text, text text)')
+ conn.commit()
+ data_to_write = []
+ for item in json_input:
+ data_to_write.append((int(item['id_str']),
+ item['created_at'],
+ item['text']))
+ c.executemany('INSERT OR REPLACE '
+ 'INTO tweets VALUES (?,?,?);',
+ data_to_write)
+ conn.commit()
+
+
+def write_text(tweets, output_file):
+ text_output = ''
+ for item in tweets:
+ text_output += '%s\n%s\n%s\n\n' % (item['id'],
+ item['created_at'],
+ item['text'])
+ with open(output_file, "w") as f:
+ f.write(text_output.encode('utf-8'))
+
+
+def write_csv(d, csv_file):
+ with open(csv_file, 'w') as f:
+ writer = csv.writer(f)
+ writer.writerows(d)
+
+
+def write_json(json_data, output_file):
+ with open(output_file, 'w') as f:
+ f.write(json.dumps(json_data, indent=4))
+
+
+def main():
+ d = load_data(params['data_files'])
+ #get_bigrams(d)
+ write_csv(get_bffs(d), params['bff_output'])
+ write_csv(get_geo(d), params['geo_output'])
+ write_csv(get_csv_output(d), params['csv_output'])
+ write_html(d, params['html_output'])
+ write_text(d, params['text_output'])
+ write_json(d, params['json_output'])
+ write_sqlite3(d, params['sqlite3_output'])
+
+
+if __name__ == "__main__":
+ main()
diff --git a/bin/upgrad_pg.sh b/bin/upgrad_pg.sh
new file mode 100755
index 0000000..ccdfbcc
--- /dev/null
+++ b/bin/upgrad_pg.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+FROM_VERSION="${1:?usage: $0 <old major version>}"
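+
+# Example invocation (version number is an assumption; run as root):
+#   ./upgrad_pg.sh 11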
+pacman -S --needed postgresql-old-upgrade
+chown postgres:postgres /var/lib/postgres/
+su - postgres -c "mv /var/lib/postgres/data /var/lib/postgres/data-${FROM_VERSION}"
+su - postgres -c 'mkdir /var/lib/postgres/data'
+su - postgres -c "initdb --locale $LANG -E UTF8 -D /var/lib/postgres/data"
+su - postgres -c "pg_upgrade -b /opt/pgsql-${FROM_VERSION}/bin/ -B /usr/bin/ -d /var/lib/postgres/data-${FROM_VERSION} -D /var/lib/postgres/data"
diff --git a/bin/vcs_query.py b/bin/vcs_query.py
new file mode 100755
index 0000000..7862047
--- /dev/null
+++ b/bin/vcs_query.py
@@ -0,0 +1,358 @@
+#!/usr/bin/env python3
+# -*- coding: utf8 -*-
+
+# This file is part of vcs_query - https://github.com/mageta/vcs_query
+# SPDX-License-Identifier: MIT
+# See file LICENSE for more information.
+
+# TODO: modules documentation
+
+import collections
+import email.utils
+import argparse
+import hashlib
+import logging
+import pickle
+import sys
+import os
+import re
+
+# vCard Standards:
+# 3.0 https://tools.ietf.org/html/rfc2426
+# 4.0 https://tools.ietf.org/html/rfc6350
+from vobject import readComponents as VObjectRead
+from vobject.base import VObjectError
+
+LOGGER = logging.getLogger(__name__)
+
+Version = collections.namedtuple("Version", ["major", "minor", "patch"])
+VERSION = Version(
+ major=0,
+ minor=4,
+ patch=0,
+)
+
+def main(argv):
+ optparser = argparse.ArgumentParser(prog=argv[0],
+ description="Query vCard Files for "
+ "EMail Addresses")
+ optparser.add_argument("pattern", metavar="PATTERN",
+ nargs='?', default=None,
+                           help="only those lines that contain PATTERN will "
+                                "be displayed")
+ optparser.add_argument("--version",
+ action="version",
+ version="%(prog)s version "
+ "{v.major:d}.{v.minor:d}.{v.patch:d}".format(
+ v=VERSION))
+ optparser.add_argument("-d", "--vcard-dir",
+ required=True, action='append',
+ help="specify directory containing vCards (can be "
+ "given multiple times)")
+ optparser.add_argument("-a", "--all-addresses",
+ required=False, action="store_true",
+ help="display all addresses stored for a contact")
+ optparser.add_argument("-n", "--sort-names",
+ required=False, action="store_true",
+ help="sort the result according to the contact name "
+ "(the default is to sort according to mail-"
+ "address first)")
+ optparser.add_argument("-r", "--regex",
+ required=False, action="store_true",
+ help="interpret PATTERN as regular expression "
+ "(syntax: https://docs.python.org/3/library/"
+ "re.html#regular-expression-syntax)")
+ optparser.add_argument("-m", "--mode",
+ required=False, type=str,
+ choices=OutputFormat.available,
+ default=OutputFormat.available[0],
+ help="select output-mode (default: "
+ "{})".format(OutputFormat.available[0]))
+ args = optparser.parse_args(argv[1:])
+
+ for vcdir in args.vcard_dir:
+ if not os.path.isdir(vcdir):
+ optparser.error("'{}' is not a directory".format(vcdir))
+
+ try:
+ output = OutputFormat(args.mode)
+ except LookupError as error:
+ optparser.error(error)
+
+ try:
+ pattern = Pattern(args.pattern, args.regex)
+ except re.error as error:
+ optparser.error("Given PATTERN is not a valid regular "
+ "expression: {!s}".format(error))
+
+ print("vcs_query.py, see https://github.com/mageta/vcs_query")
+
+ # Load all contacts from the given vCard-Directories; duplicates are
+ # automatically handled by using a set
+ contacts_uniq = set()
+ for vcdir in args.vcard_dir:
+ try:
+ for vcard in VcardCache(vcdir).vcards:
+ if vcard:
+ if args.all_addresses:
+ contacts_uniq.update(vcard)
+ else:
+ contacts_uniq.add(vcard[0])
+ except OSError as error:
+ LOGGER.error("Error while reading vCard Dir: %s: %s", vcdir, error)
+
+ # sort the found contacts according to the given command-line options
+ if not args.sort_names:
+ contacts = sorted(contacts_uniq,
+ key=(lambda x: (x.mail.lower(), x.name.lower(),
+ x.description.lower())))
+ else:
+ contacts = sorted(contacts_uniq,
+ key=(lambda x: (x.name.lower(), x.mail.lower(),
+ x.description.lower())))
+
+ for contact in contacts:
+ if pattern.search(output.format(contact)):
+ print(output.format_escape(contact))
+
+class OutputFormat(object):
+ available = ("mutt", "vim")
+
+ def __init__(self, mode):
+ if mode not in OutputFormat.available:
+ raise LookupError("'{}' is not a supported "
+ "output-mode".format(mode))
+
+ self.mode = mode
+
+ def format(self, contact):
+ if self.mode == "mutt":
+ return "{}\t{}\t{}".format(contact.mail, contact.name,
+ contact.description)
+ elif self.mode == "vim":
+ return "{} <{}>".format(contact.name, contact.mail)
+
+ def format_escape(self, contact):
+ if self.mode == "mutt":
+ return self.format(contact)
+ elif self.mode == "vim":
+ return email.utils.formataddr((contact.name, contact.mail))
+
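+# Integration sketches (paths are assumptions, not part of this script):
+#   mutt: set query_command = "vcs_query.py -d ~/.contacts %s"
+#   vim:  have a completion function run `vcs_query.py -m vim -d ~/.contacts`
+#         and insert the returned "Name <mail>" lines
+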
+class Pattern(object):
+ def __init__(self, pattern, is_regex):
+ self.match_all = False if pattern else True
+ self.is_regex = is_regex
+
+ if not self.match_all:
+ if self.is_regex:
+ self.pattern = re.compile(pattern, re.IGNORECASE)
+ else:
+ self.pattern = pattern.lower()
+
+ def search(self, string):
+ if self.match_all:
+ return True
+
+ if self.is_regex and self.pattern.search(string):
+ return True
+ elif not self.is_regex and self.pattern in string.lower():
+ return True
+
+ return False
+
+class VcardCache(object):
+ def __init__(self, vcard_dir):
+ self.cache_dir = os.path.expanduser("~/.cache/")
+ self.vcard_dir = os.path.normcase(os.path.normpath(vcard_dir))
+
+ dhsh = hashlib.sha256()
+ dhsh.update(self.vcard_dir.encode())
+ self.pickle_path = os.path.join(self.cache_dir,
+ "{}.vcs_query".format(dhsh.hexdigest()))
+
+ self.last_vcard_dir_timestamp = 0
+ self.vcard_files = {}
+
+ self._state = self._load()
+ self._update()
+ self._serialize()
+
+ _cache_version = 1
+
+ @property
+ def _default_state(self):
+ return (VcardCache._cache_version, 0, {})
+
+ @property
+ def _state(self):
+ return (VcardCache._cache_version,
+ self.last_vcard_dir_timestamp, self.vcard_files)
+
+ @_state.setter
+ def _state(self, value):
+ self.last_vcard_dir_timestamp = value[1]
+ self.vcard_files = value[2]
+
+ def _load(self):
+ try:
+ with open(self.pickle_path, "rb") as cache:
+ obj = pickle.load(cache)
+
+ # prune invalid or outdated cache-files
+ if not isinstance(obj, tuple) or len(obj) < 3:
+ raise RuntimeError("Invalid type")
+ elif obj[0] != VcardCache._cache_version:
+ raise RuntimeError("Invalid Version ({})".format(obj[0]))
+
+ return obj
+ except (OSError, RuntimeError, AttributeError, EOFError, ImportError,
+ IndexError, pickle.UnpicklingError) as error:
+            if not isinstance(error, OSError) or error.errno != 2:  # ENOENT
+ LOGGER.warning("Cache file (%s) could not be read: %s",
+ self.pickle_path, error)
+ return self._default_state
+
+ def _update(self):
+ vcard_dir_timestamp = get_timestamp(self.vcard_dir)
+ if vcard_dir_timestamp > self.last_vcard_dir_timestamp:
+ self.last_vcard_dir_timestamp = vcard_dir_timestamp
+
+ paths = set()
+            # let errors in os.scandir() bubble up; if it fails, the whole
+            # update fails
+ with os.scandir(self.vcard_dir) as directory:
+ for node in directory:
+ try:
+ path = os.path.abspath(node.path)
+ if node.is_file():
+ paths.add(path)
+ except OSError as err:
+ LOGGER.error("Error reading vCard: %s: %s", node, err)
+
+ # prune vCards that don't exist anymore
+ removed = list()
+ for path in self.vcard_files.keys():
+ if path not in paths:
+ # we can not delete items from self.vcard_files while we
+ # iterate over it, so remember them instead
+ removed += [path]
+
+ for path in removed:
+ del self.vcard_files[path]
+
+ # add or update vCards
+ for path in paths:
+ vcard = self.vcard_files.get(path)
+ if not vcard or vcard.needs_update():
+ try:
+ vcard = VcardFile(path)
+ self.vcard_files[path] = vcard
+ except OSError as err:
+ LOGGER.error("Error reading vCard: %s: %s", path, err)
+ try:
+ del self.vcard_files[path]
+ except KeyError:
+ pass
+
+ def _serialize(self):
+ try:
+ if not os.path.isdir(self.cache_dir):
+ os.mkdir(self.cache_dir)
+ with open(self.pickle_path, "wb") as cache:
+ pickle.dump(self._state, cache)
+ except OSError:
+ LOGGER.warning("Cannot write to cache file: %s", self.pickle_path)
+
+ @property
+ def vcards(self):
+ for vcard_file in self.vcard_files.values():
+ for vcard in vcard_file.vcards:
+ yield vcard
+
+class Vcard(object):
+ Contact = collections.namedtuple("Contact", ["mail", "name", "description"])
+
+ def __init__(self, component):
+ # Property FN
+ # https://tools.ietf.org/html/rfc6350#section-6.2.1
+ self.name = ""
+ if "fn" in component.contents:
+ self.name = component.fn.value
+
+ # Property EMAIL
+ # https://tools.ietf.org/html/rfc6350#section-6.4.2
+ self.mails = []
+ if "email" in component.contents:
+ self.mails = [mail.value for mail in component.contents["email"]]
+
+ # Property NOTE
+ # https://tools.ietf.org/html/rfc6350#section-6.7.2
+ self.description = ""
+ if "note" in component.contents:
+ self.description = "; ".join([
+ line for line in component.note.value.splitlines() if line
+ ])
+
+ def _get_mail_contact(self, mail):
+ return Vcard.Contact(str(mail), str(self.name), str(self.description))
+
+ def __getitem__(self, i):
+ return self._get_mail_contact(self.mails[i])
+
+ def __iter__(self):
+ for mail in self.mails:
+ yield self._get_mail_contact(mail)
+
+ def __len__(self):
+ return len(self.mails)
+
+class VcardFile(object):
+ vobject_logger = logging.getLogger("vobject.base")
+
+ def __init__(self, path):
+ self.path = path
+ self.timestamp = get_timestamp(path)
+ self.vcards = []
+ self._read_components(path)
+
+ def _read_components(self, path):
+ # As per https://tools.ietf.org/html/rfc6350#section-3.1
+ # the charset for a vCard MUST be UTF-8
+ try:
+ # let errors from FILE-I/O bubble up, this whole vCard is failed
+ with open(path, encoding="utf-8", errors="strict") as vcfile:
+ for component in VObjectRead(vcfile, ignoreUnreadable=True):
+ if component.name.lower() == "vcard":
+ # Normal Case: vCard is the top property:
+ # https://tools.ietf.org/html/rfc6350#section-6.1.1
+ self.vcards += [Vcard(component)]
+ elif "vcard" in component.contents:
+ # Special case from RFC2426; in that version it was
+ # possible to nest vCards:
+ # https://tools.ietf.org/html/rfc2426#section-2.4.2
+ # This has since been removed:
+ # https://tools.ietf.org/html/rfc6350#appendix-A.2
+ # But we keep the code as it is rather simple and it
+ # provides backwards-compatibility
+ self.vcards += [Vcard(component.vcard)]
+ else:
+ LOGGER.warning("No vCard in a component in: %s", path)
+ except VObjectError as error:
+ LOGGER.error("Parser Error in file: %s: %s", path, error)
+ except ValueError as error:
+ LOGGER.error("Bad Encoding in file: %s: %s", path, error)
+
+ def needs_update(self):
+ return get_timestamp(self.path) > self.timestamp
+
+# vobject regularly complains about unparsable streams and the like, but since
+# we don't really know which files in the given directory are supposed to be
+# vCards, that would be too noisy and would only worry users, so we silence
+# most warnings (there are exceptions, e.g. when something looks like a vCard
+# but turns out not to be parsable after all).
+VcardFile.vobject_logger.setLevel(logging.ERROR + 1)
+
+def get_timestamp(path):
+ return os.stat(path).st_mtime
+
+if __name__ == "__main__":
+ main(sys.argv)
diff --git a/bin/web1913.dict.dz b/bin/web1913.dict.dz
new file mode 100644
index 0000000..b234473
--- /dev/null
+++ b/bin/web1913.dict.dz
Binary files differ
diff --git a/bin/web1913.idx b/bin/web1913.idx
new file mode 100644
index 0000000..59cf917
--- /dev/null
+++ b/bin/web1913.idx
Binary files differ
diff --git a/bin/weekly-rsync.sh b/bin/weekly-rsync.sh
new file mode 100755
index 0000000..8dcdcf6
--- /dev/null
+++ b/bin/weekly-rsync.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# ----------------------------------------------------------------------
+# rotating-filesystem-snapshot utility
+# essentially, rotate backup-snapshots of /home
+# on a weekly basis using rsync and cron
+# ----------------------------------------------------------------------
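+
+# example crontab entry (schedule is an assumption):
+#   0 4 * * 0  /home/lxf/bin/weekly-rsync.sh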
+
+
+rm -rf /mnt/backup/week.6
+mv /mnt/backup/week.5 /mnt/backup/week.6
+mv /mnt/backup/week.4 /mnt/backup/week.5
+mv /mnt/backup/week.3 /mnt/backup/week.4
+mv /mnt/backup/week.2 /mnt/backup/week.3
+mv /mnt/backup/week.1 /mnt/backup/week.2
+mv /mnt/backup/week.0 /mnt/backup/week.1
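+
+# week.0 is rebuilt with --link-dest: files unchanged since week.1 are
+# hard-linked to last week's snapshot instead of being copied again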
+rsync -avvz -K \
+    --copy-links \
+    --exclude-from '/home/lxf/.rsync-exclude-weekly' \
+    --link-dest=/mnt/backup/week.1 \
+    ~/ /mnt/backup/week.0/
diff --git a/bin/wired-count-deals-items.py b/bin/wired-count-deals-items.py
new file mode 100755
index 0000000..3fb5bc9
--- /dev/null
+++ b/bin/wired-count-deals-items.py
@@ -0,0 +1,24 @@
+import sys
+from bs4 import BeautifulSoup
+import markdown
+
+filename = sys.argv[1]
+subid = sys.argv[2]
+page_url = "https://www.wired.com/story/best-black-friday-photography-deals-2019/"
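+
+# Usage sketch (arguments are placeholders):
+#   python3 wired-count-deals-items.py deals-draft.md nov2019-deals
+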
+with open(filename, newline='') as f:
+ content = f.readlines()
+ count = 0
+ for line in content:
+ if line.startswith('- **'):
+            count += 1
+ print("count is: ", count)
+
+
+with open(filename, newline='') as f:
+ data = f.read()
+ soup = BeautifulSoup(markdown.markdown(data), "lxml")
+ for a in soup.find_all('a'):
+        start = a['href'].split('//')[1][:4]
+        if start in ('best', 'goto'):
+            row = "%s,,%s,Impact,%s\n" % (page_url, subid, a['href'])
+            print(row)
diff --git a/bin/wired-count-h4-items.py b/bin/wired-count-h4-items.py
new file mode 100755
index 0000000..df39d3c
--- /dev/null
+++ b/bin/wired-count-h4-items.py
@@ -0,0 +1,10 @@
+import sys
+
+filename = sys.argv[1]
+with open(filename, newline='') as f:
+ content = f.readlines()
+ count = 0
+ for line in content:
+ if line.startswith('####'):
+            count += 1
+ print(count)
diff --git a/bin/wired-dedup-h4-items.py b/bin/wired-dedup-h4-items.py
new file mode 100644
index 0000000..77858fb
--- /dev/null
+++ b/bin/wired-dedup-h4-items.py
@@ -0,0 +1,13 @@
+import sys, collections
+
+filename = sys.argv[1]
+with open(filename, newline='') as f:
+ content = f.readlines()
+ new_list = []
+ for line in content:
+ if line.startswith('####'):
+ title = line.strip().split('####')[1].split(' for ')[0]
+ new_list.append(title)
+ print(title)
+ print([item for item, count in collections.Counter(new_list).items() if count > 1])
+
diff --git a/bin/xchromium b/bin/xchromium
new file mode 100755
index 0000000..b836a02
--- /dev/null
+++ b/bin/xchromium
@@ -0,0 +1,2 @@
+#! /bin/sh
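+# GDK_BACKEND=x11 makes GTK use X11 (i.e. Xwayland under a Wayland session)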
+GDK_BACKEND=x11 chromium
diff --git a/bin/xslack b/bin/xslack
new file mode 100755
index 0000000..f360352
--- /dev/null
+++ b/bin/xslack
@@ -0,0 +1,2 @@
+#! /bin/sh
+GDK_BACKEND=x11 slack
diff --git a/bin/xvirtualbox b/bin/xvirtualbox
new file mode 100755
index 0000000..aca6406
--- /dev/null
+++ b/bin/xvirtualbox
@@ -0,0 +1,2 @@
+#! /bin/sh
+QT_QPA_PLATFORM=xcb VirtualBox
diff --git a/bin/xvivaldi b/bin/xvivaldi
new file mode 100755
index 0000000..283cf06
--- /dev/null
+++ b/bin/xvivaldi
@@ -0,0 +1,2 @@
+#! /bin/sh
+GDK_BACKEND=x11 vivaldi-stable
diff --git a/bin/xzoom b/bin/xzoom
new file mode 100755
index 0000000..edb5707
--- /dev/null
+++ b/bin/xzoom
@@ -0,0 +1,3 @@
+#! /bin/sh
+# both variables must prefix the command itself so they reach zoom's environment
+QT_QPA_PLATFORM=xcb GDK_BACKEND=x11 zoom