Column overview for the records that follow (each record describes one Python function; all of the records shown here come from the dereneaton/ipyrad repository):

| column | type | size range |
|---|---|---|
| partition | string (3 classes) | — |
| func_name | string | 1–134 chars |
| docstring | string | 1–46.9k chars |
| path | string | 4–223 chars |
| original_string | string | 75–104k chars |
| code | string | 75–104k chars |
| docstring_tokens | list of strings | 1–1.97k items |
| repo | string | 7–55 chars |
| language | string (1 class) | — |
| url | string | 87–315 chars |
| code_tokens | list of strings | 19–28.4k items |
| sha | string | 40 chars |

In the records listed here the `original_string` and `code` columns contain identical text, and `docstring_tokens` / `code_tokens` are whitespace-and-punctuation tokenizations of the docstring and code, so each record below lists the scalar fields and shows the code once.
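
A minimal sketch of reading records of this schema, assuming they are stored as gzipped JSON Lines with one object per line (the file name is hypothetical):

```python
import gzip
import json

# Hypothetical file: one JSON object per line, carrying the twelve columns above.
with gzip.open("python_valid_0.jsonl.gz", "rt") as fh:
    for line in fh:
        row = json.loads(line)
        print(row["partition"], row["func_name"], row["path"], row["sha"][:7])
```
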
- partition: valid
- func_name: concat_chunks
- path: ipyrad/assemble/demultiplex.py
- repo: dereneaton/ipyrad
- language: python
- url: https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/demultiplex.py#L1069-L1124
- sha: 5eeb8a178160f45faf71bf47cec4abe998a575d1
- docstring: Concatenate chunks. If multiple chunk files match to the same sample name but with different barcodes (i.e., they are technical replicates) then this will assign all the files to the same sample name file.

original_string / code:

```python
def concat_chunks(data, ipyclient):
    """
    Concatenate chunks. If multiple chunk files match to the same sample name
    but with different barcodes (i.e., they are technical replicates) then this
    will assign all the files to the same sample name file.
    """
    ## collate files progress bar
    start = time.time()
    printstr = ' writing/compressing | {} | s1 |'
    lbview = ipyclient.load_balanced_view()
    elapsed = datetime.timedelta(seconds=int(time.time()-start))
    progressbar(10, 0, printstr.format(elapsed), spacer=data._spacer)

    ## get all the files
    ftmps = glob.glob(os.path.join(data.dirs.fastqs, "tmp_*.fastq"))

    ## a dict to assign tmp files to names/reads
    r1dict = {}
    r2dict = {}
    for sname in data.barcodes:
        if "-technical-replicate-" in sname:
            sname = sname.rsplit("-technical-replicate", 1)[0]
        r1dict[sname] = []
        r2dict[sname] = []

    ## assign to name keys
    for ftmp in ftmps:
        base, orient, _ = ftmp.rsplit("_", 2)
        sname = base.rsplit("/", 1)[-1].split("tmp_", 1)[1]
        if orient == "R1":
            r1dict[sname].append(ftmp)
        else:
            r2dict[sname].append(ftmp)

    ## concatenate files
    snames = []
    for sname in data.barcodes:
        if "-technical-replicate-" in sname:
            sname = sname.rsplit("-technical-replicate", 1)[0]
        snames.append(sname)

    writers = []
    for sname in set(snames):
        tmp1s = sorted(r1dict[sname])
        tmp2s = sorted(r2dict[sname])
        writers.append(lbview.apply(collate_files, *[data, sname, tmp1s, tmp2s]))

    total = len(writers)
    while 1:
        ready = [i.ready() for i in writers]
        elapsed = datetime.timedelta(seconds=int(time.time()-start))
        progressbar(total, sum(ready), printstr.format(elapsed), spacer=data._spacer)
        time.sleep(0.1)
        if all(ready):
            print("")
            break
```

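Judging from the parsing in `concat_chunks` above, the temporary files follow the pattern `tmp_<sample>_<R1|R2>_<index>.fastq`. A self-contained toy example of the `rsplit`/`split` pattern used to recover the sample name and read orientation (the path is made up):

```python
# Made-up temporary file path of the kind written during demultiplexing.
ftmp = "/project/fastqs/tmp_sampleA_R1_0007.fastq"

base, orient, _ = ftmp.rsplit("_", 2)                # split off orientation and chunk index
sname = base.rsplit("/", 1)[-1].split("tmp_", 1)[1]  # strip the directory and the "tmp_" prefix
print(sname, orient)                                 # -> sampleA R1
```
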
- partition: valid
- func_name: demux2
- path: ipyrad/assemble/demultiplex.py
- repo: dereneaton/ipyrad
- language: python
- url: https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/demultiplex.py#L1273-L1341
- sha: 5eeb8a178160f45faf71bf47cec4abe998a575d1
- docstring: Submit chunks to be sorted by the barmatch() function then calls putstats().

original_string / code:

```python
def demux2(data, chunkfiles, cutters, longbar, matchdict, ipyclient):
    """
    Submit chunks to be sorted by the barmatch() function then
    calls putstats().
    """
    ## parallel stuff, limit to 1/4 of available cores for RAM limits.
    start = time.time()
    printstr = ' sorting reads | {} | s1 |'
    lbview = ipyclient.load_balanced_view(targets=ipyclient.ids[::4])

    ## store statcounters and async results in dicts
    perfile = {}
    filesort = {}
    total = 0
    done = 0

    ## chunkfiles is a dict with {handle: chunkslist, ...}. The func barmatch
    ## writes results to samplename files with PID number, and also writes a
    ## pickle for chunk specific results with fidx suffix, which it returns.
    for handle, rawtuplist in chunkfiles.items():
        ## get args for job
        for fidx, rawtuple in enumerate(rawtuplist):
            #handle = os.path.splitext(os.path.basename(rawtuple[0]))[0]
            args = (data, rawtuple, cutters, longbar, matchdict, fidx)

            ## submit the job
            async = lbview.apply(barmatch, *args)
            filesort[total] = (handle, async)
            total += 1

        ## get ready to receive stats: 'total', 'cutfound', 'matched'
        perfile[handle] = np.zeros(3, dtype=np.int)

    ## stats for each sample
    fdbars = {}
    fsamplehits = Counter()
    fbarhits = Counter()
    fmisses = Counter()

    ## a tuple to hold my dictionaries
    statdicts = perfile, fsamplehits, fbarhits, fmisses, fdbars

    ## wait for jobs to finish
    while 1:
        fin = [i for i, j in filesort.items() if j[1].ready()]
        #fin = [i for i in jobs if i[1].ready()]
        elapsed = datetime.timedelta(seconds=int(time.time()-start))
        progressbar(total, done, printstr.format(elapsed), spacer=data._spacer)
        time.sleep(0.1)

        ## should we break?
        if total == done:
            print("")
            break

        ## cleanup
        for key in fin:
            tup = filesort[key]
            if tup[1].successful():
                pfile = tup[1].result()
                handle = tup[0]
                if pfile:
                    ## check if this needs to return data
                    putstats(pfile, handle, statdicts)
                ## purge to conserve memory
                del filesort[key]
                done += 1

    return statdicts
```

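The record above assigns to a variable named `async`; that was valid when this source was recorded, but `async` became a reserved keyword in Python 3.7, so the line no longer parses on current interpreters (similarly, `np.int` has been removed from recent NumPy releases). A minimal, self-contained illustration of the parse failure:

```python
import ast

# Line taken from the record above; on Python 3.7+ it is a syntax error.
snippet = "async = lbview.apply(barmatch, *args)"
try:
    ast.parse(snippet)
    print("parsed (pre-3.7 behaviour)")
except SyntaxError as err:
    print("SyntaxError:", err.msg)
```
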
- partition: valid
- func_name: demux
- path: ipyrad/assemble/demultiplex.py
- repo: dereneaton/ipyrad
- language: python
- url: https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/demultiplex.py#L1346-L1476
- sha: 5eeb8a178160f45faf71bf47cec4abe998a575d1
- docstring: submit chunks to be sorted

original_string / code:

```python
def demux(data, chunkfiles, cutters, longbar, matchdict, ipyclient):
    """ submit chunks to be sorted """
    ## parallel stuff
    start = time.time()
    printstr = ' sorting reads | {} | s1 |'
    lbview = ipyclient.load_balanced_view()

    ## store statcounters and async results in dicts
    perfile = {}
    filesort = {}
    for handle, rawtuplist in chunkfiles.items():
        ## get args for job
        for fidx, rawtuple in enumerate(rawtuplist):
            #handle = os.path.splitext(os.path.basename(rawtuple[0]))[0]
            args = (data, rawtuple, cutters, longbar, matchdict, fidx)

            ## submit the job
            filesort[handle] = lbview.apply(barmatch, *args)

        ## get ready to receive stats: 'total', 'cutfound', 'matched'
        perfile[handle] = np.zeros(3, dtype=np.int)

    ## stats for each sample
    fdbars = {}
    fsamplehits = Counter()
    fbarhits = Counter()
    fmisses = Counter()

    ## a tuple to hold my dictionaries
    statdicts = perfile, fsamplehits, fbarhits, fmisses, fdbars

    try:
        kbd = 0
        total = len(chunkfiles)
        done = 0
        ## wait for jobs to finish
        while 1:
            fin = [i for i, j in filesort.items() if j.ready()]
            elapsed = datetime.timedelta(seconds=int(time.time()-start))
            progressbar(total, done, printstr.format(elapsed), spacer=data._spacer)
            time.sleep(0.1)

            ## should we break?
            if total == done:
                print("")
                break

            ## cleanup
            for job in fin:
                if filesort[job].successful():
                    pfile = filesort[job].result()
                    #if result:
                    if pfile:
                        ## check if this needs to return data
                        putstats(pfile, handle, statdicts)
                    ## purge to conserve memory
                    del filesort[job]
                    done += 1

        ## keep tacking progreess during writing stage
        start = time.time()
        printstr = ' writing/compressing | {} | s1 |'
        elapsed = datetime.timedelta(seconds=int(time.time()-start))
        progressbar(10, 0, printstr.format(elapsed), spacer=data._spacer)

    except KeyboardInterrupt:
        ## wait to cleanup
        kbd = 1
        raise

    ## only proceed here if barmatch jobs were not interrupted
    else:
        ## collate files and do progress bar
        ftmps = glob.glob(os.path.join(data.dirs.fastqs, "tmp_*.fastq"))

        ## a dict to assign tmp files to names/reads
        r1dict = {}
        r2dict = {}
        for sname in data.barcodes:
            r1dict[sname] = []
            r2dict[sname] = []

        ## assign to name keys
        for ftmp in ftmps:
            ## split names
            base, orient, _ = ftmp.rsplit("_", 2)
            sname = base.rsplit("/", 1)[-1].split("tmp_", 1)[1]
            ## put into dicts
            if orient == "R1":
                r1dict[sname].append(ftmp)
            else:
                r2dict[sname].append(ftmp)

        ## concatenate files
        total = len(data.barcodes)
        done = 0

        ## store asyncs of collate jobs
        writers = []
        for sname in data.barcodes:
            tmp1s = sorted(r1dict[sname])
            tmp2s = sorted(r2dict[sname])
            writers.append(lbview.apply(collate_files,
                           *[data, sname, tmp1s, tmp2s]))

        ## track progress of collate jobs
        while 1:
            ready = [i.ready() for i in writers]
            elapsed = datetime.timedelta(seconds=int(time.time()-start))
            progressbar(total, sum(ready), printstr.format(elapsed), spacer=data._spacer)
            time.sleep(0.1)
            if all(ready):
                print("")
                break

    finally:
        ## clean up junk files
        tmpfiles = glob.glob(os.path.join(data.dirs.fastqs, "tmp_*_R*.fastq"))
        tmpfiles += glob.glob(os.path.join(data.dirs.fastqs, "tmp_*.p"))
        for tmpf in tmpfiles:
            os.remove(tmpf)

        if kbd:
            raise KeyboardInterrupt()
        else:
            ## build stats from dictionaries
            perfile, fsamplehits, fbarhits, fmisses, fdbars = statdicts
            make_stats(data, perfile, fsamplehits, fbarhits, fmisses, fdbars)
```

- partition: valid
- func_name: putstats
- path: ipyrad/assemble/demultiplex.py
- repo: dereneaton/ipyrad
- language: python
- url: https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/demultiplex.py#L1479-L1502
- sha: 5eeb8a178160f45faf71bf47cec4abe998a575d1
- docstring: puts stats from pickles into a dictionary

original_string / code:

```python
def putstats(pfile, handle, statdicts):
    """ puts stats from pickles into a dictionary """
    ## load in stats
    with open(pfile, 'r') as infile:
        filestats, samplestats = pickle.load(infile)

    ## get dicts from statdicts tuple
    perfile, fsamplehits, fbarhits, fmisses, fdbars = statdicts

    ## pull new stats
    #handle = os.path.splitext(os.path.basename(handle))[0]
    perfile[handle] += filestats

    ## update sample stats
    samplehits, barhits, misses, dbars = samplestats
    fsamplehits.update(samplehits)
    fbarhits.update(barhits)
    fmisses.update(misses)
    fdbars.update(dbars)

    ## repack the tuple and return
    statdicts = perfile, fsamplehits, fbarhits, fmisses, fdbars
    return statdicts
```

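`putstats` above folds each chunk's tallies into the run-level Counters with `Counter.update()`, which adds counts per key rather than replacing them; a small self-contained illustration:

```python
from collections import Counter

# Run-level tally, then one chunk's tally folded in.
fsamplehits = Counter({"sampleA": 10})
samplehits = Counter({"sampleA": 5, "sampleB": 2})
fsamplehits.update(samplehits)      # Counter.update() sums counts per key
print(fsamplehits)                  # Counter({'sampleA': 15, 'sampleB': 2})
```
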
- partition: valid
- func_name: zcat_make_temps
- path: ipyrad/assemble/demultiplex.py
- repo: dereneaton/ipyrad
- language: python
- url: https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/demultiplex.py#L1506-L1600
- sha: 5eeb8a178160f45faf71bf47cec4abe998a575d1
- docstring: Call bash command 'cat' and 'split' to split large files. The goal is to create N splitfiles where N is a multiple of the number of processors so that each processor can work on a file in parallel.

original_string / code:

```python
def zcat_make_temps(data, raws, num, tmpdir, optim, njobs, start):
    """
    Call bash command 'cat' and 'split' to split large files. The goal
    is to create N splitfiles where N is a multiple of the number of processors
    so that each processor can work on a file in parallel.
    """
    printstr = ' chunking large files | {} | s1 |'

    ## split args
    tmpdir = os.path.realpath(tmpdir)
    LOGGER.info("zcat is using optim = %s", optim)

    ## read it, is it gzipped?
    catcmd = ["cat"]
    if raws[0].endswith(".gz"):
        catcmd = ["gunzip", "-c"]

    ## get reading commands for r1s, r2s
    cmd1 = catcmd + [raws[0]]
    cmd2 = catcmd + [raws[1]]

    ## second command splits and writes with name prefix
    cmd3 = ["split", "-a", "4", "-l", str(int(optim)), "-",
            os.path.join(tmpdir, "chunk1_"+str(num)+"_")]
    cmd4 = ["split", "-a", "4", "-l", str(int(optim)), "-",
            os.path.join(tmpdir, "chunk2_"+str(num)+"_")]

    ### run splitter
    proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
    proc3 = sps.Popen(cmd3, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc1.stdout)

    ## wrap the actual call so we can kill it if anything goes awry
    while 1:
        try:
            if not isinstance(proc3.poll(), int):
                elapsed = datetime.timedelta(seconds=int(time.time()-start))
                done = len(glob.glob(os.path.join(tmpdir, 'chunk1_*')))
                progressbar(njobs, min(njobs, done), printstr.format(elapsed), spacer=data._spacer)
                time.sleep(0.1)
            else:
                res = proc3.communicate()[0]
                proc1.stdout.close()
                break
        except KeyboardInterrupt:
            proc1.kill()
            proc3.kill()
            raise KeyboardInterrupt()

    if proc3.returncode:
        raise IPyradWarningExit(" error in %s: %s", cmd3, res)

    ## grab output handles
    chunks1 = glob.glob(os.path.join(tmpdir, "chunk1_"+str(num)+"_*"))
    chunks1.sort()

    if "pair" in data.paramsdict["datatype"]:
        proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)
        proc4 = sps.Popen(cmd4, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc2.stdout)

        ## wrap the actual call so we can kill it if anything goes awry
        while 1:
            try:
                if not isinstance(proc4.poll(), int):
                    elapsed = datetime.timedelta(seconds=int(time.time()-start))
                    done = len(glob.glob(os.path.join(tmpdir, 'chunk1_*')))
                    progressbar(njobs, min(njobs, done), printstr.format(elapsed), data._spacer)
                    time.sleep(0.1)
                else:
                    res = proc4.communicate()[0]
                    proc2.stdout.close()
                    break
            except KeyboardInterrupt:
                proc2.kill()
                proc4.kill()
                raise KeyboardInterrupt()

        if proc4.returncode:
            raise IPyradWarningExit(" error in %s: %s", cmd4, res)

        ## grab output handles
        chunks2 = glob.glob(os.path.join(tmpdir, "chunk2_"+str(num)+"_*"))
        chunks2.sort()
    else:
        chunks2 = [0]*len(chunks1)

    assert len(chunks1) == len(chunks2), \
        "R1 and R2 files are not the same length."

    ## ensure full progress bar b/c estimates njobs could be off
    progressbar(10, 10, printstr.format(elapsed), spacer=data._spacer)
    return zip(chunks1, chunks2)
```

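`zcat_make_temps` above wires `gunzip -c` into `split` through a pipe. A stripped-down sketch of that pipeline, with hypothetical paths and a fixed chunk size (the real function derives `optim` elsewhere and tracks progress while it waits):

```python
import os
import subprocess as sps

raw = "raws/lane1_R1_.fastq.gz"            # hypothetical gzipped input file
tmpdir = os.path.realpath("tmp-chunks")    # hypothetical output directory
os.makedirs(tmpdir, exist_ok=True)
optim = 4000                               # lines per chunk; a multiple of 4 keeps FASTQ records intact

cmd1 = ["gunzip", "-c", raw]
cmd3 = ["split", "-a", "4", "-l", str(optim), "-",
        os.path.join(tmpdir, "chunk1_0_")]
proc1 = sps.Popen(cmd1, stdout=sps.PIPE)
proc3 = sps.Popen(cmd3, stdin=proc1.stdout)
proc1.stdout.close()    # let split see EOF once gunzip finishes
proc3.wait()            # chunks appear as chunk1_0_aaaa, chunk1_0_aaab, ...
```
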
- partition: valid
- func_name: _plotshare
- path: ipyrad/plotting/share_plot.py
- repo: dereneaton/ipyrad
- language: python
- url: https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/plotting/share_plot.py#L54-L122
- sha: 5eeb8a178160f45faf71bf47cec4abe998a575d1
- docstring: make toyplot matrix fig

original_string / code:

```python
def _plotshare(share, names, **kwargs):
    """ make toyplot matrix fig"""
    ## set the colormap
    colormap = toyplot.color.LinearMap(toyplot.color.brewer.palette("Spectral"),
                                       domain_min=share.min(), domain_max=share.max())

    ## set up canvas
    if not kwargs.get('width'):
        width = 900
    else:
        width = kwargs['width']
    canvas = toyplot.Canvas(width=width, height=width*0.77778)

    ## order the dta
    table = canvas.matrix((share, colormap),
                          bounds=(50, canvas.height-100,
                                  50, canvas.height-100),
                          step=5, tshow=False, lshow=False)

    ## put a box around the table
    table.body.grid.vlines[..., [0, -1]] = 'single'
    table.body.grid.hlines[[0, -1], ...] = 'single'

    ## make hover info on grid
    for i, j in itertools.product(range(len(share)), repeat=2):
        table.body.cell(i, j).title = "%s, %s : %s" % (names[i], names[j], int(share[i, j]))

    ## create barplot
    axes = canvas.cartesian(bounds=(665, 800, 90, 560))

    ## make a hover for barplot
    zf = zip(names[::-1], share.diagonal()[::-1])
    barfloater = ["%s: %s" % (i, int(j)) for i, j in zf]

    ## plot bars
    axes.bars(share.diagonal()[::-1], along='y', title=barfloater)

    ## hide spine, move labels to the left,
    ## use taxon names, rotate angle, align
    axes.y.spine.show = False
    axes.y.ticks.labels.offset = 0
    axes.y.ticks.locator = toyplot.locator.Explicit(range(len(names)),
                                                    labels=names[::-1])
    axes.y.ticks.labels.angle = -90
    axes.y.ticks.labels.style = {"baseline-shift": 0,
                                 "text-anchor": "end",
                                 "font-size": "8px"}

    ## rotate xlabels, align with ticks, change to thousands, move up on canvas
    ## show ticks, and hide popup coordinates
    axes.x.ticks.labels.angle = 90
    axes.x.ticks.labels.offset = 20
    axes.x.ticks.locator = toyplot.locator.Explicit(
        range(0, int(share.max()),
              int(share.max() / 10)),
        ["{}".format(i) for i in range(0, int(share.max()),
                                       int(share.max() / 10))])
    axes.x.ticks.labels.style = {"baseline-shift": 0,
                                 "text-anchor": "end",
                                 "-toyplot-anchor-shift": "15px"}
    axes.x.ticks.show = True

    ## add labels
    label_style = {"font-size": "16px", "font-weight": "bold"}
    canvas.text(300, 60, "Matrix of shared RAD loci", style=label_style)
    canvas.text(700, 60, "N RAD loci per sample", style=label_style)

    return canvas, axes
```

- partition: valid
- func_name: _getarray
- path: ipyrad/plotting/share_plot.py
- repo: dereneaton/ipyrad
- language: python
- url: https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/plotting/share_plot.py#L126-L146
- sha: 5eeb8a178160f45faf71bf47cec4abe998a575d1
- docstring: parse the loci file list and return presence/absence matrix ordered by the tips on the tree

original_string / code:

```python
def _getarray(loci, tree):
    """
    parse the loci file list and return presence/absence matrix
    ordered by the tips on the tree
    """
    ## order tips
    tree.ladderize()

    ## get tip names
    snames = tree.get_leaf_names()

    ## make an empty matrix
    lxs = np.zeros((len(snames), len(loci)), dtype=np.int)

    ## fill the matrix
    for loc in xrange(len(loci)):
        for seq in loci[loc].split("\n")[:-1]:
            lxs[snames.index(seq.split()[0]), loc] += 1

    return lxs, snames
```

- partition: valid
- func_name: _countmatrix
- path: ipyrad/plotting/share_plot.py
- repo: dereneaton/ipyrad
- language: python
- url: https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/plotting/share_plot.py#L150-L170
- sha: 5eeb8a178160f45faf71bf47cec4abe998a575d1
- docstring: fill a matrix with pairwise data sharing

original_string / code:

```python
def _countmatrix(lxs):
    """ fill a matrix with pairwise data sharing """
    ## an empty matrix
    share = np.zeros((lxs.shape[0], lxs.shape[0]))

    ## fill above
    names = range(lxs.shape[0])
    for row in lxs:
        for samp1, samp2 in itertools.combinations(names, 2):
            shared = lxs[samp1, lxs[samp2] > 0].sum()
            share[samp1, samp2] = shared

    ## mirror below
    ##share[]

    ## fill diagonal with total sample coverage
    for row in xrange(len(names)):
        share[row, row] = lxs[row].sum()

    return share
```

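A self-contained toy check of the sharing matrix computed by `_getarray`/`_countmatrix` above, using a made-up 3-sample by 4-locus presence/absence matrix (note that the recorded function's outer `for row in lxs` loop recomputes the same pairwise values on every pass, so a single pass gives the same result):

```python
import itertools
import numpy as np

lxs = np.array([[1, 1, 0, 1],    # sample 0 present at loci 0, 1, 3
                [1, 0, 1, 1],    # sample 1 present at loci 0, 2, 3
                [0, 1, 1, 0]])   # sample 2 present at loci 1, 2
share = np.zeros((lxs.shape[0], lxs.shape[0]))
for samp1, samp2 in itertools.combinations(range(lxs.shape[0]), 2):
    share[samp1, samp2] = lxs[samp1, lxs[samp2] > 0].sum()
for row in range(lxs.shape[0]):
    share[row, row] = lxs[row].sum()
print(share)
# [[3. 2. 1.]
#  [0. 3. 1.]
#  [0. 0. 2.]]
```
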
- partition: valid
- func_name: paramname
- path: ipyrad/core/paramsinfo.py
- repo: dereneaton/ipyrad
- language: python
- url: https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/core/paramsinfo.py#L415-L430
- sha: 5eeb8a178160f45faf71bf47cec4abe998a575d1
- docstring: Get the param name from the dict index value.

original_string / code:

```python
def paramname(param=""):
    """ Get the param name from the dict index value.
    """
    try:
        name = pinfo[str(param)][0].strip().split(" ")[1]
    except (KeyError, ValueError) as err:
        ## TODO: paramsinfo get description by param string not working.
        ## It would be cool to have an assembly object bcz then you could
        ## just do this:
        ##
        ## print(pinfo[data.paramsinfo.keys().index(param)])
        print("\tKey name/number not recognized - ".format(param), err)
        raise
    return name
```

valid
|
paraminfo
|
Returns detailed information for the numbered parameter.
Further information is available in the tutorial.
Unlike params() this function doesn't deal well with *
It only takes one parameter at a time and returns the desc
|
ipyrad/core/paramsinfo.py
|
def paraminfo(param="", short=False):
""" Returns detailed information for the numbered parameter.
Further information is available in the tutorial.
Unlike params() this function doesn't deal well with *
It only takes one parameter at a time and returns the desc
"""
## If the short flag is set return the short description, otherwise
## return the long.
if short:
desc = 1
else:
desc = 0
try:
description = pinfo[str(param)][desc]
except (KeyError, ValueError) as err:
## TODO: paramsinfo get description by param string not working.
## It would be cool to have an assembly object bcz then you could
## just do this:
##
## print(pinfo[data.paramsinfo.keys().index(param)])
        print("\tKey name/number not recognized - {}".format(param), err)
raise
return description
|
def paraminfo(param="", short=False):
""" Returns detailed information for the numbered parameter.
Further information is available in the tutorial.
Unlike params() this function doesn't deal well with *
It only takes one parameter at a time and returns the desc
"""
## If the short flag is set return the short description, otherwise
## return the long.
if short:
desc = 1
else:
desc = 0
try:
description = pinfo[str(param)][desc]
except (KeyError, ValueError) as err:
## TODO: paramsinfo get description by param string not working.
## It would be cool to have an assembly object bcz then you could
## just do this:
##
## print(pinfo[data.paramsinfo.keys().index(param)])
        print("\tKey name/number not recognized - {}".format(param), err)
raise
return description
|
[
"Returns",
"detailed",
"information",
"for",
"the",
"numbered",
"parameter",
".",
"Further",
"information",
"is",
"available",
"in",
"the",
"tutorial",
".",
"Unlike",
"params",
"()",
"this",
"function",
"doesn",
"t",
"deal",
"well",
"with",
"*",
"It",
"only",
"takes",
"one",
"parameter",
"at",
"a",
"time",
"and",
"returns",
"the",
"desc"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/core/paramsinfo.py#L433-L458
|
[
"def",
"paraminfo",
"(",
"param",
"=",
"\"\"",
",",
"short",
"=",
"False",
")",
":",
"## If the short flag is set return the short description, otherwise",
"## return the long.",
"if",
"short",
":",
"desc",
"=",
"1",
"else",
":",
"desc",
"=",
"0",
"try",
":",
"description",
"=",
"pinfo",
"[",
"str",
"(",
"param",
")",
"]",
"[",
"desc",
"]",
"except",
"(",
"KeyError",
",",
"ValueError",
")",
"as",
"err",
":",
"## TODO: paramsinfo get description by param string not working.",
"## It would be cool to have an assembly object bcz then you could",
"## just do this:",
"##",
"## print(pinfo[data.paramsinfo.keys().index(param)])",
"print",
"(",
"\"\\tKey name/number not recognized - \"",
".",
"format",
"(",
"param",
")",
",",
"err",
")",
"raise",
"return",
"description"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
paramsinfo
|
This is the human readable version of the paramsinfo() function.
You give it a param and it prints to stdout.
|
ipyrad/core/paramsinfo.py
|
def paramsinfo(param="", short=False):
""" This is the human readable version of the paramsinfo() function.
You give it a param and it prints to stdout.
"""
if short:
desc = 1
else:
desc = 0
if param == "*":
for key in pinfo:
print(pinfo[str(key)][desc])
elif param:
try:
print(pinfo[str(param)][desc])
except (KeyError, ValueError) as err:
## TODO: paramsinfo get description by param string not working.
## It would be cool to have an assembly object bcz then you could
## just do this:
##
## print(pinfo[data.paramsinfo.keys().index(param)])
print("\tKey name/number not recognized", err)
raise
else:
print("Enter a name or number for explanation of the parameter\n")
for key in pinfo:
print(pinfo[str(key)][desc].split("\n")[1][2:-10])
|
def paramsinfo(param="", short=False):
""" This is the human readable version of the paramsinfo() function.
You give it a param and it prints to stdout.
"""
if short:
desc = 1
else:
desc = 0
if param == "*":
for key in pinfo:
print(pinfo[str(key)][desc])
elif param:
try:
print(pinfo[str(param)][desc])
except (KeyError, ValueError) as err:
## TODO: paramsinfo get description by param string not working.
## It would be cool to have an assembly object bcz then you could
## just do this:
##
## print(pinfo[data.paramsinfo.keys().index(param)])
print("\tKey name/number not recognized", err)
raise
else:
print("Enter a name or number for explanation of the parameter\n")
for key in pinfo:
print(pinfo[str(key)][desc].split("\n")[1][2:-10])
|
[
"This",
"is",
"the",
"human",
"readable",
"version",
"of",
"the",
"paramsinfo",
"()",
"function",
".",
"You",
"give",
"it",
"a",
"param",
"and",
"it",
"prints",
"to",
"stdout",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/core/paramsinfo.py#L461-L487
|
[
"def",
"paramsinfo",
"(",
"param",
"=",
"\"\"",
",",
"short",
"=",
"False",
")",
":",
"if",
"short",
":",
"desc",
"=",
"1",
"else",
":",
"desc",
"=",
"0",
"if",
"param",
"==",
"\"*\"",
":",
"for",
"key",
"in",
"pinfo",
":",
"print",
"(",
"pinfo",
"[",
"str",
"(",
"key",
")",
"]",
"[",
"desc",
"]",
")",
"elif",
"param",
":",
"try",
":",
"print",
"(",
"pinfo",
"[",
"str",
"(",
"param",
")",
"]",
"[",
"desc",
"]",
")",
"except",
"(",
"KeyError",
",",
"ValueError",
")",
"as",
"err",
":",
"## TODO: paramsinfo get description by param string not working.",
"## It would be cool to have an assembly object bcz then you could",
"## just do this:",
"##",
"## print(pinfo[data.paramsinfo.keys().index(param)])",
"print",
"(",
"\"\\tKey name/number not recognized\"",
",",
"err",
")",
"raise",
"else",
":",
"print",
"(",
"\"Enter a name or number for explanation of the parameter\\n\"",
")",
"for",
"key",
"in",
"pinfo",
":",
"print",
"(",
"pinfo",
"[",
"str",
"(",
"key",
")",
"]",
"[",
"desc",
"]",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"1",
"]",
"[",
"2",
":",
"-",
"10",
"]",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
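paramname, paraminfo, and paramsinfo differ only in how they surface the same pinfo table: the first parses out the name, the second returns a description, and the third prints it (with "*" printing every entry). A hedged usage sketch, assuming a legacy (Python 2-era) ipyrad install and that parameter key 7 exists in that version's table:

from ipyrad.core.paramsinfo import paramname, paraminfo, paramsinfo

print(paramname(7))              # name parsed from the long description
print(paraminfo(7, short=True))  # returns the one-line (short) description
paramsinfo(7)                    # prints the long description to stdout
paramsinfo("*", short=True)      # prints the short description of every parameter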
|
valid
|
update_assembly
|
Create a new Assembly() and convert as many of our old params to the new
version as we can. Also report out any parameters that are removed
and what their values are.
|
ipyrad/load/load.py
|
def update_assembly(data):
"""
Create a new Assembly() and convert as many of our old params to the new
version as we can. Also report out any parameters that are removed
and what their values are.
"""
print("##############################################################")
print("Updating assembly to current version")
    ## New assembly object to update from.
new_assembly = ip.Assembly("update", quiet=True)
## Hackersonly dict gets automatically overwritten
## Always use the current version for params in this dict.
data._hackersonly = deepcopy(new_assembly._hackersonly)
new_params = set(new_assembly.paramsdict.keys())
my_params = set(data.paramsdict.keys())
## Find all params in loaded assembly that aren't in the new assembly.
## Make a new dict that doesn't include anything in removed_params
removed_params = my_params.difference(new_params)
for i in removed_params:
print("Removing parameter: {} = {}".format(i, data.paramsdict[i]))
## Find all params that are in the new paramsdict and not in the old one.
    ## If the set isn't empty then we create a new dictionary based on the new
## assembly parameters and populated with currently loaded assembly values.
## Conditioning on not including any removed params. Magic.
added_params = new_params.difference(my_params)
for i in added_params:
print("Adding parameter: {} = {}".format(i, new_assembly.paramsdict[i]))
print("\nPlease take note of these changes. Every effort is made to\n"\
+"ensure compatibility across versions of ipyrad. See online\n"\
+"documentation for further details about new parameters.")
time.sleep(5)
print("##############################################################")
if added_params:
for i in data.paramsdict:
if i not in removed_params:
new_assembly.paramsdict[i] = data.paramsdict[i]
data.paramsdict = deepcopy(new_assembly.paramsdict)
data.save()
return data
|
def update_assembly(data):
"""
Create a new Assembly() and convert as many of our old params to the new
version as we can. Also report out any parameters that are removed
and what their values are.
"""
print("##############################################################")
print("Updating assembly to current version")
    ## New assembly object to update from.
new_assembly = ip.Assembly("update", quiet=True)
## Hackersonly dict gets automatically overwritten
## Always use the current version for params in this dict.
data._hackersonly = deepcopy(new_assembly._hackersonly)
new_params = set(new_assembly.paramsdict.keys())
my_params = set(data.paramsdict.keys())
## Find all params in loaded assembly that aren't in the new assembly.
## Make a new dict that doesn't include anything in removed_params
removed_params = my_params.difference(new_params)
for i in removed_params:
print("Removing parameter: {} = {}".format(i, data.paramsdict[i]))
## Find all params that are in the new paramsdict and not in the old one.
    ## If the set isn't empty then we create a new dictionary based on the new
## assembly parameters and populated with currently loaded assembly values.
## Conditioning on not including any removed params. Magic.
added_params = new_params.difference(my_params)
for i in added_params:
print("Adding parameter: {} = {}".format(i, new_assembly.paramsdict[i]))
print("\nPlease take note of these changes. Every effort is made to\n"\
+"ensure compatibility across versions of ipyrad. See online\n"\
+"documentation for further details about new parameters.")
time.sleep(5)
print("##############################################################")
if added_params:
for i in data.paramsdict:
if i not in removed_params:
new_assembly.paramsdict[i] = data.paramsdict[i]
data.paramsdict = deepcopy(new_assembly.paramsdict)
data.save()
return data
|
[
"Create",
"a",
"new",
"Assembly",
"()",
"and",
"convert",
"as",
"many",
"of",
"our",
"old",
"params",
"to",
"the",
"new",
"version",
"as",
"we",
"can",
".",
"Also",
"report",
"out",
"any",
"parameters",
"that",
"are",
"removed",
"and",
"what",
"their",
"values",
"are",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/load/load.py#L53-L100
|
[
"def",
"update_assembly",
"(",
"data",
")",
":",
"print",
"(",
"\"##############################################################\"",
")",
"print",
"(",
"\"Updating assembly to current version\"",
")",
"## New assembly object to update pdate from.",
"new_assembly",
"=",
"ip",
".",
"Assembly",
"(",
"\"update\"",
",",
"quiet",
"=",
"True",
")",
"## Hackersonly dict gets automatically overwritten",
"## Always use the current version for params in this dict.",
"data",
".",
"_hackersonly",
"=",
"deepcopy",
"(",
"new_assembly",
".",
"_hackersonly",
")",
"new_params",
"=",
"set",
"(",
"new_assembly",
".",
"paramsdict",
".",
"keys",
"(",
")",
")",
"my_params",
"=",
"set",
"(",
"data",
".",
"paramsdict",
".",
"keys",
"(",
")",
")",
"## Find all params in loaded assembly that aren't in the new assembly.",
"## Make a new dict that doesn't include anything in removed_params",
"removed_params",
"=",
"my_params",
".",
"difference",
"(",
"new_params",
")",
"for",
"i",
"in",
"removed_params",
":",
"print",
"(",
"\"Removing parameter: {} = {}\"",
".",
"format",
"(",
"i",
",",
"data",
".",
"paramsdict",
"[",
"i",
"]",
")",
")",
"## Find all params that are in the new paramsdict and not in the old one.",
"## If the set isn't emtpy then we create a new dictionary based on the new",
"## assembly parameters and populated with currently loaded assembly values.",
"## Conditioning on not including any removed params. Magic.",
"added_params",
"=",
"new_params",
".",
"difference",
"(",
"my_params",
")",
"for",
"i",
"in",
"added_params",
":",
"print",
"(",
"\"Adding parameter: {} = {}\"",
".",
"format",
"(",
"i",
",",
"new_assembly",
".",
"paramsdict",
"[",
"i",
"]",
")",
")",
"print",
"(",
"\"\\nPlease take note of these changes. Every effort is made to\\n\"",
"+",
"\"ensure compatibility across versions of ipyrad. See online\\n\"",
"+",
"\"documentation for further details about new parameters.\"",
")",
"time",
".",
"sleep",
"(",
"5",
")",
"print",
"(",
"\"##############################################################\"",
")",
"if",
"added_params",
":",
"for",
"i",
"in",
"data",
".",
"paramsdict",
":",
"if",
"i",
"not",
"in",
"removed_params",
":",
"new_assembly",
".",
"paramsdict",
"[",
"i",
"]",
"=",
"data",
".",
"paramsdict",
"[",
"i",
"]",
"data",
".",
"paramsdict",
"=",
"deepcopy",
"(",
"new_assembly",
".",
"paramsdict",
")",
"data",
".",
"save",
"(",
")",
"return",
"data"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
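The core of update_assembly is set arithmetic over parameter keys: keys only in the old dict are reported as removed, keys only in the new defaults are reported as added, and shared keys carry their old values into a copy of the new defaults. A minimal sketch of that migration logic with plain dicts (the parameter names here are invented for illustration):

from copy import deepcopy

def migrate_params(old, new_defaults):
    """Carry shared keys' old values into a copy of the new defaults,
    reporting keys that were dropped or newly introduced."""
    old_keys, new_keys = set(old), set(new_defaults)
    for key in old_keys - new_keys:
        print("Removing parameter: {} = {}".format(key, old[key]))
    for key in new_keys - old_keys:
        print("Adding parameter: {} = {}".format(key, new_defaults[key]))
    merged = deepcopy(new_defaults)
    for key in old_keys & new_keys:
        merged[key] = old[key]      # keep the previously configured value
    return merged

old = {"clust_threshold": 0.85, "excludes": ""}           # pretend old paramsdict
new = {"clust_threshold": 0.90, "min_samples_locus": 4}   # pretend new defaults
print(migrate_params(old, new))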
|
valid
|
save_json2
|
save to json.
|
ipyrad/load/load.py
|
def save_json2(data):
""" save to json."""
## convert everything to dicts
## skip _ipcluster cuz it's made new.
datadict = OrderedDict([
("outfiles", data.__dict__["outfiles"]),
("stats_files", dict(data.__dict__["stats_files"])),
("stats_dfs", data.__dict__["stats_dfs"])
])
|
def save_json2(data):
""" save to json."""
## convert everything to dicts
## skip _ipcluster cuz it's made new.
datadict = OrderedDict([
("outfiles", data.__dict__["outfiles"]),
("stats_files", dict(data.__dict__["stats_files"])),
("stats_dfs", data.__dict__["stats_dfs"])
])
|
[
"save",
"to",
"json",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/load/load.py#L104-L113
|
[
"def",
"save_json2",
"(",
"data",
")",
":",
"## convert everything to dicts",
"## skip _ipcluster cuz it's made new.",
"datadict",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"outfiles\"",
",",
"data",
".",
"__dict__",
"[",
"\"outfiles\"",
"]",
")",
",",
"(",
"\"stats_files\"",
",",
"dict",
"(",
"data",
".",
"__dict__",
"[",
"\"stats_files\"",
"]",
")",
")",
",",
"(",
"\"stats_dfs\"",
",",
"data",
".",
"__dict__",
"[",
"\"stats_dfs\"",
"]",
")",
"]",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
save_json
|
Save assembly and samples as json
|
ipyrad/load/load.py
|
def save_json(data):
""" Save assembly and samples as json """
## data as dict
#### skip _ipcluster because it's made new
#### skip _headers because it's loaded new
#### statsfiles save only keys
#### samples save only keys
datadict = OrderedDict([
("_version", data.__dict__["_version"]),
("_checkpoint", data.__dict__["_checkpoint"]),
("name", data.__dict__["name"]),
("dirs", data.__dict__["dirs"]),
("paramsdict", data.__dict__["paramsdict"]),
("samples", data.__dict__["samples"].keys()),
("populations", data.__dict__["populations"]),
("database", data.__dict__["database"]),
("clust_database", data.__dict__["clust_database"]),
("outfiles", data.__dict__["outfiles"]),
("barcodes", data.__dict__["barcodes"]),
("stats_files", data.__dict__["stats_files"]),
("_hackersonly", data.__dict__["_hackersonly"]),
])
## sample dict
sampledict = OrderedDict([])
for key, sample in data.samples.iteritems():
sampledict[key] = sample._to_fulldict()
    ## json format it using custom Encoder class
fulldumps = json.dumps({
"assembly": datadict,
"samples": sampledict
},
cls=Encoder,
sort_keys=False, indent=4, separators=(",", ":"),
)
## save to file
assemblypath = os.path.join(data.dirs.project, data.name+".json")
if not os.path.exists(data.dirs.project):
os.mkdir(data.dirs.project)
## protect save from interruption
done = 0
while not done:
try:
with open(assemblypath, 'w') as jout:
jout.write(fulldumps)
done = 1
except (KeyboardInterrupt, SystemExit):
print('.')
continue
|
def save_json(data):
""" Save assembly and samples as json """
## data as dict
#### skip _ipcluster because it's made new
#### skip _headers because it's loaded new
#### statsfiles save only keys
#### samples save only keys
datadict = OrderedDict([
("_version", data.__dict__["_version"]),
("_checkpoint", data.__dict__["_checkpoint"]),
("name", data.__dict__["name"]),
("dirs", data.__dict__["dirs"]),
("paramsdict", data.__dict__["paramsdict"]),
("samples", data.__dict__["samples"].keys()),
("populations", data.__dict__["populations"]),
("database", data.__dict__["database"]),
("clust_database", data.__dict__["clust_database"]),
("outfiles", data.__dict__["outfiles"]),
("barcodes", data.__dict__["barcodes"]),
("stats_files", data.__dict__["stats_files"]),
("_hackersonly", data.__dict__["_hackersonly"]),
])
## sample dict
sampledict = OrderedDict([])
for key, sample in data.samples.iteritems():
sampledict[key] = sample._to_fulldict()
    ## json format it using custom Encoder class
fulldumps = json.dumps({
"assembly": datadict,
"samples": sampledict
},
cls=Encoder,
sort_keys=False, indent=4, separators=(",", ":"),
)
## save to file
assemblypath = os.path.join(data.dirs.project, data.name+".json")
if not os.path.exists(data.dirs.project):
os.mkdir(data.dirs.project)
## protect save from interruption
done = 0
while not done:
try:
with open(assemblypath, 'w') as jout:
jout.write(fulldumps)
done = 1
except (KeyboardInterrupt, SystemExit):
print('.')
continue
|
[
"Save",
"assembly",
"and",
"samples",
"as",
"json"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/load/load.py#L117-L169
|
[
"def",
"save_json",
"(",
"data",
")",
":",
"## data as dict",
"#### skip _ipcluster because it's made new",
"#### skip _headers because it's loaded new",
"#### statsfiles save only keys",
"#### samples save only keys",
"datadict",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"_version\"",
",",
"data",
".",
"__dict__",
"[",
"\"_version\"",
"]",
")",
",",
"(",
"\"_checkpoint\"",
",",
"data",
".",
"__dict__",
"[",
"\"_checkpoint\"",
"]",
")",
",",
"(",
"\"name\"",
",",
"data",
".",
"__dict__",
"[",
"\"name\"",
"]",
")",
",",
"(",
"\"dirs\"",
",",
"data",
".",
"__dict__",
"[",
"\"dirs\"",
"]",
")",
",",
"(",
"\"paramsdict\"",
",",
"data",
".",
"__dict__",
"[",
"\"paramsdict\"",
"]",
")",
",",
"(",
"\"samples\"",
",",
"data",
".",
"__dict__",
"[",
"\"samples\"",
"]",
".",
"keys",
"(",
")",
")",
",",
"(",
"\"populations\"",
",",
"data",
".",
"__dict__",
"[",
"\"populations\"",
"]",
")",
",",
"(",
"\"database\"",
",",
"data",
".",
"__dict__",
"[",
"\"database\"",
"]",
")",
",",
"(",
"\"clust_database\"",
",",
"data",
".",
"__dict__",
"[",
"\"clust_database\"",
"]",
")",
",",
"(",
"\"outfiles\"",
",",
"data",
".",
"__dict__",
"[",
"\"outfiles\"",
"]",
")",
",",
"(",
"\"barcodes\"",
",",
"data",
".",
"__dict__",
"[",
"\"barcodes\"",
"]",
")",
",",
"(",
"\"stats_files\"",
",",
"data",
".",
"__dict__",
"[",
"\"stats_files\"",
"]",
")",
",",
"(",
"\"_hackersonly\"",
",",
"data",
".",
"__dict__",
"[",
"\"_hackersonly\"",
"]",
")",
",",
"]",
")",
"## sample dict",
"sampledict",
"=",
"OrderedDict",
"(",
"[",
"]",
")",
"for",
"key",
",",
"sample",
"in",
"data",
".",
"samples",
".",
"iteritems",
"(",
")",
":",
"sampledict",
"[",
"key",
"]",
"=",
"sample",
".",
"_to_fulldict",
"(",
")",
"## json format it using cumstom Encoder class",
"fulldumps",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"assembly\"",
":",
"datadict",
",",
"\"samples\"",
":",
"sampledict",
"}",
",",
"cls",
"=",
"Encoder",
",",
"sort_keys",
"=",
"False",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
",",
")",
"## save to file",
"assemblypath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"project",
",",
"data",
".",
"name",
"+",
"\".json\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"data",
".",
"dirs",
".",
"project",
")",
":",
"os",
".",
"mkdir",
"(",
"data",
".",
"dirs",
".",
"project",
")",
"## protect save from interruption",
"done",
"=",
"0",
"while",
"not",
"done",
":",
"try",
":",
"with",
"open",
"(",
"assemblypath",
",",
"'w'",
")",
"as",
"jout",
":",
"jout",
".",
"write",
"(",
"fulldumps",
")",
"done",
"=",
"1",
"except",
"(",
"KeyboardInterrupt",
",",
"SystemExit",
")",
":",
"print",
"(",
"'.'",
")",
"continue"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
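Two details of save_json are worth isolating: tuples survive serialization thanks to the custom Encoder (see the Encoder.encode and _tup_and_byte records below), and the write itself is wrapped in a retry loop so a stray Ctrl-C cannot leave a half-written JSON file on disk. A stripped-down, Python 3 sketch of the interruption-guarded write with generic data (the real function serializes the Assembly and Sample dicts):

import json
import os

def safe_write_json(payload, path):
    """Serialize up front, then retry the short write if interrupted,
    so the file on disk is never left truncated."""
    fulldumps = json.dumps(payload, indent=4, separators=(",", ":"))
    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    done = False
    while not done:
        try:
            with open(path, "w") as jout:
                jout.write(fulldumps)
            done = True
        except (KeyboardInterrupt, SystemExit):
            # swallow the interrupt once and finish the write cleanly
            print(".")
            continue

safe_write_json({"assembly": {"name": "demo"}}, "demo_project/demo.json")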
|
valid
|
load_json
|
Load a json serialized object and ensure it matches to the current
Assembly object format
|
ipyrad/load/load.py
|
def load_json(path, quiet=False, cli=False):
"""
Load a json serialized object and ensure it matches to the current
Assembly object format
"""
## load the JSON string and try with name+.json
checkfor = [path+".json", path]
for inpath in checkfor:
inpath = inpath.replace("~", os.path.expanduser("~"))
try:
with open(inpath, 'r') as infile:
## uses _tup_and_byte to ensure ascii and tuples are correct
fullj = json.loads(infile.read(), object_hook=_tup_and_byte)
except IOError:
pass
## create a new empty Assembly
try:
oldname = fullj["assembly"].pop("name")
olddir = fullj["assembly"]["dirs"]["project"]
oldpath = os.path.join(olddir, os.path.splitext(oldname)[0]+".json")
null = ip.Assembly(oldname, quiet=True, cli=cli)
except (UnboundLocalError, AttributeError) as inst:
raise IPyradWarningExit("""
Could not find saved Assembly file (.json) in expected location.
Checks in: [project_dir]/[assembly_name].json
Checked: {}
""".format(inpath))
## print msg with shortpath
if not quiet:
oldpath = oldpath.replace(os.path.expanduser("~"), "~")
print("{}loading Assembly: {}".format(null._spacer, oldname))
print("{}from saved path: {}".format(null._spacer, oldpath))
## First get the samples. Create empty sample dict of correct length
samplekeys = fullj["assembly"].pop("samples")
null.samples = {name: "" for name in samplekeys}
## Next get paramsdict and use set_params to convert values back to
## the correct dtypes. Allow set_params to fail because the object will
## be subsequently updated by the params from the params file, which may
## correct any errors/incompatibilities in the old params file
oldparams = fullj["assembly"].pop("paramsdict")
for param, val in oldparams.iteritems():
## a fix for backward compatibility with deprecated options
if param not in ["assembly_name", "excludes", "outgroups"]:
try:
null.set_params(param, val)
except IPyradWarningExit as inst:
#null.set_params(param, "")
LOGGER.warning("""
Load assembly error setting params. Not critical b/c new params file may
correct the problem. Recorded here for debugging:
{}
""".format(inst))
## Import the hackersonly dict. In this case we don't have the nice
## set_params so we're shooting from the hip to reset the values
try:
oldhackersonly = fullj["assembly"].pop("_hackersonly")
for param, val in oldhackersonly.iteritems():
if val == None:
null._hackersonly[param] = None
else:
null._hackersonly[param] = val
except Exception as inst:
LOGGER.warning("""
Load assembly error resetting hackersonly dict element. We will just use
the default value in the current assembly.""")
## Check remaining attributes of Assembly and Raise warning if attributes
## do not match up between old and new objects
newkeys = null.__dict__.keys()
oldkeys = fullj["assembly"].keys()
## find shared keys and deprecated keys
sharedkeys = set(oldkeys).intersection(set(newkeys))
lostkeys = set(oldkeys).difference(set(newkeys))
## raise warning if there are lost/deprecated keys
if lostkeys:
LOGGER.warning("""
load_json found {a} keys that are unique to the older Assembly.
- assembly [{b}] v.[{c}] has: {d}
- current assembly is v.[{e}]
""".format(a=len(lostkeys),
b=oldname,
c=fullj["assembly"]["_version"],
d=lostkeys,
e=null._version))
## load in remaining shared Assembly attributes to null
for key in sharedkeys:
null.__setattr__(key, fullj["assembly"][key])
## load in svd results if they exist
try:
if fullj["assembly"]["svd"]:
null.__setattr__("svd", fullj["assembly"]["svd"])
null.svd = ObjDict(null.svd)
except Exception:
LOGGER.debug("skipping: no svd results present in old assembly")
## Now, load in the Sample objects json dicts
sample_names = fullj["samples"].keys()
if not sample_names:
raise IPyradWarningExit("""
No samples found in saved assembly. If you are just starting a new
assembly the file probably got saved erroneously, so it's safe to try
removing the assembly file (e.g., rm {}.json) and restarting.
If you fully completed step 1 and you see this message you should probably
contact the developers.
""".format(inpath))
sample_keys = fullj["samples"][sample_names[0]].keys()
stats_keys = fullj["samples"][sample_names[0]]["stats"].keys()
stats_dfs_keys = fullj["samples"][sample_names[0]]["stats_dfs"].keys()
ind_statkeys = \
[fullj["samples"][sample_names[0]]["stats_dfs"][i].keys() \
for i in stats_dfs_keys]
ind_statkeys = list(itertools.chain(*ind_statkeys))
## check against a null sample
nsamp = ip.Sample()
newkeys = nsamp.__dict__.keys()
newstats = nsamp.__dict__["stats"].keys()
newstatdfs = nsamp.__dict__["stats_dfs"].keys()
newindstats = [nsamp.__dict__["stats_dfs"][i].keys() for i in newstatdfs]
newindstats = list(itertools.chain(*[i.values for i in newindstats]))
## different in attributes?
diffattr = set(sample_keys).difference(newkeys)
diffstats = set(stats_keys).difference(newstats)
diffindstats = set(ind_statkeys).difference(newindstats)
## Raise warning if any oldstats were lost or deprecated
alldiffs = diffattr.union(diffstats).union(diffindstats)
if any(alldiffs):
LOGGER.warning("""
load_json found {a} keys that are unique to the older Samples.
- assembly [{b}] v.[{c}] has: {d}
- current assembly is v.[{e}]
""".format(a=len(alldiffs),
b=oldname,
c=fullj["assembly"]["_version"],
d=alldiffs,
e=null._version))
## save stats and statsfiles to Samples
for sample in null.samples:
## create a null Sample
null.samples[sample] = ip.Sample()
## save stats
sdat = fullj["samples"][sample]['stats']
## Reorder the keys so they ascend by step, only include
## stats that are actually in the sample. newstats is a
## list of the new sample stat names, and stats_keys
## are the names of the stats from the json file.
newstats = [x for x in newstats if x in stats_keys]
null.samples[sample].stats = pd.Series(sdat).reindex(newstats)
## save stats_dfs
for statskey in stats_dfs_keys:
null.samples[sample].stats_dfs[statskey] = \
pd.Series(fullj["samples"][sample]["stats_dfs"][statskey])\
.reindex(nsamp.__dict__["stats_dfs"][statskey].keys())
## save Sample files
for filehandle in fullj["samples"][sample]["files"].keys():
null.samples[sample].files[filehandle] = \
fullj["samples"][sample]["files"][filehandle]
## build the Assembly object stats_dfs
for statskey in stats_dfs_keys:
indstat = null._build_stat(statskey)
if not indstat.empty:
null.stats_dfs[statskey] = indstat
    ## add remaining attributes to null Samples
shared_keys = set(sample_keys).intersection(newkeys)
shared_keys.discard("stats")
shared_keys.discard("files")
shared_keys.discard("stats_files")
shared_keys.discard("stats_dfs")
for sample in null.samples:
## set the others
for key in shared_keys:
null.samples[sample].__setattr__(key, fullj["samples"][sample][key])
## ensure objects are object dicts
null.dirs = ObjDict(null.dirs)
null.stats_files = ObjDict(null.stats_files)
null.stats_dfs = ObjDict(null.stats_dfs)
null.populations = ObjDict(null.populations)
null.outfiles = ObjDict(null.outfiles)
return null
|
def load_json(path, quiet=False, cli=False):
"""
Load a json serialized object and ensure it matches to the current
Assembly object format
"""
## load the JSON string and try with name+.json
checkfor = [path+".json", path]
for inpath in checkfor:
inpath = inpath.replace("~", os.path.expanduser("~"))
try:
with open(inpath, 'r') as infile:
## uses _tup_and_byte to ensure ascii and tuples are correct
fullj = json.loads(infile.read(), object_hook=_tup_and_byte)
except IOError:
pass
## create a new empty Assembly
try:
oldname = fullj["assembly"].pop("name")
olddir = fullj["assembly"]["dirs"]["project"]
oldpath = os.path.join(olddir, os.path.splitext(oldname)[0]+".json")
null = ip.Assembly(oldname, quiet=True, cli=cli)
except (UnboundLocalError, AttributeError) as inst:
raise IPyradWarningExit("""
Could not find saved Assembly file (.json) in expected location.
Checks in: [project_dir]/[assembly_name].json
Checked: {}
""".format(inpath))
## print msg with shortpath
if not quiet:
oldpath = oldpath.replace(os.path.expanduser("~"), "~")
print("{}loading Assembly: {}".format(null._spacer, oldname))
print("{}from saved path: {}".format(null._spacer, oldpath))
## First get the samples. Create empty sample dict of correct length
samplekeys = fullj["assembly"].pop("samples")
null.samples = {name: "" for name in samplekeys}
## Next get paramsdict and use set_params to convert values back to
## the correct dtypes. Allow set_params to fail because the object will
## be subsequently updated by the params from the params file, which may
## correct any errors/incompatibilities in the old params file
oldparams = fullj["assembly"].pop("paramsdict")
for param, val in oldparams.iteritems():
## a fix for backward compatibility with deprecated options
if param not in ["assembly_name", "excludes", "outgroups"]:
try:
null.set_params(param, val)
except IPyradWarningExit as inst:
#null.set_params(param, "")
LOGGER.warning("""
Load assembly error setting params. Not critical b/c new params file may
correct the problem. Recorded here for debugging:
{}
""".format(inst))
## Import the hackersonly dict. In this case we don't have the nice
## set_params so we're shooting from the hip to reset the values
try:
oldhackersonly = fullj["assembly"].pop("_hackersonly")
for param, val in oldhackersonly.iteritems():
if val == None:
null._hackersonly[param] = None
else:
null._hackersonly[param] = val
except Exception as inst:
LOGGER.warning("""
Load assembly error resetting hackersonly dict element. We will just use
the default value in the current assembly.""")
## Check remaining attributes of Assembly and Raise warning if attributes
## do not match up between old and new objects
newkeys = null.__dict__.keys()
oldkeys = fullj["assembly"].keys()
## find shared keys and deprecated keys
sharedkeys = set(oldkeys).intersection(set(newkeys))
lostkeys = set(oldkeys).difference(set(newkeys))
## raise warning if there are lost/deprecated keys
if lostkeys:
LOGGER.warning("""
load_json found {a} keys that are unique to the older Assembly.
- assembly [{b}] v.[{c}] has: {d}
- current assembly is v.[{e}]
""".format(a=len(lostkeys),
b=oldname,
c=fullj["assembly"]["_version"],
d=lostkeys,
e=null._version))
## load in remaining shared Assembly attributes to null
for key in sharedkeys:
null.__setattr__(key, fullj["assembly"][key])
## load in svd results if they exist
try:
if fullj["assembly"]["svd"]:
null.__setattr__("svd", fullj["assembly"]["svd"])
null.svd = ObjDict(null.svd)
except Exception:
LOGGER.debug("skipping: no svd results present in old assembly")
## Now, load in the Sample objects json dicts
sample_names = fullj["samples"].keys()
if not sample_names:
raise IPyradWarningExit("""
No samples found in saved assembly. If you are just starting a new
assembly the file probably got saved erroneously, so it's safe to try
removing the assembly file (e.g., rm {}.json) and restarting.
If you fully completed step 1 and you see this message you should probably
contact the developers.
""".format(inpath))
sample_keys = fullj["samples"][sample_names[0]].keys()
stats_keys = fullj["samples"][sample_names[0]]["stats"].keys()
stats_dfs_keys = fullj["samples"][sample_names[0]]["stats_dfs"].keys()
ind_statkeys = \
[fullj["samples"][sample_names[0]]["stats_dfs"][i].keys() \
for i in stats_dfs_keys]
ind_statkeys = list(itertools.chain(*ind_statkeys))
## check against a null sample
nsamp = ip.Sample()
newkeys = nsamp.__dict__.keys()
newstats = nsamp.__dict__["stats"].keys()
newstatdfs = nsamp.__dict__["stats_dfs"].keys()
newindstats = [nsamp.__dict__["stats_dfs"][i].keys() for i in newstatdfs]
newindstats = list(itertools.chain(*[i.values for i in newindstats]))
## different in attributes?
diffattr = set(sample_keys).difference(newkeys)
diffstats = set(stats_keys).difference(newstats)
diffindstats = set(ind_statkeys).difference(newindstats)
## Raise warning if any oldstats were lost or deprecated
alldiffs = diffattr.union(diffstats).union(diffindstats)
if any(alldiffs):
LOGGER.warning("""
load_json found {a} keys that are unique to the older Samples.
- assembly [{b}] v.[{c}] has: {d}
- current assembly is v.[{e}]
""".format(a=len(alldiffs),
b=oldname,
c=fullj["assembly"]["_version"],
d=alldiffs,
e=null._version))
## save stats and statsfiles to Samples
for sample in null.samples:
## create a null Sample
null.samples[sample] = ip.Sample()
## save stats
sdat = fullj["samples"][sample]['stats']
## Reorder the keys so they ascend by step, only include
## stats that are actually in the sample. newstats is a
## list of the new sample stat names, and stats_keys
## are the names of the stats from the json file.
newstats = [x for x in newstats if x in stats_keys]
null.samples[sample].stats = pd.Series(sdat).reindex(newstats)
## save stats_dfs
for statskey in stats_dfs_keys:
null.samples[sample].stats_dfs[statskey] = \
pd.Series(fullj["samples"][sample]["stats_dfs"][statskey])\
.reindex(nsamp.__dict__["stats_dfs"][statskey].keys())
## save Sample files
for filehandle in fullj["samples"][sample]["files"].keys():
null.samples[sample].files[filehandle] = \
fullj["samples"][sample]["files"][filehandle]
## build the Assembly object stats_dfs
for statskey in stats_dfs_keys:
indstat = null._build_stat(statskey)
if not indstat.empty:
null.stats_dfs[statskey] = indstat
    ## add remaining attributes to null Samples
shared_keys = set(sample_keys).intersection(newkeys)
shared_keys.discard("stats")
shared_keys.discard("files")
shared_keys.discard("stats_files")
shared_keys.discard("stats_dfs")
for sample in null.samples:
## set the others
for key in shared_keys:
null.samples[sample].__setattr__(key, fullj["samples"][sample][key])
## ensure objects are object dicts
null.dirs = ObjDict(null.dirs)
null.stats_files = ObjDict(null.stats_files)
null.stats_dfs = ObjDict(null.stats_dfs)
null.populations = ObjDict(null.populations)
null.outfiles = ObjDict(null.outfiles)
return null
|
[
"Load",
"a",
"json",
"serialized",
"object",
"and",
"ensure",
"it",
"matches",
"to",
"the",
"current",
"Assembly",
"object",
"format"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/load/load.py#L173-L377
|
[
"def",
"load_json",
"(",
"path",
",",
"quiet",
"=",
"False",
",",
"cli",
"=",
"False",
")",
":",
"## load the JSON string and try with name+.json",
"checkfor",
"=",
"[",
"path",
"+",
"\".json\"",
",",
"path",
"]",
"for",
"inpath",
"in",
"checkfor",
":",
"inpath",
"=",
"inpath",
".",
"replace",
"(",
"\"~\"",
",",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
")",
"try",
":",
"with",
"open",
"(",
"inpath",
",",
"'r'",
")",
"as",
"infile",
":",
"## uses _tup_and_byte to ensure ascii and tuples are correct",
"fullj",
"=",
"json",
".",
"loads",
"(",
"infile",
".",
"read",
"(",
")",
",",
"object_hook",
"=",
"_tup_and_byte",
")",
"except",
"IOError",
":",
"pass",
"## create a new empty Assembly",
"try",
":",
"oldname",
"=",
"fullj",
"[",
"\"assembly\"",
"]",
".",
"pop",
"(",
"\"name\"",
")",
"olddir",
"=",
"fullj",
"[",
"\"assembly\"",
"]",
"[",
"\"dirs\"",
"]",
"[",
"\"project\"",
"]",
"oldpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"olddir",
",",
"os",
".",
"path",
".",
"splitext",
"(",
"oldname",
")",
"[",
"0",
"]",
"+",
"\".json\"",
")",
"null",
"=",
"ip",
".",
"Assembly",
"(",
"oldname",
",",
"quiet",
"=",
"True",
",",
"cli",
"=",
"cli",
")",
"except",
"(",
"UnboundLocalError",
",",
"AttributeError",
")",
"as",
"inst",
":",
"raise",
"IPyradWarningExit",
"(",
"\"\"\"\n Could not find saved Assembly file (.json) in expected location.\n Checks in: [project_dir]/[assembly_name].json\n Checked: {}\n \"\"\"",
".",
"format",
"(",
"inpath",
")",
")",
"## print msg with shortpath",
"if",
"not",
"quiet",
":",
"oldpath",
"=",
"oldpath",
".",
"replace",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
",",
"\"~\"",
")",
"print",
"(",
"\"{}loading Assembly: {}\"",
".",
"format",
"(",
"null",
".",
"_spacer",
",",
"oldname",
")",
")",
"print",
"(",
"\"{}from saved path: {}\"",
".",
"format",
"(",
"null",
".",
"_spacer",
",",
"oldpath",
")",
")",
"## First get the samples. Create empty sample dict of correct length ",
"samplekeys",
"=",
"fullj",
"[",
"\"assembly\"",
"]",
".",
"pop",
"(",
"\"samples\"",
")",
"null",
".",
"samples",
"=",
"{",
"name",
":",
"\"\"",
"for",
"name",
"in",
"samplekeys",
"}",
"## Next get paramsdict and use set_params to convert values back to ",
"## the correct dtypes. Allow set_params to fail because the object will ",
"## be subsequently updated by the params from the params file, which may",
"## correct any errors/incompatibilities in the old params file",
"oldparams",
"=",
"fullj",
"[",
"\"assembly\"",
"]",
".",
"pop",
"(",
"\"paramsdict\"",
")",
"for",
"param",
",",
"val",
"in",
"oldparams",
".",
"iteritems",
"(",
")",
":",
"## a fix for backward compatibility with deprecated options",
"if",
"param",
"not",
"in",
"[",
"\"assembly_name\"",
",",
"\"excludes\"",
",",
"\"outgroups\"",
"]",
":",
"try",
":",
"null",
".",
"set_params",
"(",
"param",
",",
"val",
")",
"except",
"IPyradWarningExit",
"as",
"inst",
":",
"#null.set_params(param, \"\")",
"LOGGER",
".",
"warning",
"(",
"\"\"\" \n Load assembly error setting params. Not critical b/c new params file may\n correct the problem. Recorded here for debugging:\n {}\n \"\"\"",
".",
"format",
"(",
"inst",
")",
")",
"## Import the hackersonly dict. In this case we don't have the nice",
"## set_params so we're shooting from the hip to reset the values",
"try",
":",
"oldhackersonly",
"=",
"fullj",
"[",
"\"assembly\"",
"]",
".",
"pop",
"(",
"\"_hackersonly\"",
")",
"for",
"param",
",",
"val",
"in",
"oldhackersonly",
".",
"iteritems",
"(",
")",
":",
"if",
"val",
"==",
"None",
":",
"null",
".",
"_hackersonly",
"[",
"param",
"]",
"=",
"None",
"else",
":",
"null",
".",
"_hackersonly",
"[",
"param",
"]",
"=",
"val",
"except",
"Exception",
"as",
"inst",
":",
"LOGGER",
".",
"warning",
"(",
"\"\"\"\n Load assembly error resetting hackersonly dict element. We will just use\n the default value in the current assembly.\"\"\"",
")",
"## Check remaining attributes of Assembly and Raise warning if attributes",
"## do not match up between old and new objects",
"newkeys",
"=",
"null",
".",
"__dict__",
".",
"keys",
"(",
")",
"oldkeys",
"=",
"fullj",
"[",
"\"assembly\"",
"]",
".",
"keys",
"(",
")",
"## find shared keys and deprecated keys",
"sharedkeys",
"=",
"set",
"(",
"oldkeys",
")",
".",
"intersection",
"(",
"set",
"(",
"newkeys",
")",
")",
"lostkeys",
"=",
"set",
"(",
"oldkeys",
")",
".",
"difference",
"(",
"set",
"(",
"newkeys",
")",
")",
"## raise warning if there are lost/deprecated keys",
"if",
"lostkeys",
":",
"LOGGER",
".",
"warning",
"(",
"\"\"\"\n load_json found {a} keys that are unique to the older Assembly.\n - assembly [{b}] v.[{c}] has: {d}\n - current assembly is v.[{e}]\n \"\"\"",
".",
"format",
"(",
"a",
"=",
"len",
"(",
"lostkeys",
")",
",",
"b",
"=",
"oldname",
",",
"c",
"=",
"fullj",
"[",
"\"assembly\"",
"]",
"[",
"\"_version\"",
"]",
",",
"d",
"=",
"lostkeys",
",",
"e",
"=",
"null",
".",
"_version",
")",
")",
"## load in remaining shared Assembly attributes to null",
"for",
"key",
"in",
"sharedkeys",
":",
"null",
".",
"__setattr__",
"(",
"key",
",",
"fullj",
"[",
"\"assembly\"",
"]",
"[",
"key",
"]",
")",
"## load in svd results if they exist",
"try",
":",
"if",
"fullj",
"[",
"\"assembly\"",
"]",
"[",
"\"svd\"",
"]",
":",
"null",
".",
"__setattr__",
"(",
"\"svd\"",
",",
"fullj",
"[",
"\"assembly\"",
"]",
"[",
"\"svd\"",
"]",
")",
"null",
".",
"svd",
"=",
"ObjDict",
"(",
"null",
".",
"svd",
")",
"except",
"Exception",
":",
"LOGGER",
".",
"debug",
"(",
"\"skipping: no svd results present in old assembly\"",
")",
"## Now, load in the Sample objects json dicts",
"sample_names",
"=",
"fullj",
"[",
"\"samples\"",
"]",
".",
"keys",
"(",
")",
"if",
"not",
"sample_names",
":",
"raise",
"IPyradWarningExit",
"(",
"\"\"\"\n No samples found in saved assembly. If you are just starting a new\n assembly the file probably got saved erroneously, so it's safe to try \n removing the assembly file (e.g., rm {}.json) and restarting.\n\n If you fully completed step 1 and you see this message you should probably\n contact the developers.\n \"\"\"",
".",
"format",
"(",
"inpath",
")",
")",
"sample_keys",
"=",
"fullj",
"[",
"\"samples\"",
"]",
"[",
"sample_names",
"[",
"0",
"]",
"]",
".",
"keys",
"(",
")",
"stats_keys",
"=",
"fullj",
"[",
"\"samples\"",
"]",
"[",
"sample_names",
"[",
"0",
"]",
"]",
"[",
"\"stats\"",
"]",
".",
"keys",
"(",
")",
"stats_dfs_keys",
"=",
"fullj",
"[",
"\"samples\"",
"]",
"[",
"sample_names",
"[",
"0",
"]",
"]",
"[",
"\"stats_dfs\"",
"]",
".",
"keys",
"(",
")",
"ind_statkeys",
"=",
"[",
"fullj",
"[",
"\"samples\"",
"]",
"[",
"sample_names",
"[",
"0",
"]",
"]",
"[",
"\"stats_dfs\"",
"]",
"[",
"i",
"]",
".",
"keys",
"(",
")",
"for",
"i",
"in",
"stats_dfs_keys",
"]",
"ind_statkeys",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"ind_statkeys",
")",
")",
"## check against a null sample",
"nsamp",
"=",
"ip",
".",
"Sample",
"(",
")",
"newkeys",
"=",
"nsamp",
".",
"__dict__",
".",
"keys",
"(",
")",
"newstats",
"=",
"nsamp",
".",
"__dict__",
"[",
"\"stats\"",
"]",
".",
"keys",
"(",
")",
"newstatdfs",
"=",
"nsamp",
".",
"__dict__",
"[",
"\"stats_dfs\"",
"]",
".",
"keys",
"(",
")",
"newindstats",
"=",
"[",
"nsamp",
".",
"__dict__",
"[",
"\"stats_dfs\"",
"]",
"[",
"i",
"]",
".",
"keys",
"(",
")",
"for",
"i",
"in",
"newstatdfs",
"]",
"newindstats",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"[",
"i",
".",
"values",
"for",
"i",
"in",
"newindstats",
"]",
")",
")",
"## different in attributes?",
"diffattr",
"=",
"set",
"(",
"sample_keys",
")",
".",
"difference",
"(",
"newkeys",
")",
"diffstats",
"=",
"set",
"(",
"stats_keys",
")",
".",
"difference",
"(",
"newstats",
")",
"diffindstats",
"=",
"set",
"(",
"ind_statkeys",
")",
".",
"difference",
"(",
"newindstats",
")",
"## Raise warning if any oldstats were lost or deprecated",
"alldiffs",
"=",
"diffattr",
".",
"union",
"(",
"diffstats",
")",
".",
"union",
"(",
"diffindstats",
")",
"if",
"any",
"(",
"alldiffs",
")",
":",
"LOGGER",
".",
"warning",
"(",
"\"\"\"\n load_json found {a} keys that are unique to the older Samples.\n - assembly [{b}] v.[{c}] has: {d}\n - current assembly is v.[{e}]\n \"\"\"",
".",
"format",
"(",
"a",
"=",
"len",
"(",
"alldiffs",
")",
",",
"b",
"=",
"oldname",
",",
"c",
"=",
"fullj",
"[",
"\"assembly\"",
"]",
"[",
"\"_version\"",
"]",
",",
"d",
"=",
"alldiffs",
",",
"e",
"=",
"null",
".",
"_version",
")",
")",
"## save stats and statsfiles to Samples",
"for",
"sample",
"in",
"null",
".",
"samples",
":",
"## create a null Sample",
"null",
".",
"samples",
"[",
"sample",
"]",
"=",
"ip",
".",
"Sample",
"(",
")",
"## save stats",
"sdat",
"=",
"fullj",
"[",
"\"samples\"",
"]",
"[",
"sample",
"]",
"[",
"'stats'",
"]",
"## Reorder the keys so they ascend by step, only include",
"## stats that are actually in the sample. newstats is a",
"## list of the new sample stat names, and stats_keys",
"## are the names of the stats from the json file.",
"newstats",
"=",
"[",
"x",
"for",
"x",
"in",
"newstats",
"if",
"x",
"in",
"stats_keys",
"]",
"null",
".",
"samples",
"[",
"sample",
"]",
".",
"stats",
"=",
"pd",
".",
"Series",
"(",
"sdat",
")",
".",
"reindex",
"(",
"newstats",
")",
"## save stats_dfs",
"for",
"statskey",
"in",
"stats_dfs_keys",
":",
"null",
".",
"samples",
"[",
"sample",
"]",
".",
"stats_dfs",
"[",
"statskey",
"]",
"=",
"pd",
".",
"Series",
"(",
"fullj",
"[",
"\"samples\"",
"]",
"[",
"sample",
"]",
"[",
"\"stats_dfs\"",
"]",
"[",
"statskey",
"]",
")",
".",
"reindex",
"(",
"nsamp",
".",
"__dict__",
"[",
"\"stats_dfs\"",
"]",
"[",
"statskey",
"]",
".",
"keys",
"(",
")",
")",
"## save Sample files",
"for",
"filehandle",
"in",
"fullj",
"[",
"\"samples\"",
"]",
"[",
"sample",
"]",
"[",
"\"files\"",
"]",
".",
"keys",
"(",
")",
":",
"null",
".",
"samples",
"[",
"sample",
"]",
".",
"files",
"[",
"filehandle",
"]",
"=",
"fullj",
"[",
"\"samples\"",
"]",
"[",
"sample",
"]",
"[",
"\"files\"",
"]",
"[",
"filehandle",
"]",
"## build the Assembly object stats_dfs",
"for",
"statskey",
"in",
"stats_dfs_keys",
":",
"indstat",
"=",
"null",
".",
"_build_stat",
"(",
"statskey",
")",
"if",
"not",
"indstat",
".",
"empty",
":",
"null",
".",
"stats_dfs",
"[",
"statskey",
"]",
"=",
"indstat",
"## add remaning attributes to null Samples",
"shared_keys",
"=",
"set",
"(",
"sample_keys",
")",
".",
"intersection",
"(",
"newkeys",
")",
"shared_keys",
".",
"discard",
"(",
"\"stats\"",
")",
"shared_keys",
".",
"discard",
"(",
"\"files\"",
")",
"shared_keys",
".",
"discard",
"(",
"\"stats_files\"",
")",
"shared_keys",
".",
"discard",
"(",
"\"stats_dfs\"",
")",
"for",
"sample",
"in",
"null",
".",
"samples",
":",
"## set the others",
"for",
"key",
"in",
"shared_keys",
":",
"null",
".",
"samples",
"[",
"sample",
"]",
".",
"__setattr__",
"(",
"key",
",",
"fullj",
"[",
"\"samples\"",
"]",
"[",
"sample",
"]",
"[",
"key",
"]",
")",
"## ensure objects are object dicts",
"null",
".",
"dirs",
"=",
"ObjDict",
"(",
"null",
".",
"dirs",
")",
"null",
".",
"stats_files",
"=",
"ObjDict",
"(",
"null",
".",
"stats_files",
")",
"null",
".",
"stats_dfs",
"=",
"ObjDict",
"(",
"null",
".",
"stats_dfs",
")",
"null",
".",
"populations",
"=",
"ObjDict",
"(",
"null",
".",
"populations",
")",
"null",
".",
"outfiles",
"=",
"ObjDict",
"(",
"null",
".",
"outfiles",
")",
"return",
"null"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
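Before anything else, load_json resolves its input by trying path + ".json" first and the raw path second, expanding a leading "~" by hand, and only erroring out later if neither candidate could be read. The same resolution pattern in isolation (a sketch with hypothetical file names; this version stops at the first readable candidate, while the original simply attempts both):

import json
import os

def resolve_and_load(path):
    """Try '<path>.json' then the raw path, expanding '~' in each candidate.
    Returns parsed JSON from the first readable candidate, else None."""
    fullj = None
    for candidate in (path + ".json", path):
        candidate = candidate.replace("~", os.path.expanduser("~"))
        try:
            with open(candidate, "r") as infile:
                fullj = json.loads(infile.read())
            break
        except IOError:
            continue
    return fullj

data = resolve_and_load("~/demo_project/demo")   # hypothetical assembly path
print("loaded" if data is not None else "no saved assembly found")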
|
valid
|
_tup_and_byte
|
wat
|
ipyrad/load/load.py
|
def _tup_and_byte(obj):
""" wat """
# if this is a unicode string, return its string representation
if isinstance(obj, unicode):
return obj.encode('utf-8')
# if this is a list of values, return list of byteified values
if isinstance(obj, list):
return [_tup_and_byte(item) for item in obj]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(obj, dict):
if "__tuple__" in obj:
return tuple(_tup_and_byte(item) for item in obj["items"])
else:
return {
_tup_and_byte(key): _tup_and_byte(val) for \
key, val in obj.iteritems()
}
# if it's anything else, return it in its original form
return obj
|
def _tup_and_byte(obj):
""" wat """
# if this is a unicode string, return its string representation
if isinstance(obj, unicode):
return obj.encode('utf-8')
# if this is a list of values, return list of byteified values
if isinstance(obj, list):
return [_tup_and_byte(item) for item in obj]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(obj, dict):
if "__tuple__" in obj:
return tuple(_tup_and_byte(item) for item in obj["items"])
else:
return {
_tup_and_byte(key): _tup_and_byte(val) for \
key, val in obj.iteritems()
}
# if it's anything else, return it in its original form
return obj
|
[
"wat"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/load/load.py#L407-L429
|
[
"def",
"_tup_and_byte",
"(",
"obj",
")",
":",
"# if this is a unicode string, return its string representation",
"if",
"isinstance",
"(",
"obj",
",",
"unicode",
")",
":",
"return",
"obj",
".",
"encode",
"(",
"'utf-8'",
")",
"# if this is a list of values, return list of byteified values",
"if",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"return",
"[",
"_tup_and_byte",
"(",
"item",
")",
"for",
"item",
"in",
"obj",
"]",
"# if this is a dictionary, return dictionary of byteified keys and values",
"# but only if we haven't already byteified it",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"if",
"\"__tuple__\"",
"in",
"obj",
":",
"return",
"tuple",
"(",
"_tup_and_byte",
"(",
"item",
")",
"for",
"item",
"in",
"obj",
"[",
"\"items\"",
"]",
")",
"else",
":",
"return",
"{",
"_tup_and_byte",
"(",
"key",
")",
":",
"_tup_and_byte",
"(",
"val",
")",
"for",
"key",
",",
"val",
"in",
"obj",
".",
"iteritems",
"(",
")",
"}",
"# if it's anything else, return it in its original form",
"return",
"obj"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
Encoder.encode
|
function to encode json string
|
ipyrad/load/load.py
|
def encode(self, obj):
""" function to encode json string"""
def hint_tuples(item):
""" embeds __tuple__ hinter in json strings """
if isinstance(item, tuple):
return {'__tuple__': True, 'items': item}
if isinstance(item, list):
return [hint_tuples(e) for e in item]
if isinstance(item, dict):
return {
key: hint_tuples(val) for key, val in item.iteritems()
}
else:
return item
return super(Encoder, self).encode(hint_tuples(obj))
|
def encode(self, obj):
""" function to encode json string"""
def hint_tuples(item):
""" embeds __tuple__ hinter in json strings """
if isinstance(item, tuple):
return {'__tuple__': True, 'items': item}
if isinstance(item, list):
return [hint_tuples(e) for e in item]
if isinstance(item, dict):
return {
key: hint_tuples(val) for key, val in item.iteritems()
}
else:
return item
return super(Encoder, self).encode(hint_tuples(obj))
|
[
"function",
"to",
"encode",
"json",
"string"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/load/load.py#L388-L403
|
[
"def",
"encode",
"(",
"self",
",",
"obj",
")",
":",
"def",
"hint_tuples",
"(",
"item",
")",
":",
"\"\"\" embeds __tuple__ hinter in json strings \"\"\"",
"if",
"isinstance",
"(",
"item",
",",
"tuple",
")",
":",
"return",
"{",
"'__tuple__'",
":",
"True",
",",
"'items'",
":",
"item",
"}",
"if",
"isinstance",
"(",
"item",
",",
"list",
")",
":",
"return",
"[",
"hint_tuples",
"(",
"e",
")",
"for",
"e",
"in",
"item",
"]",
"if",
"isinstance",
"(",
"item",
",",
"dict",
")",
":",
"return",
"{",
"key",
":",
"hint_tuples",
"(",
"val",
")",
"for",
"key",
",",
"val",
"in",
"item",
".",
"iteritems",
"(",
")",
"}",
"else",
":",
"return",
"item",
"return",
"super",
"(",
"Encoder",
",",
"self",
")",
".",
"encode",
"(",
"hint_tuples",
"(",
"obj",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
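Encoder.encode and _tup_and_byte are two halves of one round trip: tuples are dumped as {"__tuple__": true, "items": [...]} and rebuilt by the object_hook on load. A Python 3 restatement of that round trip, using items() in place of the Python 2 iteritems(), dropping the unicode-to-bytes step that Python 3 no longer needs, and (as a small addition for the sketch) hinting tuple members recursively:

import json

class Encoder(json.JSONEncoder):
    """Tag tuples before encoding so they can be restored on load."""
    def encode(self, obj):
        def hint_tuples(item):
            if isinstance(item, tuple):
                return {"__tuple__": True, "items": [hint_tuples(e) for e in item]}
            if isinstance(item, list):
                return [hint_tuples(e) for e in item]
            if isinstance(item, dict):
                return {key: hint_tuples(val) for key, val in item.items()}
            return item
        return super(Encoder, self).encode(hint_tuples(obj))

def tup_hook(obj):
    """object_hook that rebuilds hinted tuples while loading."""
    if "__tuple__" in obj:
        return tuple(obj["items"])
    return obj

original = {"chunk": (0, 5000), "files": ["a.fastq", "b.fastq"]}
dumped = json.dumps(original, cls=Encoder)
restored = json.loads(dumped, object_hook=tup_hook)
assert restored == original
print(restored)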
|
valid
|
baba_panel_plot
|
signature...
|
ipyrad/plotting/baba_panel_plot.py
|
def baba_panel_plot(
ttree,
tests,
boots,
show_tip_labels=True,
show_test_labels=True,
use_edge_lengths=False,
collapse_outgroup=False,
pct_tree_x=0.4,
pct_tree_y=0.2,
alpha=3.0,
*args,
**kwargs):
"""
signature...
"""
## create Panel plot object and set height & width
bootsarr = np.array(boots)
panel = Panel(ttree, tests, bootsarr, alpha)
if not kwargs.get("width"):
panel.kwargs["width"] = min(1000, 50*len(panel.tree))
if not kwargs.get("height"):
panel.kwargs["height"] = min(1000, 50*len(panel.tests))
## update defaults with kwargs & update size based on ntips & ntests
kwargs.update(dict(pct_tree_x=pct_tree_x, pct_tree_y=pct_tree_y))
panel.kwargs.update(kwargs)
## create a canvas and a single cartesian coord system
canvas = toyplot.Canvas(height=panel.kwargs['height'], width=panel.kwargs['width'])
axes = canvas.cartesian(bounds=("10%", "90%", "5%", "95%"))
axes.show = False
## add panels to axes
panel.panel_tree(axes)
panel.panel_test(axes)
panel.panel_tip_labels(axes)
if isinstance(boots, np.ndarray):
panel.panel_results(axes)
return canvas, axes, panel
|
def baba_panel_plot(
ttree,
tests,
boots,
show_tip_labels=True,
show_test_labels=True,
use_edge_lengths=False,
collapse_outgroup=False,
pct_tree_x=0.4,
pct_tree_y=0.2,
alpha=3.0,
*args,
**kwargs):
"""
signature...
"""
## create Panel plot object and set height & width
bootsarr = np.array(boots)
panel = Panel(ttree, tests, bootsarr, alpha)
if not kwargs.get("width"):
panel.kwargs["width"] = min(1000, 50*len(panel.tree))
if not kwargs.get("height"):
panel.kwargs["height"] = min(1000, 50*len(panel.tests))
## update defaults with kwargs & update size based on ntips & ntests
kwargs.update(dict(pct_tree_x=pct_tree_x, pct_tree_y=pct_tree_y))
panel.kwargs.update(kwargs)
## create a canvas and a single cartesian coord system
canvas = toyplot.Canvas(height=panel.kwargs['height'], width=panel.kwargs['width'])
axes = canvas.cartesian(bounds=("10%", "90%", "5%", "95%"))
axes.show = False
## add panels to axes
panel.panel_tree(axes)
panel.panel_test(axes)
panel.panel_tip_labels(axes)
if isinstance(boots, np.ndarray):
panel.panel_results(axes)
return canvas, axes, panel
|
[
"signature",
"..."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/plotting/baba_panel_plot.py#L26-L66
|
[
"def",
"baba_panel_plot",
"(",
"ttree",
",",
"tests",
",",
"boots",
",",
"show_tip_labels",
"=",
"True",
",",
"show_test_labels",
"=",
"True",
",",
"use_edge_lengths",
"=",
"False",
",",
"collapse_outgroup",
"=",
"False",
",",
"pct_tree_x",
"=",
"0.4",
",",
"pct_tree_y",
"=",
"0.2",
",",
"alpha",
"=",
"3.0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"## create Panel plot object and set height & width",
"bootsarr",
"=",
"np",
".",
"array",
"(",
"boots",
")",
"panel",
"=",
"Panel",
"(",
"ttree",
",",
"tests",
",",
"bootsarr",
",",
"alpha",
")",
"if",
"not",
"kwargs",
".",
"get",
"(",
"\"width\"",
")",
":",
"panel",
".",
"kwargs",
"[",
"\"width\"",
"]",
"=",
"min",
"(",
"1000",
",",
"50",
"*",
"len",
"(",
"panel",
".",
"tree",
")",
")",
"if",
"not",
"kwargs",
".",
"get",
"(",
"\"height\"",
")",
":",
"panel",
".",
"kwargs",
"[",
"\"height\"",
"]",
"=",
"min",
"(",
"1000",
",",
"50",
"*",
"len",
"(",
"panel",
".",
"tests",
")",
")",
"## update defaults with kwargs & update size based on ntips & ntests",
"kwargs",
".",
"update",
"(",
"dict",
"(",
"pct_tree_x",
"=",
"pct_tree_x",
",",
"pct_tree_y",
"=",
"pct_tree_y",
")",
")",
"panel",
".",
"kwargs",
".",
"update",
"(",
"kwargs",
")",
"## create a canvas and a single cartesian coord system",
"canvas",
"=",
"toyplot",
".",
"Canvas",
"(",
"height",
"=",
"panel",
".",
"kwargs",
"[",
"'height'",
"]",
",",
"width",
"=",
"panel",
".",
"kwargs",
"[",
"'width'",
"]",
")",
"axes",
"=",
"canvas",
".",
"cartesian",
"(",
"bounds",
"=",
"(",
"\"10%\"",
",",
"\"90%\"",
",",
"\"5%\"",
",",
"\"95%\"",
")",
")",
"axes",
".",
"show",
"=",
"False",
"## add panels to axes",
"panel",
".",
"panel_tree",
"(",
"axes",
")",
"panel",
".",
"panel_test",
"(",
"axes",
")",
"panel",
".",
"panel_tip_labels",
"(",
"axes",
")",
"if",
"isinstance",
"(",
"boots",
",",
"np",
".",
"ndarray",
")",
":",
"panel",
".",
"panel_results",
"(",
"axes",
")",
"return",
"canvas",
",",
"axes",
",",
"panel"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
depthplot
|
plots histogram of coverages across clusters
|
ipyrad/plotting/coverageplots.py
|
def depthplot(data, samples=None, dims=(None,None), canvas=(None,None),
xmax=50, log=False, outprefix=None, use_maxdepth=False):
""" plots histogram of coverages across clusters"""
## select samples to be plotted, requires depths info
if not samples:
samples = data.samples.keys()
samples.sort()
subsamples = OrderedDict([(i, data.samples[i]) for i in samples])
## get canvas dimensions based on n-samples
if any(dims):
## user-supplied dimensions (...)
print("userdims")
else:
if len(subsamples) <= 4:
## set dimension to N samples
dims = (1, len(subsamples))
else:
dims = (len(subsamples)/4, 4)
## create canvas
if any(canvas):
print("usercanvas")
canvas = toyplot.Canvas(width=canvas[0], height=canvas[1])
else:
canvas = toyplot.Canvas(width=200*dims[1], height=150*dims[0])
## get all of the data arrays
for panel, sample in enumerate(subsamples):
## statistical called bins
statdat = subsamples[sample].depths
statdat = statdat[statdat >= data.paramsdict["mindepth_statistical"]]
if use_maxdepth:
statdat = {i:j for (i, j) in statdat if \
i < data.paramsdict["maxdepth"]}
sdat = np.histogram(statdat, range(50))
## majrule called bins
statdat = subsamples[sample].depths
statdat = statdat[statdat < data.paramsdict["mindepth_statistical"]]
statdat = statdat[statdat >= data.paramsdict["mindepth_majrule"]]
if use_maxdepth:
statdat = statdat[statdat < data.paramsdict["maxdepth"]]
mdat = np.histogram(statdat, range(50))
## excluded bins
tots = data.samples[sample].depths
tots = tots[tots < data.paramsdict["mindepth_majrule"]]
if use_maxdepth:
tots = tots[tots < data.paramsdict["maxdepth"]]
edat = np.histogram(tots, range(50))
## fill in each panel of canvas with a sample
axes = canvas.cartesian(grid=(dims[0], dims[1], panel), gutter=25)
axes.x.domain.xmax = xmax
axes.label.text = sample
if log:
axes.y.scale = "log"
# heights = np.column_stack((sdat,mdat,edat))
axes.bars(sdat)
axes.bars(edat)
axes.bars(mdat)
## return objects to be saved...
if outprefix:
toyplot.html.render(canvas, fobj=outprefix+".html")
toyplot.svg.render(canvas, fobj=outprefix+".svg")
|
[
"plots",
"histogram",
"of",
"coverages",
"across",
"clusters"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/plotting/coverageplots.py#L17-L86
|
[
"def",
"depthplot",
"(",
"data",
",",
"samples",
"=",
"None",
",",
"dims",
"=",
"(",
"None",
",",
"None",
")",
",",
"canvas",
"=",
"(",
"None",
",",
"None",
")",
",",
"xmax",
"=",
"50",
",",
"log",
"=",
"False",
",",
"outprefix",
"=",
"None",
",",
"use_maxdepth",
"=",
"False",
")",
":",
"## select samples to be plotted, requires depths info",
"if",
"not",
"samples",
":",
"samples",
"=",
"data",
".",
"samples",
".",
"keys",
"(",
")",
"samples",
".",
"sort",
"(",
")",
"subsamples",
"=",
"OrderedDict",
"(",
"[",
"(",
"i",
",",
"data",
".",
"samples",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"samples",
"]",
")",
"## get canvas dimensions based on n-samples",
"if",
"any",
"(",
"dims",
")",
":",
"## user-supplied dimensions (...)",
"print",
"(",
"\"userdims\"",
")",
"else",
":",
"if",
"len",
"(",
"subsamples",
")",
"<=",
"4",
":",
"## set dimension to N samples ",
"dims",
"=",
"(",
"1",
",",
"len",
"(",
"subsamples",
")",
")",
"else",
":",
"dims",
"=",
"(",
"len",
"(",
"subsamples",
")",
"/",
"4",
",",
"4",
")",
"## create canvas",
"if",
"any",
"(",
"canvas",
")",
":",
"print",
"(",
"\"usercanvas\"",
")",
"canvas",
"=",
"toyplot",
".",
"Canvas",
"(",
"width",
"=",
"canvas",
"[",
"0",
"]",
",",
"height",
"=",
"canvas",
"[",
"1",
"]",
")",
"else",
":",
"canvas",
"=",
"toyplot",
".",
"Canvas",
"(",
"width",
"=",
"200",
"*",
"dims",
"[",
"1",
"]",
",",
"height",
"=",
"150",
"*",
"dims",
"[",
"0",
"]",
")",
"## get all of the data arrays",
"for",
"panel",
",",
"sample",
"in",
"enumerate",
"(",
"subsamples",
")",
":",
"## statistical called bins",
"statdat",
"=",
"subsamples",
"[",
"sample",
"]",
".",
"depths",
"statdat",
"=",
"statdat",
"[",
"statdat",
">=",
"data",
".",
"paramsdict",
"[",
"\"mindepth_statistical\"",
"]",
"]",
"if",
"use_maxdepth",
":",
"statdat",
"=",
"{",
"i",
":",
"j",
"for",
"(",
"i",
",",
"j",
")",
"in",
"statdat",
"if",
"i",
"<",
"data",
".",
"paramsdict",
"[",
"\"maxdepth\"",
"]",
"}",
"sdat",
"=",
"np",
".",
"histogram",
"(",
"statdat",
",",
"range",
"(",
"50",
")",
")",
"## majrule called bins",
"statdat",
"=",
"subsamples",
"[",
"sample",
"]",
".",
"depths",
"statdat",
"=",
"statdat",
"[",
"statdat",
"<",
"data",
".",
"paramsdict",
"[",
"\"mindepth_statistical\"",
"]",
"]",
"statdat",
"=",
"statdat",
"[",
"statdat",
">=",
"data",
".",
"paramsdict",
"[",
"\"mindepth_majrule\"",
"]",
"]",
"if",
"use_maxdepth",
":",
"statdat",
"=",
"statdat",
"[",
"statdat",
"<",
"data",
".",
"paramsdict",
"[",
"\"maxdepth\"",
"]",
"]",
"mdat",
"=",
"np",
".",
"histogram",
"(",
"statdat",
",",
"range",
"(",
"50",
")",
")",
"## excluded bins",
"tots",
"=",
"data",
".",
"samples",
"[",
"sample",
"]",
".",
"depths",
"tots",
"=",
"tots",
"[",
"tots",
"<",
"data",
".",
"paramsdict",
"[",
"\"mindepth_majrule\"",
"]",
"]",
"if",
"use_maxdepth",
":",
"tots",
"=",
"tots",
"[",
"tots",
"<",
"data",
".",
"paramsdict",
"[",
"\"maxdepth\"",
"]",
"]",
"edat",
"=",
"np",
".",
"histogram",
"(",
"tots",
",",
"range",
"(",
"50",
")",
")",
"## fill in each panel of canvas with a sample",
"axes",
"=",
"canvas",
".",
"cartesian",
"(",
"grid",
"=",
"(",
"dims",
"[",
"0",
"]",
",",
"dims",
"[",
"1",
"]",
",",
"panel",
")",
",",
"gutter",
"=",
"25",
")",
"axes",
".",
"x",
".",
"domain",
".",
"xmax",
"=",
"xmax",
"axes",
".",
"label",
".",
"text",
"=",
"sample",
"if",
"log",
":",
"axes",
".",
"y",
".",
"scale",
"=",
"\"log\"",
"# heights = np.column_stack((sdat,mdat,edat))",
"axes",
".",
"bars",
"(",
"sdat",
")",
"axes",
".",
"bars",
"(",
"edat",
")",
"axes",
".",
"bars",
"(",
"mdat",
")",
"## return objects to be saved...",
"if",
"outprefix",
":",
"toyplot",
".",
"html",
".",
"render",
"(",
"canvas",
",",
"fobj",
"=",
"outprefix",
"+",
"\".html\"",
")",
"toyplot",
".",
"svg",
".",
"render",
"(",
"canvas",
",",
"fobj",
"=",
"outprefix",
"+",
"\".svg\"",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
_parse_00
|
return 00 outfile as a pandas DataFrame
|
ipyrad/analysis/bpp.py
|
def _parse_00(ofile):
"""
return 00 outfile as a pandas DataFrame
"""
with open(ofile) as infile:
## read in the results summary from the end of the outfile
arr = np.array(
[" "] + infile.read().split("Summary of MCMC results\n\n\n")[1:][0]\
.strip().split())
## reshape array
rows = 12
cols = (arr.shape[0] + 1) / rows
arr = arr.reshape(rows, cols)
## make into labeled data frame
df = pd.DataFrame(
data=arr[1:, 1:],
columns=arr[0, 1:],
index=arr[1:, 0],
).T
return df
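A self-contained sketch of the reshape-and-label step above, using a made-up stand-in for the whitespace-split "Summary of MCMC results" block (real bpp A00 output has 12 rows and different values). Note that cols = (arr.shape[0] + 1) / rows relies on Python 2 integer division; a Python 3 port would need //.

import numpy as np
import pandas as pd

## stand-in for the split summary block: a header row of parameter names
## padded with a leading blank cell, then one row per summary statistic
tokens = [" ",      "theta_1", "theta_2",
          "mean",   "0.0021",  "0.0042",
          "median", "0.0019",  "0.0040",
          "S.D",    "0.0004",  "0.0008"]
rows = 4                                        ## 12 in _parse_00
arr = np.array(tokens).reshape(rows, len(tokens) // rows)

## same labeling as _parse_00: parameters become rows, statistics columns
df = pd.DataFrame(data=arr[1:, 1:], columns=arr[0, 1:], index=arr[1:, 0]).T
print(df)
## prints a 2 x 3 table: rows theta_1/theta_2, columns mean/median/S.D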
|
[
"return",
"00",
"outfile",
"as",
"a",
"pandas",
"DataFrame"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/bpp.py#L714-L735
|
[
"def",
"_parse_00",
"(",
"ofile",
")",
":",
"with",
"open",
"(",
"ofile",
")",
"as",
"infile",
":",
"## read in the results summary from the end of the outfile",
"arr",
"=",
"np",
".",
"array",
"(",
"[",
"\" \"",
"]",
"+",
"infile",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"Summary of MCMC results\\n\\n\\n\"",
")",
"[",
"1",
":",
"]",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
")",
"## reshape array ",
"rows",
"=",
"12",
"cols",
"=",
"(",
"arr",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
")",
"/",
"rows",
"arr",
"=",
"arr",
".",
"reshape",
"(",
"rows",
",",
"cols",
")",
"## make into labeled data frame",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"arr",
"[",
"1",
":",
",",
"1",
":",
"]",
",",
"columns",
"=",
"arr",
"[",
"0",
",",
"1",
":",
"]",
",",
"index",
"=",
"arr",
"[",
"1",
":",
",",
"0",
"]",
",",
")",
".",
"T",
"return",
"df"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
_parse_01
|
a subfunction for summarizing results
|
ipyrad/analysis/bpp.py
|
def _parse_01(ofiles, individual=False):
"""
a subfunction for summarizing results
"""
## parse results from outfiles
cols = []
dats = []
for ofile in ofiles:
## parse file
with open(ofile) as infile:
dat = infile.read()
lastbits = dat.split(".mcmc.txt\n\n")[1:]
results = lastbits[0].split("\n\n")[0].split()
## get shape from ...
shape = (((len(results) - 3) / 4), 4)
dat = np.array(results[3:]).reshape(shape)
cols.append(dat[:, 3].astype(float))
if not individual:
## get mean results across reps
cols = np.array(cols)
cols = cols.sum(axis=0) / len(ofiles) #10.
dat[:, 3] = cols.astype(str)
## format as a DF
df = pd.DataFrame(dat[:, 1:])
df.columns = ["delim", "prior", "posterior"]
nspecies = 1 + np.array([list(i) for i in dat[:, 1]], dtype=int).sum(axis=1)
df["nspecies"] = nspecies
return df
else:
## get mean results across reps
#return cols
res = []
for i in xrange(len(cols)):
x = dat
x[:, 3] = cols[i].astype(str)
x = pd.DataFrame(x[:, 1:])
x.columns = ['delim', 'prior', 'posterior']
nspecies = 1 + np.array([list(i) for i in dat[:, 1]], dtype=int).sum(axis=1)
x["nspecies"] = nspecies
res.append(x)
return res
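The nspecies line above is the only non-obvious arithmetic: each row of the delimitation column is a binary string in which a '1' marks a guide-tree node that is split, so the species count is one plus the digit sum. A tiny self-contained sketch (the delimitation strings are made up; as in _parse_00, the shape computation in the function relies on Python 2 integer division).

import numpy as np

## hypothetical delimitation strings as parsed from bpp A10/A11 output
delims = ["000", "100", "110", "111"]

## same arithmetic as _parse_01: nspecies = 1 + number of '1' digits
nspecies = 1 + np.array([list(i) for i in delims], dtype=int).sum(axis=1)
print(dict(zip(delims, nspecies.tolist())))
## {'000': 1, '100': 2, '110': 3, '111': 4}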
|
[
"a",
"subfunction",
"for",
"summarizing",
"results"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/bpp.py#L741-L787
|
[
"def",
"_parse_01",
"(",
"ofiles",
",",
"individual",
"=",
"False",
")",
":",
"## parse results from outfiles",
"cols",
"=",
"[",
"]",
"dats",
"=",
"[",
"]",
"for",
"ofile",
"in",
"ofiles",
":",
"## parse file",
"with",
"open",
"(",
"ofile",
")",
"as",
"infile",
":",
"dat",
"=",
"infile",
".",
"read",
"(",
")",
"lastbits",
"=",
"dat",
".",
"split",
"(",
"\".mcmc.txt\\n\\n\"",
")",
"[",
"1",
":",
"]",
"results",
"=",
"lastbits",
"[",
"0",
"]",
".",
"split",
"(",
"\"\\n\\n\"",
")",
"[",
"0",
"]",
".",
"split",
"(",
")",
"## get shape from ...",
"shape",
"=",
"(",
"(",
"(",
"len",
"(",
"results",
")",
"-",
"3",
")",
"/",
"4",
")",
",",
"4",
")",
"dat",
"=",
"np",
".",
"array",
"(",
"results",
"[",
"3",
":",
"]",
")",
".",
"reshape",
"(",
"shape",
")",
"cols",
".",
"append",
"(",
"dat",
"[",
":",
",",
"3",
"]",
".",
"astype",
"(",
"float",
")",
")",
"if",
"not",
"individual",
":",
"## get mean results across reps",
"cols",
"=",
"np",
".",
"array",
"(",
"cols",
")",
"cols",
"=",
"cols",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"/",
"len",
"(",
"ofiles",
")",
"#10.",
"dat",
"[",
":",
",",
"3",
"]",
"=",
"cols",
".",
"astype",
"(",
"str",
")",
"## format as a DF",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"dat",
"[",
":",
",",
"1",
":",
"]",
")",
"df",
".",
"columns",
"=",
"[",
"\"delim\"",
",",
"\"prior\"",
",",
"\"posterior\"",
"]",
"nspecies",
"=",
"1",
"+",
"np",
".",
"array",
"(",
"[",
"list",
"(",
"i",
")",
"for",
"i",
"in",
"dat",
"[",
":",
",",
"1",
"]",
"]",
",",
"dtype",
"=",
"int",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"df",
"[",
"\"nspecies\"",
"]",
"=",
"nspecies",
"return",
"df",
"else",
":",
"## get mean results across reps",
"#return cols",
"res",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"cols",
")",
")",
":",
"x",
"=",
"dat",
"x",
"[",
":",
",",
"3",
"]",
"=",
"cols",
"[",
"i",
"]",
".",
"astype",
"(",
"str",
")",
"x",
"=",
"pd",
".",
"DataFrame",
"(",
"x",
"[",
":",
",",
"1",
":",
"]",
")",
"x",
".",
"columns",
"=",
"[",
"'delim'",
",",
"'prior'",
",",
"'posterior'",
"]",
"nspecies",
"=",
"1",
"+",
"np",
".",
"array",
"(",
"[",
"list",
"(",
"i",
")",
"for",
"i",
"in",
"dat",
"[",
":",
",",
"1",
"]",
"]",
",",
"dtype",
"=",
"int",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"x",
"[",
"\"nspecies\"",
"]",
"=",
"nspecies",
"res",
".",
"append",
"(",
"x",
")",
"return",
"res"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
Bpp._load_existing_results
|
Load existing results files for an object with this workdir and name.
This does NOT reload the parameter settings for the object...
|
ipyrad/analysis/bpp.py
|
def _load_existing_results(self, name, workdir):
"""
Load existing results files for an object with this workdir and name.
This does NOT reload the parameter settings for the object...
"""
## get mcmcs
path = os.path.realpath(os.path.join(self.workdir, self.name))
mcmcs = glob.glob(path+"_r*.mcmc.txt")
outs = glob.glob(path+"_r*.out.txt")
trees = glob.glob(path+"_r*.tre")
for mcmcfile in mcmcs:
if mcmcfile not in self.files.mcmcfiles:
self.files.mcmcfiles.append(mcmcfile)
for outfile in outs:
if outfile not in self.files.outfiles:
self.files.outfiles.append(outfile)
for tree in trees:
if tree not in self.files.treefiles:
self.files.treefiles.append(tree)
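For reference, a small sketch of the filename convention these globs rely on (the workdir and name below are placeholders): run() names each replicate "<name>_r<rep>", so the files it leaves behind look like the paths printed here and are re-collected on load.

import glob
import os

## placeholders for self.workdir and self.name
path = os.path.realpath(os.path.join("analysis-bpp", "test"))

## per-replicate files written by run(): test_r0.mcmc.txt, test_r0.out.txt,
## test_r0.tre, test_r1.mcmc.txt, ... (empty lists if nothing has run yet)
print(glob.glob(path + "_r*.mcmc.txt"))
print(glob.glob(path + "_r*.out.txt"))
print(glob.glob(path + "_r*.tre"))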
|
[
"Load",
"existing",
"results",
"files",
"for",
"an",
"object",
"with",
"this",
"workdir",
"and",
"name",
".",
"This",
"does",
"NOT",
"reload",
"the",
"parameter",
"settings",
"for",
"the",
"object",
"..."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/bpp.py#L274-L293
|
[
"def",
"_load_existing_results",
"(",
"self",
",",
"name",
",",
"workdir",
")",
":",
"## get mcmcs",
"path",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"self",
".",
"name",
")",
")",
"mcmcs",
"=",
"glob",
".",
"glob",
"(",
"path",
"+",
"\"_r*.mcmc.txt\"",
")",
"outs",
"=",
"glob",
".",
"glob",
"(",
"path",
"+",
"\"_r*.out.txt\"",
")",
"trees",
"=",
"glob",
".",
"glob",
"(",
"path",
"+",
"\"_r*.tre\"",
")",
"for",
"mcmcfile",
"in",
"mcmcs",
":",
"if",
"mcmcfile",
"not",
"in",
"self",
".",
"files",
".",
"mcmcfiles",
":",
"self",
".",
"files",
".",
"mcmcfiles",
".",
"append",
"(",
"mcmcfile",
")",
"for",
"outfile",
"in",
"outs",
":",
"if",
"outfile",
"not",
"in",
"self",
".",
"files",
".",
"outfiles",
":",
"self",
".",
"files",
".",
"outfiles",
".",
"append",
"(",
"outfile",
")",
"for",
"tree",
"in",
"trees",
":",
"if",
"tree",
"not",
"in",
"self",
".",
"files",
".",
"treefiles",
":",
"self",
".",
"files",
".",
"treefiles",
".",
"append",
"(",
"tree",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
Bpp.run
|
Submits bpp jobs to run on a cluster (ipyparallel Client).
If not set, the seed for the random number generator is randomly
drawn, and if multiple reps are submitted (nreps>1) then each will
draw subsequent random seeds after that. An ipyclient connection
is required. Asynchronous result objects are stored in the bpp
object submitting the jobs.
Parameters:
-----------
nreps (int):
submits nreps replicate jobs to the cluster each with a different
random seed drawn starting from the starting seed.
ipyclient (ipyparallel.Client)
an ipyparallel.Client object connected to a running cluster.
quiet (bool):
whether to print that the jobs have been submitted
randomize_order (bool):
if True then when maxloci is set this will randomly sample a
different set of N loci in each replicate, rather than sampling
just the first N loci < maxloci.
force (bool):
Overwrite existing files with the same name. Default=False, skip
over existing files.
|
ipyrad/analysis/bpp.py
|
def run(self,
ipyclient,
nreps=1,
quiet=False,
randomize_order=False,
force=False,
):
"""
Submits bpp jobs to run on a cluster (ipyparallel Client).
        If not set, the seed for the random number generator is randomly
        drawn, and if multiple reps are submitted (nreps>1) then each will
        draw subsequent random seeds after that. An ipyclient connection
is required. Asynchronous result objects are stored in the bpp
object submitting the jobs.
Parameters:
-----------
nreps (int):
submits nreps replicate jobs to the cluster each with a different
random seed drawn starting from the starting seed.
ipyclient (ipyparallel.Client)
an ipyparallel.Client object connected to a running cluster.
quiet (bool):
whether to print that the jobs have been submitted
randomize_order (bool):
if True then when maxloci is set this will randomly sample a
different set of N loci in each replicate, rather than sampling
just the first N loci < maxloci.
force (bool):
Overwrite existing files with the same name. Default=False, skip
over existing files.
"""
## is this running algorithm 00?
is_alg00 = (not self.params.infer_sptree) and (not self.params.infer_delimit)
## clear out pre-existing files for this object
self.files.mcmcfiles = []
self.files.outfiles = []
self.files.treefiles = []
self.asyncs = []
## initiate random seed
np.random.seed(self.params.seed)
## load-balancer
lbview = ipyclient.load_balanced_view()
## send jobs
for job in xrange(nreps):
## make repname and make ctl filename
self._name = "{}_r{}".format(self.name, job)
ctlhandle = os.path.realpath(
os.path.join(self.workdir, "{}.ctl.txt".format(self._name)))
## skip if ctlfile exists
if (not force) and (os.path.exists(ctlhandle)):
print("Named ctl file already exists. Use force=True to" \
+" overwrite\nFilename:{}".format(ctlhandle))
else:
## change seed and ctl for each rep, this writes into the ctl
## file the correct name for the other files which share the
## same rep number in their names.
#self.params._seed = np.random.randint(0, 1e9, 1)[0]
self._write_mapfile()
#if randomize_order:
self._write_seqfile(randomize_order=randomize_order)
ctlfile = self._write_ctlfile()
## submit to engines
async = lbview.apply(_call_bpp, *(self._kwargs["binary"], ctlfile, is_alg00))
self.asyncs.append(async)
## save tree file if alg 00
if is_alg00:
self.files.treefiles.append(
ctlfile.rsplit(".ctl.txt", 1)[0] + ".tre")
if self.asyncs and (not quiet):
sys.stderr.write("submitted {} bpp jobs [{}] ({} loci)\n"\
.format(nreps, self.name, self._nloci))
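A hypothetical end-to-end sketch (the input file, guide tree, imap dict, and parameter values are placeholders, not from the source; the constructor kwargs match those used in Bpp.copy further down). It assumes an ipcluster instance is already running so ipyparallel.Client() can connect. Note the method body itself is Python 2 code: it uses xrange and assigns to a variable named async, which is a reserved word from Python 3.7 onward.

import ipyparallel as ipp
from ipyrad.analysis.bpp import Bpp   ## module path from this record

## placeholder inputs: a loci file, a guide tree, and a species-to-samples map
imap = {"A": ["a1", "a2"], "B": ["b1", "b2"], "C": ["c1"]}
b = Bpp(
    name="test",
    data="analysis/project.alleles.loci",
    workdir="analysis-bpp",
    guidetree="((A,B),C);",
    imap=imap,
)
b.params.burnin = 1000
b.params.nsample = 10000

## connect to a running ipcluster and submit two replicate jobs
ipyclient = ipp.Client()
b.run(ipyclient, nreps=2, randomize_order=True, force=True)
ipyclient.wait()        ## block until the submitted engines finish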
|
[
"Submits",
"bpp",
"jobs",
"to",
"run",
"on",
"a",
"cluster",
"(",
"ipyparallel",
"Client",
")",
".",
"The",
"seed",
"for",
"the",
"random",
"number",
"generator",
"if",
"not",
"set",
"is",
"randomly",
"drawn",
"and",
"if",
"multiple",
"reps",
"are",
"submitted",
"(",
"nreps",
">",
"1",
")",
"then",
"each",
"will",
"draw",
"a",
"subsequent",
"random",
"seeds",
"after",
"that",
".",
"An",
"ipyclient",
"connection",
"is",
"required",
".",
"Asynchronous",
"result",
"objects",
"are",
"stored",
"in",
"the",
"bpp",
"object",
"submitting",
"the",
"jobs",
".",
"Parameters",
":",
"-----------",
"nreps",
"(",
"int",
")",
":",
"submits",
"nreps",
"replicate",
"jobs",
"to",
"the",
"cluster",
"each",
"with",
"a",
"different",
"random",
"seed",
"drawn",
"starting",
"from",
"the",
"starting",
"seed",
".",
"ipyclient",
"(",
"ipyparallel",
".",
"Client",
")",
"an",
"ipyparallel",
".",
"Client",
"object",
"connected",
"to",
"a",
"running",
"cluster",
".",
"quiet",
"(",
"bool",
")",
":",
"whether",
"to",
"print",
"that",
"the",
"jobs",
"have",
"been",
"submitted",
"randomize_order",
"(",
"bool",
")",
":",
"if",
"True",
"then",
"when",
"maxloci",
"is",
"set",
"this",
"will",
"randomly",
"sample",
"a",
"different",
"set",
"of",
"N",
"loci",
"in",
"each",
"replicate",
"rather",
"than",
"sampling",
"just",
"the",
"first",
"N",
"loci",
"<",
"maxloci",
".",
"force",
"(",
"bool",
")",
":",
"Overwrite",
"existing",
"files",
"with",
"the",
"same",
"name",
".",
"Default",
"=",
"False",
"skip",
"over",
"existing",
"files",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/bpp.py#L296-L377
|
[
"def",
"run",
"(",
"self",
",",
"ipyclient",
",",
"nreps",
"=",
"1",
",",
"quiet",
"=",
"False",
",",
"randomize_order",
"=",
"False",
",",
"force",
"=",
"False",
",",
")",
":",
"## is this running algorithm 00?",
"is_alg00",
"=",
"(",
"not",
"self",
".",
"params",
".",
"infer_sptree",
")",
"and",
"(",
"not",
"self",
".",
"params",
".",
"infer_delimit",
")",
"## clear out pre-existing files for this object",
"self",
".",
"files",
".",
"mcmcfiles",
"=",
"[",
"]",
"self",
".",
"files",
".",
"outfiles",
"=",
"[",
"]",
"self",
".",
"files",
".",
"treefiles",
"=",
"[",
"]",
"self",
".",
"asyncs",
"=",
"[",
"]",
"## initiate random seed",
"np",
".",
"random",
".",
"seed",
"(",
"self",
".",
"params",
".",
"seed",
")",
"## load-balancer",
"lbview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
")",
"## send jobs",
"for",
"job",
"in",
"xrange",
"(",
"nreps",
")",
":",
"## make repname and make ctl filename",
"self",
".",
"_name",
"=",
"\"{}_r{}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"job",
")",
"ctlhandle",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"\"{}.ctl.txt\"",
".",
"format",
"(",
"self",
".",
"_name",
")",
")",
")",
"## skip if ctlfile exists",
"if",
"(",
"not",
"force",
")",
"and",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"ctlhandle",
")",
")",
":",
"print",
"(",
"\"Named ctl file already exists. Use force=True to\"",
"+",
"\" overwrite\\nFilename:{}\"",
".",
"format",
"(",
"ctlhandle",
")",
")",
"else",
":",
"## change seed and ctl for each rep, this writes into the ctl",
"## file the correct name for the other files which share the ",
"## same rep number in their names.",
"#self.params._seed = np.random.randint(0, 1e9, 1)[0]",
"self",
".",
"_write_mapfile",
"(",
")",
"#if randomize_order:",
"self",
".",
"_write_seqfile",
"(",
"randomize_order",
"=",
"randomize_order",
")",
"ctlfile",
"=",
"self",
".",
"_write_ctlfile",
"(",
")",
"## submit to engines",
"async",
"=",
"lbview",
".",
"apply",
"(",
"_call_bpp",
",",
"*",
"(",
"self",
".",
"_kwargs",
"[",
"\"binary\"",
"]",
",",
"ctlfile",
",",
"is_alg00",
")",
")",
"self",
".",
"asyncs",
".",
"append",
"(",
"async",
")",
"## save tree file if alg 00",
"if",
"is_alg00",
":",
"self",
".",
"files",
".",
"treefiles",
".",
"append",
"(",
"ctlfile",
".",
"rsplit",
"(",
"\".ctl.txt\"",
",",
"1",
")",
"[",
"0",
"]",
"+",
"\".tre\"",
")",
"if",
"self",
".",
"asyncs",
"and",
"(",
"not",
"quiet",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"submitted {} bpp jobs [{}] ({} loci)\\n\"",
".",
"format",
"(",
"nreps",
",",
"self",
".",
"name",
",",
"self",
".",
"_nloci",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
Bpp.write_bpp_files
|
Writes bpp files (.ctl, .seq, .imap) to the working directory.
Parameters:
------------
randomize_order (bool):
whether to randomize the locus order, this will allow you to
sample different subsets of loci in different replicates when
using the filters.maxloci option.
quiet (bool):
whether to print info to stderr when finished.
|
ipyrad/analysis/bpp.py
|
def write_bpp_files(self, randomize_order=False, quiet=False):
"""
Writes bpp files (.ctl, .seq, .imap) to the working directory.
Parameters:
------------
randomize_order (bool):
whether to randomize the locus order, this will allow you to
sample different subsets of loci in different replicates when
using the filters.maxloci option.
quiet (bool):
whether to print info to stderr when finished.
"""
## remove any old jobs with this same job name
self._name = self.name
oldjobs = glob.glob(os.path.join(self.workdir, self._name+"*.ctl.txt"))
for job in oldjobs:
os.remove(job)
## check params types
## ...
## write tmp files for the job
self._write_seqfile(randomize_order=randomize_order)
self._write_mapfile()#name=True)
self._write_ctlfile()
if not quiet:
sys.stderr.write("input files created for job {} ({} loci)\n"\
.format(self._name, self._nloci))
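A short follow-on sketch, reusing the hypothetical b object from the run() example above: write_bpp_files only stages the input files, which is handy for inspecting the generated ctl file before committing cluster time.

## stage the .ctl/.seq/.imap inputs into b.workdir without submitting jobs;
## any older "test*.ctl.txt" files in the workdir are removed first
b.write_bpp_files(randomize_order=True)
## stderr: input files created for job test (<nloci> loci)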
|
[
"Writes",
"bpp",
"files",
"(",
".",
"ctl",
".",
"seq",
".",
"imap",
")",
"to",
"the",
"working",
"directory",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/bpp.py#L381-L411
|
[
"def",
"write_bpp_files",
"(",
"self",
",",
"randomize_order",
"=",
"False",
",",
"quiet",
"=",
"False",
")",
":",
"## remove any old jobs with this same job name",
"self",
".",
"_name",
"=",
"self",
".",
"name",
"oldjobs",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"self",
".",
"_name",
"+",
"\"*.ctl.txt\"",
")",
")",
"for",
"job",
"in",
"oldjobs",
":",
"os",
".",
"remove",
"(",
"job",
")",
"## check params types",
"## ...",
"## write tmp files for the job",
"self",
".",
"_write_seqfile",
"(",
"randomize_order",
"=",
"randomize_order",
")",
"self",
".",
"_write_mapfile",
"(",
")",
"#name=True)",
"self",
".",
"_write_ctlfile",
"(",
")",
"if",
"not",
"quiet",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"input files created for job {} ({} loci)\\n\"",
".",
"format",
"(",
"self",
".",
"_name",
",",
"self",
".",
"_nloci",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
Bpp._write_ctlfile
|
write outfile with any args in argdict
|
ipyrad/analysis/bpp.py
|
def _write_ctlfile(self):#, rep=None):
""" write outfile with any args in argdict """
## A string to store ctl info
ctl = []
## write the top header info
ctl.append("seed = {}".format(self.params.seed))
ctl.append("seqfile = {}".format(self.seqfile))
ctl.append("Imapfile = {}".format(self.mapfile))
path = os.path.realpath(os.path.join(self.workdir, self._name))
mcmcfile = "{}.mcmc.txt".format(path)
outfile = "{}.out.txt".format(path)
if mcmcfile not in self.files.mcmcfiles:
self.files.mcmcfiles.append(mcmcfile)
if outfile not in self.files.outfiles:
self.files.outfiles.append(outfile)
ctl.append("mcmcfile = {}".format(mcmcfile))
ctl.append("outfile = {}".format(outfile))
## number of loci (checks that seq file exists and parses from there)
ctl.append("nloci = {}".format(self._nloci))
ctl.append("usedata = {}".format(self.params.usedata))
ctl.append("cleandata = {}".format(self.params.cleandata))
## infer species tree
if self.params.infer_sptree:
ctl.append("speciestree = 1 0.4 0.2 0.1")
else:
ctl.append("speciestree = 0")
## infer delimitation (with algorithm 1 by default)
ctl.append("speciesdelimitation = {} {} {}"\
.format(self.params.infer_delimit,
self.params.delimit_alg[0],
" ".join([str(i) for i in self.params.delimit_alg[1:]])
)
)
## get tree values
nspecies = str(len(self.imap))
species = " ".join(sorted(self.imap))
ninds = " ".join([str(len(self.imap[i])) for i in sorted(self.imap)])
ctl.append(SPECIESTREE.format(nspecies, species, ninds, self.tree.write(format=9)))
## priors
ctl.append("thetaprior = {} {}".format(*self.params.thetaprior))
ctl.append("tauprior = {} {} {}".format(*self.params.tauprior))
## other values, fixed for now
ctl.append("finetune = 1: {}".format(" ".join([str(i) for i in self.params.finetune])))
#CTL.append("finetune = 1: 1 0.002 0.01 0.01 0.02 0.005 1.0")
ctl.append("print = 1 0 0 0")
ctl.append("burnin = {}".format(self.params.burnin))
ctl.append("sampfreq = {}".format(self.params.sampfreq))
ctl.append("nsample = {}".format(self.params.nsample))
## write out the ctl file
ctlhandle = os.path.realpath(
"{}.ctl.txt".format(os.path.join(self.workdir, self._name)))
# if isinstance(rep, int):
# ctlhandle = os.path.realpath(
# "{}-r{}.ctl.txt".format(os.path.join(self.workdir, self._name), rep))
# else:
# ctlhandle = os.path.realpath(
# "{}.ctl.txt".format(os.path.join(self.workdir, self._name)))
with open(ctlhandle, 'w') as out:
out.write("\n".join(ctl))
return ctlhandle
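Assembled in order, the control file written above looks roughly like the listing below. All values are placeholders: the seqfile/Imapfile paths come from _write_seqfile/_write_mapfile and the species block from the SPECIESTREE template, none of which are shown in this record.

seed = <params.seed>
seqfile = <path written by _write_seqfile()>
Imapfile = <path written by _write_mapfile()>
mcmcfile = <workdir>/<name>.mcmc.txt
outfile = <workdir>/<name>.out.txt
nloci = <parsed from the seq file>
usedata = <params.usedata>
cleandata = <params.cleandata>
speciestree = <0, or "1 0.4 0.2 0.1" when params.infer_sptree is set>
speciesdelimitation = <infer_delimit> <delimit_alg[0]> <delimit_alg[1:]>
<species & tree block rendered from the SPECIESTREE template>
thetaprior = <a> <b>
tauprior = <a> <b> <c>
finetune = 1: <space-joined params.finetune>
print = 1 0 0 0
burnin = <params.burnin>
sampfreq = <params.sampfreq>
nsample = <params.nsample>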
|
[
"write",
"outfile",
"with",
"any",
"args",
"in",
"argdict"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/bpp.py#L513-L583
|
[
"def",
"_write_ctlfile",
"(",
"self",
")",
":",
"#, rep=None):",
"## A string to store ctl info",
"ctl",
"=",
"[",
"]",
"## write the top header info",
"ctl",
".",
"append",
"(",
"\"seed = {}\"",
".",
"format",
"(",
"self",
".",
"params",
".",
"seed",
")",
")",
"ctl",
".",
"append",
"(",
"\"seqfile = {}\"",
".",
"format",
"(",
"self",
".",
"seqfile",
")",
")",
"ctl",
".",
"append",
"(",
"\"Imapfile = {}\"",
".",
"format",
"(",
"self",
".",
"mapfile",
")",
")",
"path",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"self",
".",
"_name",
")",
")",
"mcmcfile",
"=",
"\"{}.mcmc.txt\"",
".",
"format",
"(",
"path",
")",
"outfile",
"=",
"\"{}.out.txt\"",
".",
"format",
"(",
"path",
")",
"if",
"mcmcfile",
"not",
"in",
"self",
".",
"files",
".",
"mcmcfiles",
":",
"self",
".",
"files",
".",
"mcmcfiles",
".",
"append",
"(",
"mcmcfile",
")",
"if",
"outfile",
"not",
"in",
"self",
".",
"files",
".",
"outfiles",
":",
"self",
".",
"files",
".",
"outfiles",
".",
"append",
"(",
"outfile",
")",
"ctl",
".",
"append",
"(",
"\"mcmcfile = {}\"",
".",
"format",
"(",
"mcmcfile",
")",
")",
"ctl",
".",
"append",
"(",
"\"outfile = {}\"",
".",
"format",
"(",
"outfile",
")",
")",
"## number of loci (checks that seq file exists and parses from there)",
"ctl",
".",
"append",
"(",
"\"nloci = {}\"",
".",
"format",
"(",
"self",
".",
"_nloci",
")",
")",
"ctl",
".",
"append",
"(",
"\"usedata = {}\"",
".",
"format",
"(",
"self",
".",
"params",
".",
"usedata",
")",
")",
"ctl",
".",
"append",
"(",
"\"cleandata = {}\"",
".",
"format",
"(",
"self",
".",
"params",
".",
"cleandata",
")",
")",
"## infer species tree",
"if",
"self",
".",
"params",
".",
"infer_sptree",
":",
"ctl",
".",
"append",
"(",
"\"speciestree = 1 0.4 0.2 0.1\"",
")",
"else",
":",
"ctl",
".",
"append",
"(",
"\"speciestree = 0\"",
")",
"## infer delimitation (with algorithm 1 by default)",
"ctl",
".",
"append",
"(",
"\"speciesdelimitation = {} {} {}\"",
".",
"format",
"(",
"self",
".",
"params",
".",
"infer_delimit",
",",
"self",
".",
"params",
".",
"delimit_alg",
"[",
"0",
"]",
",",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"self",
".",
"params",
".",
"delimit_alg",
"[",
"1",
":",
"]",
"]",
")",
")",
")",
"## get tree values",
"nspecies",
"=",
"str",
"(",
"len",
"(",
"self",
".",
"imap",
")",
")",
"species",
"=",
"\" \"",
".",
"join",
"(",
"sorted",
"(",
"self",
".",
"imap",
")",
")",
"ninds",
"=",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"len",
"(",
"self",
".",
"imap",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"sorted",
"(",
"self",
".",
"imap",
")",
"]",
")",
"ctl",
".",
"append",
"(",
"SPECIESTREE",
".",
"format",
"(",
"nspecies",
",",
"species",
",",
"ninds",
",",
"self",
".",
"tree",
".",
"write",
"(",
"format",
"=",
"9",
")",
")",
")",
"## priors",
"ctl",
".",
"append",
"(",
"\"thetaprior = {} {}\"",
".",
"format",
"(",
"*",
"self",
".",
"params",
".",
"thetaprior",
")",
")",
"ctl",
".",
"append",
"(",
"\"tauprior = {} {} {}\"",
".",
"format",
"(",
"*",
"self",
".",
"params",
".",
"tauprior",
")",
")",
"## other values, fixed for now",
"ctl",
".",
"append",
"(",
"\"finetune = 1: {}\"",
".",
"format",
"(",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"self",
".",
"params",
".",
"finetune",
"]",
")",
")",
")",
"#CTL.append(\"finetune = 1: 1 0.002 0.01 0.01 0.02 0.005 1.0\")",
"ctl",
".",
"append",
"(",
"\"print = 1 0 0 0\"",
")",
"ctl",
".",
"append",
"(",
"\"burnin = {}\"",
".",
"format",
"(",
"self",
".",
"params",
".",
"burnin",
")",
")",
"ctl",
".",
"append",
"(",
"\"sampfreq = {}\"",
".",
"format",
"(",
"self",
".",
"params",
".",
"sampfreq",
")",
")",
"ctl",
".",
"append",
"(",
"\"nsample = {}\"",
".",
"format",
"(",
"self",
".",
"params",
".",
"nsample",
")",
")",
"## write out the ctl file",
"ctlhandle",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"\"{}.ctl.txt\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"self",
".",
"_name",
")",
")",
")",
"# if isinstance(rep, int):",
"# ctlhandle = os.path.realpath(",
"# \"{}-r{}.ctl.txt\".format(os.path.join(self.workdir, self._name), rep))",
"# else:",
"# ctlhandle = os.path.realpath(",
"# \"{}.ctl.txt\".format(os.path.join(self.workdir, self._name)))",
"with",
"open",
"(",
"ctlhandle",
",",
"'w'",
")",
"as",
"out",
":",
"out",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"ctl",
")",
")",
"return",
"ctlhandle"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
Bpp.copy
|
Returns a copy of the bpp object with the same parameter settings
but with the files.mcmcfiles and files.outfiles attributes cleared,
and with a new 'name' attribute.
Parameters
----------
name (str):
A name for the new copied bpp object that will be used for the
output files created by the object.
|
ipyrad/analysis/bpp.py
|
def copy(self, name, load_existing_results=False):
"""
Returns a copy of the bpp object with the same parameter settings
but with the files.mcmcfiles and files.outfiles attributes cleared,
and with a new 'name' attribute.
Parameters
----------
name (str):
A name for the new copied bpp object that will be used for the
output files created by the object.
"""
## make deepcopy of self.__dict__ but do not copy async objects
subdict = {i:j for i,j in self.__dict__.iteritems() if i != "asyncs"}
newdict = copy.deepcopy(subdict)
## make back into a bpp object
if name == self.name:
raise Exception("new object must have a different 'name' than its parent")
newobj = Bpp(
name=name,
data=newdict["files"].data,
workdir=newdict["workdir"],
guidetree=newdict["tree"].write(),
imap={i:j for i, j in newdict["imap"].items()},
copied=True,
load_existing_results=load_existing_results,
)
## update special dict attributes but not files
for key, val in newobj.params.__dict__.iteritems():
newobj.params.__setattr__(key, self.params.__getattribute__(key))
for key, val in newobj.filters.__dict__.iteritems():
newobj.filters.__setattr__(key, self.filters.__getattribute__(key))
## new object must have a different name than it's parent
return newobj
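A short hypothetical sketch, again reusing b and ipyclient from the run() example: clone the object under a new name (a different name is required), flip the copy into species-delimitation mode, and run it. Parameter and filter settings carry over while the result-file lists start empty.

## clone with a new name, switch on delimitation (other settings are
## inherited from the parent), and submit replicate jobs for the copy
b2 = b.copy(name="test-delimit")
b2.params.infer_delimit = 1
b2.params.infer_sptree = 0
b2.run(ipyclient, nreps=2, force=True)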
|
[
"Returns",
"a",
"copy",
"of",
"the",
"bpp",
"object",
"with",
"the",
"same",
"parameter",
"settings",
"but",
"with",
"the",
"files",
".",
"mcmcfiles",
"and",
"files",
".",
"outfiles",
"attributes",
"cleared",
"and",
"with",
"a",
"new",
"name",
"attribute",
".",
"Parameters",
"----------",
"name",
"(",
"str",
")",
":",
"A",
"name",
"for",
"the",
"new",
"copied",
"bpp",
"object",
"that",
"will",
"be",
"used",
"for",
"the",
"output",
"files",
"created",
"by",
"the",
"object",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/bpp.py#L587-L625
|
[
"def",
"copy",
"(",
"self",
",",
"name",
",",
"load_existing_results",
"=",
"False",
")",
":",
"## make deepcopy of self.__dict__ but do not copy async objects",
"subdict",
"=",
"{",
"i",
":",
"j",
"for",
"i",
",",
"j",
"in",
"self",
".",
"__dict__",
".",
"iteritems",
"(",
")",
"if",
"i",
"!=",
"\"asyncs\"",
"}",
"newdict",
"=",
"copy",
".",
"deepcopy",
"(",
"subdict",
")",
"## make back into a bpp object",
"if",
"name",
"==",
"self",
".",
"name",
":",
"raise",
"Exception",
"(",
"\"new object must have a different 'name' than its parent\"",
")",
"newobj",
"=",
"Bpp",
"(",
"name",
"=",
"name",
",",
"data",
"=",
"newdict",
"[",
"\"files\"",
"]",
".",
"data",
",",
"workdir",
"=",
"newdict",
"[",
"\"workdir\"",
"]",
",",
"guidetree",
"=",
"newdict",
"[",
"\"tree\"",
"]",
".",
"write",
"(",
")",
",",
"imap",
"=",
"{",
"i",
":",
"j",
"for",
"i",
",",
"j",
"in",
"newdict",
"[",
"\"imap\"",
"]",
".",
"items",
"(",
")",
"}",
",",
"copied",
"=",
"True",
",",
"load_existing_results",
"=",
"load_existing_results",
",",
")",
"## update special dict attributes but not files",
"for",
"key",
",",
"val",
"in",
"newobj",
".",
"params",
".",
"__dict__",
".",
"iteritems",
"(",
")",
":",
"newobj",
".",
"params",
".",
"__setattr__",
"(",
"key",
",",
"self",
".",
"params",
".",
"__getattribute__",
"(",
"key",
")",
")",
"for",
"key",
",",
"val",
"in",
"newobj",
".",
"filters",
".",
"__dict__",
".",
"iteritems",
"(",
")",
":",
"newobj",
".",
"filters",
".",
"__setattr__",
"(",
"key",
",",
"self",
".",
"filters",
".",
"__getattribute__",
"(",
"key",
")",
")",
"## new object must have a different name than it's parent",
"return",
"newobj"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
Bpp.summarize_results
|
Prints a summarized table of results from replicate runs, or,
if individual_results=True, then returns a list of separate
dataframes for each replicate run.
|
ipyrad/analysis/bpp.py
|
def summarize_results(self, individual_results=False):
"""
Prints a summarized table of results from replicate runs, or,
    if individual_results=True, then returns a list of separate
dataframes for each replicate run.
"""
## return results depending on algorithm
## algorithm 00
if (not self.params.infer_delimit) & (not self.params.infer_sptree):
if individual_results:
## return a list of parsed CSV results
return [_parse_00(i) for i in self.files.outfiles]
else:
## concatenate each CSV and then get stats w/ describe
return pd.concat(
[pd.read_csv(i, sep='\t', index_col=0) \
for i in self.files.mcmcfiles]).describe().T
## algorithm 01
if self.params.infer_delimit & (not self.params.infer_sptree):
return _parse_01(self.files.outfiles, individual=individual_results)
## others
else:
return "summary function not yet ready for this type of result"
|
[
"Prints",
"a",
"summarized",
"table",
"of",
"results",
"from",
"replicate",
"runs",
"or",
"if",
"individual_result",
"=",
"True",
"then",
"returns",
"a",
"list",
"of",
"separate",
"dataframes",
"for",
"each",
"replicate",
"run",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/bpp.py#L629-L655
|
[
"def",
"summarize_results",
"(",
"self",
",",
"individual_results",
"=",
"False",
")",
":",
"## return results depending on algorithm",
"## algorithm 00",
"if",
"(",
"not",
"self",
".",
"params",
".",
"infer_delimit",
")",
"&",
"(",
"not",
"self",
".",
"params",
".",
"infer_sptree",
")",
":",
"if",
"individual_results",
":",
"## return a list of parsed CSV results",
"return",
"[",
"_parse_00",
"(",
"i",
")",
"for",
"i",
"in",
"self",
".",
"files",
".",
"outfiles",
"]",
"else",
":",
"## concatenate each CSV and then get stats w/ describe",
"return",
"pd",
".",
"concat",
"(",
"[",
"pd",
".",
"read_csv",
"(",
"i",
",",
"sep",
"=",
"'\\t'",
",",
"index_col",
"=",
"0",
")",
"for",
"i",
"in",
"self",
".",
"files",
".",
"mcmcfiles",
"]",
")",
".",
"describe",
"(",
")",
".",
"T",
"## algorithm 01",
"if",
"self",
".",
"params",
".",
"infer_delimit",
"&",
"(",
"not",
"self",
".",
"params",
".",
"infer_sptree",
")",
":",
"return",
"_parse_01",
"(",
"self",
".",
"files",
".",
"outfiles",
",",
"individual",
"=",
"individual_results",
")",
"## others",
"else",
":",
"return",
"\"summary function not yet ready for this type of result\""
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
persistent_popen_align3
|
notes
|
ipyrad/assemble/cluster_across.py
|
def persistent_popen_align3(data, samples, chunk):
""" notes """
## data are already chunked, read in the whole thing
with open(chunk, 'rb') as infile:
clusts = infile.read().split("//\n//\n")[:-1]
## snames to ensure sorted order
samples.sort(key=lambda x: x.name)
snames = [sample.name for sample in samples]
## make a tmparr to store metadata (this can get huge, consider using h5)
maxlen = data._hackersonly["max_fragment_length"] + 20
indels = np.zeros((len(samples), len(clusts), maxlen), dtype=np.bool_)
duples = np.zeros(len(clusts), dtype=np.bool_)
## create a persistent shell for running muscle in.
proc = sps.Popen(["bash"],
stdin=sps.PIPE,
stdout=sps.PIPE,
universal_newlines=True)
## iterate over clusters until finished
allstack = []
#istack = []
for ldx in xrange(len(clusts)):
## new alignment string for read1s and read2s
aligned = []
istack = []
lines = clusts[ldx].strip().split("\n")
names = lines[::2]
seqs = lines[1::2]
align1 = ""
align2 = ""
## we don't allow seeds with no hits to make it here, currently
#if len(names) == 1:
# aligned.append(clusts[ldx].replace(">", "").strip())
## find duplicates and skip aligning but keep it for downstream.
if len(names) != len(set([x.rsplit("_", 1)[0] for x in names])):
duples[ldx] = 1
istack = ["{}\n{}".format(i[1:], j) for i, j in zip(names, seqs)]
#aligned.append(clusts[ldx].replace(">", "").strip())
else:
## append counter to names because muscle doesn't retain order
names = [">{};*{}".format(j[1:], i) for i, j in enumerate(names)]
try:
## try to split names on nnnn splitter
clust1, clust2 = zip(*[i.split("nnnn") for i in seqs])
## make back into strings
cl1 = "\n".join(itertools.chain(*zip(names, clust1)))
cl2 = "\n".join(itertools.chain(*zip(names, clust2)))
## store allele (lowercase) info
shape = (len(seqs), max([len(i) for i in seqs]))
arrseqs = np.zeros(shape, dtype="S1")
for row in range(arrseqs.shape[0]):
seqsrow = seqs[row]
arrseqs[row, :len(seqsrow)] = list(seqsrow)
amask = np.char.islower(arrseqs)
save_alleles = np.any(amask)
## send align1 to the bash shell
## TODO: check for pipe-overflow here and use files for i/o
cmd1 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl1, ipyrad.bins.muscle, "//")
print(cmd1, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align1 += line
## send align2 to the bash shell
## TODO: check for pipe-overflow here and use files for i/o
cmd2 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl2, ipyrad.bins.muscle, "//")
print(cmd2, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align2 += line
## join the aligned read1 and read2 and ensure name order match
la1 = align1[1:].split("\n>")
la2 = align2[1:].split("\n>")
dalign1 = dict([i.split("\n", 1) for i in la1])
dalign2 = dict([i.split("\n", 1) for i in la2])
keys = sorted(dalign1.keys(), key=DEREP)
keys2 = sorted(dalign2.keys(), key=DEREP)
## Make sure R1 and R2 actually exist for each sample. If not
## bail out of this cluster.
if not len(keys) == len(keys2):
LOGGER.error("R1 and R2 results differ in length: "\
+ "\nR1 - {}\nR2 - {}".format(keys, keys2))
continue
## impute allele (lowercase) info back into alignments
for kidx, key in enumerate(keys):
concatseq = dalign1[key].replace("\n", "")+\
"nnnn"+dalign2[key].replace("\n", "")
## impute alleles
if save_alleles:
newmask = np.zeros(len(concatseq), dtype=np.bool_)
## check for indels and impute to amask
indidx = np.where(np.array(list(concatseq)) == "-")[0]
if indidx.size:
allrows = np.arange(amask.shape[1])
mask = np.ones(allrows.shape[0], dtype=np.bool_)
for idx in indidx:
if idx < mask.shape[0]:
mask[idx] = False
not_idx = allrows[mask == 1]
## fill in new data into all other spots
newmask[not_idx] = amask[kidx, :not_idx.shape[0]]
else:
newmask = amask[kidx]
## lower the alleles
concatarr = np.array(list(concatseq))
concatarr[newmask] = np.char.lower(concatarr[newmask])
concatseq = concatarr.tostring()
#LOGGER.info(concatseq)
## fill list with aligned data
aligned.append("{}\n{}".format(key, concatseq))
## put into a dict for writing to file
#aligned = []
#for key in keys:
# aligned.append("\n".join(
# [key,
# dalign1[key].replace("\n", "")+"nnnn"+\
# dalign2[key].replace("\n", "")]))
except IndexError as inst:
LOGGER.debug("Error in PE - ldx: {}".format())
LOGGER.debug("Vars: {}".format(dict(globals(), **locals())))
raise
except ValueError:
## make back into strings
cl1 = "\n".join(["\n".join(i) for i in zip(names, seqs)])
## store allele (lowercase) info
shape = (len(seqs), max([len(i) for i in seqs]))
arrseqs = np.zeros(shape, dtype="S1")
for row in range(arrseqs.shape[0]):
seqsrow = seqs[row]
arrseqs[row, :len(seqsrow)] = list(seqsrow)
amask = np.char.islower(arrseqs)
save_alleles = np.any(amask)
## send align1 to the bash shell (TODO: check for pipe-overflow)
cmd1 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl1, ipyrad.bins.muscle, "//")
print(cmd1, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align1 += line
## ensure name order match
la1 = align1[1:].split("\n>")
dalign1 = dict([i.split("\n", 1) for i in la1])
keys = sorted(dalign1.keys(), key=DEREP)
## put into dict for writing to file
for kidx, key in enumerate(keys):
concatseq = dalign1[key].replace("\n", "")
## impute alleles
if save_alleles:
newmask = np.zeros(len(concatseq), dtype=np.bool_)
## check for indels and impute to amask
indidx = np.where(np.array(list(concatseq)) == "-")[0]
if indidx.size:
allrows = np.arange(amask.shape[1])
mask = np.ones(allrows.shape[0], dtype=np.bool_)
for idx in indidx:
if idx < mask.shape[0]:
mask[idx] = False
not_idx = allrows[mask == 1]
## fill in new data into all other spots
newmask[not_idx] = amask[kidx, :not_idx.shape[0]]
else:
newmask = amask[kidx]
## lower the alleles
concatarr = np.array(list(concatseq))
concatarr[newmask] = np.char.lower(concatarr[newmask])
concatseq = concatarr.tostring()
## fill list with aligned data
aligned.append("{}\n{}".format(key, concatseq))
## put aligned locus in list
#aligned.append("\n".join(inner_aligned))
## enforce maxlen on aligned seqs
aseqs = np.vstack([list(i.split("\n")[1]) for i in aligned])
LOGGER.info("\naseqs here: %s", aseqs)
## index names by snames order
sidxs = [snames.index(key.rsplit("_", 1)[0]) for key in keys]
thislen = min(maxlen, aseqs.shape[1])
for idx in xrange(aseqs.shape[0]):
## enter into stack
newn = aligned[idx].split(";", 1)[0]
#newn = key[idx].split(";", 1)[0]
istack.append("{}\n{}".format(newn, aseqs[idx, :thislen].tostring()))
## name index in sorted list (indels order)
sidx = sidxs[idx]
indels[sidx, ldx, :thislen] = aseqs[idx, :thislen] == "-"
if istack:
allstack.append("\n".join(istack))
#LOGGER.debug("\n\nSTACK (%s)\n%s\n", duples[ldx], "\n".join(istack))
## cleanup
proc.stdout.close()
if proc.stderr:
proc.stderr.close()
proc.stdin.close()
proc.wait()
#LOGGER.info("\n\nALLSTACK %s\n", "\n".join(i) for i in allstack[:5]])
## write to file after
odx = chunk.rsplit("_")[-1]
alignfile = os.path.join(data.tmpdir, "align_{}.fa".format(odx))
with open(alignfile, 'wb') as outfile:
outfile.write("\n//\n//\n".join(allstack)+"\n")
os.remove(chunk)
## save indels array to tmp dir
ifile = os.path.join(data.tmpdir, "indels_{}.tmp.npy".format(odx))
np.save(ifile, indels)
dfile = os.path.join(data.tmpdir, "duples_{}.tmp.npy".format(odx))
np.save(dfile, duples)
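A distilled, self-contained sketch of the persistent-shell pattern the function above is built on: keep one bash process alive, pipe each cluster through the aligner, and read stdout until the "//" sentinel printed after it. The aligner is replaced here by cat so the sketch runs without a muscle binary, and the fasta strings are made up. Unlike the Python 2 original, the sketch flushes stdin explicitly, which Python 3's buffered pipes require.

from __future__ import print_function
import subprocess as sps

## one long-lived shell shared across all clusters
proc = sps.Popen(["bash"],
                 stdin=sps.PIPE,
                 stdout=sps.PIPE,
                 universal_newlines=True)

clusters = [">a;*0\nACGT\n>b;*1\nACGA",
            ">c;*0\nTTGA\n>d;*1\nTTGG"]

for fasta in clusters:
    ## muscle would sit where `cat -` is: echo the sequences into the aligner,
    ## then print a sentinel so the reader knows this cluster's output is done
    cmd = "echo -e '{}' | cat - ; echo {}".format(fasta, "//")
    print(cmd, file=proc.stdin)
    proc.stdin.flush()
    aligned = ""
    for line in iter(proc.stdout.readline, "//\n"):
        aligned += line
    print(aligned.strip())

## cleanup mirrors the tail of persistent_popen_align3
proc.stdin.close()
proc.stdout.close()
proc.wait()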
|
def persistent_popen_align3(data, samples, chunk):
""" notes """
## data are already chunked, read in the whole thing
with open(chunk, 'rb') as infile:
clusts = infile.read().split("//\n//\n")[:-1]
## snames to ensure sorted order
samples.sort(key=lambda x: x.name)
snames = [sample.name for sample in samples]
## make a tmparr to store metadata (this can get huge, consider using h5)
maxlen = data._hackersonly["max_fragment_length"] + 20
indels = np.zeros((len(samples), len(clusts), maxlen), dtype=np.bool_)
duples = np.zeros(len(clusts), dtype=np.bool_)
## create a persistent shell for running muscle in.
proc = sps.Popen(["bash"],
stdin=sps.PIPE,
stdout=sps.PIPE,
universal_newlines=True)
## iterate over clusters until finished
allstack = []
#istack = []
for ldx in xrange(len(clusts)):
## new alignment string for read1s and read2s
aligned = []
istack = []
lines = clusts[ldx].strip().split("\n")
names = lines[::2]
seqs = lines[1::2]
align1 = ""
align2 = ""
## we don't allow seeds with no hits to make it here, currently
#if len(names) == 1:
# aligned.append(clusts[ldx].replace(">", "").strip())
## find duplicates and skip aligning but keep it for downstream.
if len(names) != len(set([x.rsplit("_", 1)[0] for x in names])):
duples[ldx] = 1
istack = ["{}\n{}".format(i[1:], j) for i, j in zip(names, seqs)]
#aligned.append(clusts[ldx].replace(">", "").strip())
else:
## append counter to names because muscle doesn't retain order
names = [">{};*{}".format(j[1:], i) for i, j in enumerate(names)]
try:
## try to split names on nnnn splitter
clust1, clust2 = zip(*[i.split("nnnn") for i in seqs])
## make back into strings
cl1 = "\n".join(itertools.chain(*zip(names, clust1)))
cl2 = "\n".join(itertools.chain(*zip(names, clust2)))
## store allele (lowercase) info
shape = (len(seqs), max([len(i) for i in seqs]))
arrseqs = np.zeros(shape, dtype="S1")
for row in range(arrseqs.shape[0]):
seqsrow = seqs[row]
arrseqs[row, :len(seqsrow)] = list(seqsrow)
amask = np.char.islower(arrseqs)
save_alleles = np.any(amask)
## send align1 to the bash shell
## TODO: check for pipe-overflow here and use files for i/o
cmd1 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl1, ipyrad.bins.muscle, "//")
print(cmd1, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align1 += line
## send align2 to the bash shell
## TODO: check for pipe-overflow here and use files for i/o
cmd2 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl2, ipyrad.bins.muscle, "//")
print(cmd2, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align2 += line
## join the aligned read1 and read2 and ensure name order match
la1 = align1[1:].split("\n>")
la2 = align2[1:].split("\n>")
dalign1 = dict([i.split("\n", 1) for i in la1])
dalign2 = dict([i.split("\n", 1) for i in la2])
keys = sorted(dalign1.keys(), key=DEREP)
keys2 = sorted(dalign2.keys(), key=DEREP)
## Make sure R1 and R2 actually exist for each sample. If not
## bail out of this cluster.
if not len(keys) == len(keys2):
LOGGER.error("R1 and R2 results differ in length: "\
+ "\nR1 - {}\nR2 - {}".format(keys, keys2))
continue
## impute allele (lowercase) info back into alignments
for kidx, key in enumerate(keys):
concatseq = dalign1[key].replace("\n", "")+\
"nnnn"+dalign2[key].replace("\n", "")
## impute alleles
if save_alleles:
newmask = np.zeros(len(concatseq), dtype=np.bool_)
## check for indels and impute to amask
indidx = np.where(np.array(list(concatseq)) == "-")[0]
if indidx.size:
allrows = np.arange(amask.shape[1])
mask = np.ones(allrows.shape[0], dtype=np.bool_)
for idx in indidx:
if idx < mask.shape[0]:
mask[idx] = False
not_idx = allrows[mask == 1]
## fill in new data into all other spots
newmask[not_idx] = amask[kidx, :not_idx.shape[0]]
else:
newmask = amask[kidx]
## lower the alleles
concatarr = np.array(list(concatseq))
concatarr[newmask] = np.char.lower(concatarr[newmask])
concatseq = concatarr.tostring()
#LOGGER.info(concatseq)
## fill list with aligned data
aligned.append("{}\n{}".format(key, concatseq))
## put into a dict for writing to file
#aligned = []
#for key in keys:
# aligned.append("\n".join(
# [key,
# dalign1[key].replace("\n", "")+"nnnn"+\
# dalign2[key].replace("\n", "")]))
except IndexError as inst:
LOGGER.debug("Error in PE - ldx: {}".format())
LOGGER.debug("Vars: {}".format(dict(globals(), **locals())))
raise
except ValueError:
## make back into strings
cl1 = "\n".join(["\n".join(i) for i in zip(names, seqs)])
## store allele (lowercase) info
shape = (len(seqs), max([len(i) for i in seqs]))
arrseqs = np.zeros(shape, dtype="S1")
for row in range(arrseqs.shape[0]):
seqsrow = seqs[row]
arrseqs[row, :len(seqsrow)] = list(seqsrow)
amask = np.char.islower(arrseqs)
save_alleles = np.any(amask)
## send align1 to the bash shell (TODO: check for pipe-overflow)
cmd1 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl1, ipyrad.bins.muscle, "//")
print(cmd1, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align1 += line
## ensure name order match
la1 = align1[1:].split("\n>")
dalign1 = dict([i.split("\n", 1) for i in la1])
keys = sorted(dalign1.keys(), key=DEREP)
## put into dict for writing to file
for kidx, key in enumerate(keys):
concatseq = dalign1[key].replace("\n", "")
## impute alleles
if save_alleles:
newmask = np.zeros(len(concatseq), dtype=np.bool_)
## check for indels and impute to amask
indidx = np.where(np.array(list(concatseq)) == "-")[0]
if indidx.size:
allrows = np.arange(amask.shape[1])
mask = np.ones(allrows.shape[0], dtype=np.bool_)
for idx in indidx:
if idx < mask.shape[0]:
mask[idx] = False
not_idx = allrows[mask == 1]
## fill in new data into all other spots
newmask[not_idx] = amask[kidx, :not_idx.shape[0]]
else:
newmask = amask[kidx]
## lower the alleles
concatarr = np.array(list(concatseq))
concatarr[newmask] = np.char.lower(concatarr[newmask])
concatseq = concatarr.tostring()
## fill list with aligned data
aligned.append("{}\n{}".format(key, concatseq))
## put aligned locus in list
#aligned.append("\n".join(inner_aligned))
## enforce maxlen on aligned seqs
aseqs = np.vstack([list(i.split("\n")[1]) for i in aligned])
LOGGER.info("\naseqs here: %s", aseqs)
## index names by snames order
sidxs = [snames.index(key.rsplit("_", 1)[0]) for key in keys]
thislen = min(maxlen, aseqs.shape[1])
for idx in xrange(aseqs.shape[0]):
## enter into stack
newn = aligned[idx].split(";", 1)[0]
#newn = key[idx].split(";", 1)[0]
istack.append("{}\n{}".format(newn, aseqs[idx, :thislen].tostring()))
## name index in sorted list (indels order)
sidx = sidxs[idx]
indels[sidx, ldx, :thislen] = aseqs[idx, :thislen] == "-"
if istack:
allstack.append("\n".join(istack))
#LOGGER.debug("\n\nSTACK (%s)\n%s\n", duples[ldx], "\n".join(istack))
## cleanup
proc.stdout.close()
if proc.stderr:
proc.stderr.close()
proc.stdin.close()
proc.wait()
#LOGGER.info("\n\nALLSTACK %s\n", "\n".join(i) for i in allstack[:5]])
## write to file after
odx = chunk.rsplit("_")[-1]
alignfile = os.path.join(data.tmpdir, "align_{}.fa".format(odx))
with open(alignfile, 'wb') as outfile:
outfile.write("\n//\n//\n".join(allstack)+"\n")
os.remove(chunk)
## save indels array to tmp dir
ifile = os.path.join(data.tmpdir, "indels_{}.tmp.npy".format(odx))
np.save(ifile, indels)
dfile = os.path.join(data.tmpdir, "duples_{}.tmp.npy".format(odx))
np.save(dfile, duples)
|
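Note: persistent_popen_align3 keeps a single bash process alive and pipes every cluster through muscle with an "echo ... | muscle ; echo //" command, reading stdout until the "//" sentinel. Below is a minimal, hypothetical sketch of that I/O pattern only; "tr a-z A-Z" stands in for the muscle binary so the sketch runs anywhere, and the explicit flush() is an added assumption to cope with default pipe buffering.

import subprocess as sps

proc = sps.Popen(["bash"], stdin=sps.PIPE, stdout=sps.PIPE, universal_newlines=True)

def run_through_shell(fasta, cmd="tr a-z A-Z"):
    """Send one cluster through the persistent shell; read stdout up to the // sentinel."""
    print("echo -e '{}' | {} ; echo //".format(fasta, cmd), file=proc.stdin)
    proc.stdin.flush()                       # assumed; not in the source, which relies on Py2 buffering
    out = ""
    for line in iter(proc.stdout.readline, "//\n"):
        out += line
    return out

print(run_through_shell(">a;*0\nacgtacgt\n>b;*1\nacgaacgt"))
proc.stdin.close()
proc.wait()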
[
"notes"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L46-L287
|
[
"def",
"persistent_popen_align3",
"(",
"data",
",",
"samples",
",",
"chunk",
")",
":",
"## data are already chunked, read in the whole thing",
"with",
"open",
"(",
"chunk",
",",
"'rb'",
")",
"as",
"infile",
":",
"clusts",
"=",
"infile",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"//\\n//\\n\"",
")",
"[",
":",
"-",
"1",
"]",
"## snames to ensure sorted order",
"samples",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"name",
")",
"snames",
"=",
"[",
"sample",
".",
"name",
"for",
"sample",
"in",
"samples",
"]",
"## make a tmparr to store metadata (this can get huge, consider using h5)",
"maxlen",
"=",
"data",
".",
"_hackersonly",
"[",
"\"max_fragment_length\"",
"]",
"+",
"20",
"indels",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"samples",
")",
",",
"len",
"(",
"clusts",
")",
",",
"maxlen",
")",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"duples",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"clusts",
")",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"## create a persistent shell for running muscle in. ",
"proc",
"=",
"sps",
".",
"Popen",
"(",
"[",
"\"bash\"",
"]",
",",
"stdin",
"=",
"sps",
".",
"PIPE",
",",
"stdout",
"=",
"sps",
".",
"PIPE",
",",
"universal_newlines",
"=",
"True",
")",
"## iterate over clusters until finished",
"allstack",
"=",
"[",
"]",
"#istack = [] ",
"for",
"ldx",
"in",
"xrange",
"(",
"len",
"(",
"clusts",
")",
")",
":",
"## new alignment string for read1s and read2s",
"aligned",
"=",
"[",
"]",
"istack",
"=",
"[",
"]",
"lines",
"=",
"clusts",
"[",
"ldx",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"names",
"=",
"lines",
"[",
":",
":",
"2",
"]",
"seqs",
"=",
"lines",
"[",
"1",
":",
":",
"2",
"]",
"align1",
"=",
"\"\"",
"align2",
"=",
"\"\"",
"## we don't allow seeds with no hits to make it here, currently",
"#if len(names) == 1:",
"# aligned.append(clusts[ldx].replace(\">\", \"\").strip())",
"## find duplicates and skip aligning but keep it for downstream.",
"if",
"len",
"(",
"names",
")",
"!=",
"len",
"(",
"set",
"(",
"[",
"x",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"0",
"]",
"for",
"x",
"in",
"names",
"]",
")",
")",
":",
"duples",
"[",
"ldx",
"]",
"=",
"1",
"istack",
"=",
"[",
"\"{}\\n{}\"",
".",
"format",
"(",
"i",
"[",
"1",
":",
"]",
",",
"j",
")",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"names",
",",
"seqs",
")",
"]",
"#aligned.append(clusts[ldx].replace(\">\", \"\").strip())",
"else",
":",
"## append counter to names because muscle doesn't retain order",
"names",
"=",
"[",
"\">{};*{}\"",
".",
"format",
"(",
"j",
"[",
"1",
":",
"]",
",",
"i",
")",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"names",
")",
"]",
"try",
":",
"## try to split names on nnnn splitter",
"clust1",
",",
"clust2",
"=",
"zip",
"(",
"*",
"[",
"i",
".",
"split",
"(",
"\"nnnn\"",
")",
"for",
"i",
"in",
"seqs",
"]",
")",
"## make back into strings",
"cl1",
"=",
"\"\\n\"",
".",
"join",
"(",
"itertools",
".",
"chain",
"(",
"*",
"zip",
"(",
"names",
",",
"clust1",
")",
")",
")",
"cl2",
"=",
"\"\\n\"",
".",
"join",
"(",
"itertools",
".",
"chain",
"(",
"*",
"zip",
"(",
"names",
",",
"clust2",
")",
")",
")",
"## store allele (lowercase) info",
"shape",
"=",
"(",
"len",
"(",
"seqs",
")",
",",
"max",
"(",
"[",
"len",
"(",
"i",
")",
"for",
"i",
"in",
"seqs",
"]",
")",
")",
"arrseqs",
"=",
"np",
".",
"zeros",
"(",
"shape",
",",
"dtype",
"=",
"\"S1\"",
")",
"for",
"row",
"in",
"range",
"(",
"arrseqs",
".",
"shape",
"[",
"0",
"]",
")",
":",
"seqsrow",
"=",
"seqs",
"[",
"row",
"]",
"arrseqs",
"[",
"row",
",",
":",
"len",
"(",
"seqsrow",
")",
"]",
"=",
"list",
"(",
"seqsrow",
")",
"amask",
"=",
"np",
".",
"char",
".",
"islower",
"(",
"arrseqs",
")",
"save_alleles",
"=",
"np",
".",
"any",
"(",
"amask",
")",
"## send align1 to the bash shell",
"## TODO: check for pipe-overflow here and use files for i/o ",
"cmd1",
"=",
"\"echo -e '{}' | {} -quiet -in - ; echo {}\"",
".",
"format",
"(",
"cl1",
",",
"ipyrad",
".",
"bins",
".",
"muscle",
",",
"\"//\"",
")",
"print",
"(",
"cmd1",
",",
"file",
"=",
"proc",
".",
"stdin",
")",
"## read the stdout by line until splitter is reached",
"for",
"line",
"in",
"iter",
"(",
"proc",
".",
"stdout",
".",
"readline",
",",
"\"//\\n\"",
")",
":",
"align1",
"+=",
"line",
"## send align2 to the bash shell",
"## TODO: check for pipe-overflow here and use files for i/o ",
"cmd2",
"=",
"\"echo -e '{}' | {} -quiet -in - ; echo {}\"",
".",
"format",
"(",
"cl2",
",",
"ipyrad",
".",
"bins",
".",
"muscle",
",",
"\"//\"",
")",
"print",
"(",
"cmd2",
",",
"file",
"=",
"proc",
".",
"stdin",
")",
"## read the stdout by line until splitter is reached",
"for",
"line",
"in",
"iter",
"(",
"proc",
".",
"stdout",
".",
"readline",
",",
"\"//\\n\"",
")",
":",
"align2",
"+=",
"line",
"## join the aligned read1 and read2 and ensure name order match",
"la1",
"=",
"align1",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\"\\n>\"",
")",
"la2",
"=",
"align2",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\"\\n>\"",
")",
"dalign1",
"=",
"dict",
"(",
"[",
"i",
".",
"split",
"(",
"\"\\n\"",
",",
"1",
")",
"for",
"i",
"in",
"la1",
"]",
")",
"dalign2",
"=",
"dict",
"(",
"[",
"i",
".",
"split",
"(",
"\"\\n\"",
",",
"1",
")",
"for",
"i",
"in",
"la2",
"]",
")",
"keys",
"=",
"sorted",
"(",
"dalign1",
".",
"keys",
"(",
")",
",",
"key",
"=",
"DEREP",
")",
"keys2",
"=",
"sorted",
"(",
"dalign2",
".",
"keys",
"(",
")",
",",
"key",
"=",
"DEREP",
")",
"## Make sure R1 and R2 actually exist for each sample. If not",
"## bail out of this cluster.",
"if",
"not",
"len",
"(",
"keys",
")",
"==",
"len",
"(",
"keys2",
")",
":",
"LOGGER",
".",
"error",
"(",
"\"R1 and R2 results differ in length: \"",
"+",
"\"\\nR1 - {}\\nR2 - {}\"",
".",
"format",
"(",
"keys",
",",
"keys2",
")",
")",
"continue",
"## impute allele (lowercase) info back into alignments",
"for",
"kidx",
",",
"key",
"in",
"enumerate",
"(",
"keys",
")",
":",
"concatseq",
"=",
"dalign1",
"[",
"key",
"]",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
"+",
"\"nnnn\"",
"+",
"dalign2",
"[",
"key",
"]",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
"## impute alleles",
"if",
"save_alleles",
":",
"newmask",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"concatseq",
")",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"## check for indels and impute to amask",
"indidx",
"=",
"np",
".",
"where",
"(",
"np",
".",
"array",
"(",
"list",
"(",
"concatseq",
")",
")",
"==",
"\"-\"",
")",
"[",
"0",
"]",
"if",
"indidx",
".",
"size",
":",
"allrows",
"=",
"np",
".",
"arange",
"(",
"amask",
".",
"shape",
"[",
"1",
"]",
")",
"mask",
"=",
"np",
".",
"ones",
"(",
"allrows",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"for",
"idx",
"in",
"indidx",
":",
"if",
"idx",
"<",
"mask",
".",
"shape",
"[",
"0",
"]",
":",
"mask",
"[",
"idx",
"]",
"=",
"False",
"not_idx",
"=",
"allrows",
"[",
"mask",
"==",
"1",
"]",
"## fill in new data into all other spots",
"newmask",
"[",
"not_idx",
"]",
"=",
"amask",
"[",
"kidx",
",",
":",
"not_idx",
".",
"shape",
"[",
"0",
"]",
"]",
"else",
":",
"newmask",
"=",
"amask",
"[",
"kidx",
"]",
"## lower the alleles",
"concatarr",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"concatseq",
")",
")",
"concatarr",
"[",
"newmask",
"]",
"=",
"np",
".",
"char",
".",
"lower",
"(",
"concatarr",
"[",
"newmask",
"]",
")",
"concatseq",
"=",
"concatarr",
".",
"tostring",
"(",
")",
"#LOGGER.info(concatseq)",
"## fill list with aligned data",
"aligned",
".",
"append",
"(",
"\"{}\\n{}\"",
".",
"format",
"(",
"key",
",",
"concatseq",
")",
")",
"## put into a dict for writing to file",
"#aligned = []",
"#for key in keys:",
"# aligned.append(\"\\n\".join(",
"# [key, ",
"# dalign1[key].replace(\"\\n\", \"\")+\"nnnn\"+\\",
"# dalign2[key].replace(\"\\n\", \"\")]))",
"except",
"IndexError",
"as",
"inst",
":",
"LOGGER",
".",
"debug",
"(",
"\"Error in PE - ldx: {}\"",
".",
"format",
"(",
")",
")",
"LOGGER",
".",
"debug",
"(",
"\"Vars: {}\"",
".",
"format",
"(",
"dict",
"(",
"globals",
"(",
")",
",",
"*",
"*",
"locals",
"(",
")",
")",
")",
")",
"raise",
"except",
"ValueError",
":",
"## make back into strings",
"cl1",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"\"\\n\"",
".",
"join",
"(",
"i",
")",
"for",
"i",
"in",
"zip",
"(",
"names",
",",
"seqs",
")",
"]",
")",
"## store allele (lowercase) info",
"shape",
"=",
"(",
"len",
"(",
"seqs",
")",
",",
"max",
"(",
"[",
"len",
"(",
"i",
")",
"for",
"i",
"in",
"seqs",
"]",
")",
")",
"arrseqs",
"=",
"np",
".",
"zeros",
"(",
"shape",
",",
"dtype",
"=",
"\"S1\"",
")",
"for",
"row",
"in",
"range",
"(",
"arrseqs",
".",
"shape",
"[",
"0",
"]",
")",
":",
"seqsrow",
"=",
"seqs",
"[",
"row",
"]",
"arrseqs",
"[",
"row",
",",
":",
"len",
"(",
"seqsrow",
")",
"]",
"=",
"list",
"(",
"seqsrow",
")",
"amask",
"=",
"np",
".",
"char",
".",
"islower",
"(",
"arrseqs",
")",
"save_alleles",
"=",
"np",
".",
"any",
"(",
"amask",
")",
"## send align1 to the bash shell (TODO: check for pipe-overflow)",
"cmd1",
"=",
"\"echo -e '{}' | {} -quiet -in - ; echo {}\"",
".",
"format",
"(",
"cl1",
",",
"ipyrad",
".",
"bins",
".",
"muscle",
",",
"\"//\"",
")",
"print",
"(",
"cmd1",
",",
"file",
"=",
"proc",
".",
"stdin",
")",
"## read the stdout by line until splitter is reached",
"for",
"line",
"in",
"iter",
"(",
"proc",
".",
"stdout",
".",
"readline",
",",
"\"//\\n\"",
")",
":",
"align1",
"+=",
"line",
"## ensure name order match",
"la1",
"=",
"align1",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\"\\n>\"",
")",
"dalign1",
"=",
"dict",
"(",
"[",
"i",
".",
"split",
"(",
"\"\\n\"",
",",
"1",
")",
"for",
"i",
"in",
"la1",
"]",
")",
"keys",
"=",
"sorted",
"(",
"dalign1",
".",
"keys",
"(",
")",
",",
"key",
"=",
"DEREP",
")",
"## put into dict for writing to file",
"for",
"kidx",
",",
"key",
"in",
"enumerate",
"(",
"keys",
")",
":",
"concatseq",
"=",
"dalign1",
"[",
"key",
"]",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
"## impute alleles",
"if",
"save_alleles",
":",
"newmask",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"concatseq",
")",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"## check for indels and impute to amask",
"indidx",
"=",
"np",
".",
"where",
"(",
"np",
".",
"array",
"(",
"list",
"(",
"concatseq",
")",
")",
"==",
"\"-\"",
")",
"[",
"0",
"]",
"if",
"indidx",
".",
"size",
":",
"allrows",
"=",
"np",
".",
"arange",
"(",
"amask",
".",
"shape",
"[",
"1",
"]",
")",
"mask",
"=",
"np",
".",
"ones",
"(",
"allrows",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"for",
"idx",
"in",
"indidx",
":",
"if",
"idx",
"<",
"mask",
".",
"shape",
"[",
"0",
"]",
":",
"mask",
"[",
"idx",
"]",
"=",
"False",
"not_idx",
"=",
"allrows",
"[",
"mask",
"==",
"1",
"]",
"## fill in new data into all other spots",
"newmask",
"[",
"not_idx",
"]",
"=",
"amask",
"[",
"kidx",
",",
":",
"not_idx",
".",
"shape",
"[",
"0",
"]",
"]",
"else",
":",
"newmask",
"=",
"amask",
"[",
"kidx",
"]",
"## lower the alleles",
"concatarr",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"concatseq",
")",
")",
"concatarr",
"[",
"newmask",
"]",
"=",
"np",
".",
"char",
".",
"lower",
"(",
"concatarr",
"[",
"newmask",
"]",
")",
"concatseq",
"=",
"concatarr",
".",
"tostring",
"(",
")",
"## fill list with aligned data",
"aligned",
".",
"append",
"(",
"\"{}\\n{}\"",
".",
"format",
"(",
"key",
",",
"concatseq",
")",
")",
"## put aligned locus in list",
"#aligned.append(\"\\n\".join(inner_aligned))",
"## enforce maxlen on aligned seqs",
"aseqs",
"=",
"np",
".",
"vstack",
"(",
"[",
"list",
"(",
"i",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"1",
"]",
")",
"for",
"i",
"in",
"aligned",
"]",
")",
"LOGGER",
".",
"info",
"(",
"\"\\naseqs here: %s\"",
",",
"aseqs",
")",
"## index names by snames order",
"sidxs",
"=",
"[",
"snames",
".",
"index",
"(",
"key",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"0",
"]",
")",
"for",
"key",
"in",
"keys",
"]",
"thislen",
"=",
"min",
"(",
"maxlen",
",",
"aseqs",
".",
"shape",
"[",
"1",
"]",
")",
"for",
"idx",
"in",
"xrange",
"(",
"aseqs",
".",
"shape",
"[",
"0",
"]",
")",
":",
"## enter into stack",
"newn",
"=",
"aligned",
"[",
"idx",
"]",
".",
"split",
"(",
"\";\"",
",",
"1",
")",
"[",
"0",
"]",
"#newn = key[idx].split(\";\", 1)[0]",
"istack",
".",
"append",
"(",
"\"{}\\n{}\"",
".",
"format",
"(",
"newn",
",",
"aseqs",
"[",
"idx",
",",
":",
"thislen",
"]",
".",
"tostring",
"(",
")",
")",
")",
"## name index in sorted list (indels order)",
"sidx",
"=",
"sidxs",
"[",
"idx",
"]",
"indels",
"[",
"sidx",
",",
"ldx",
",",
":",
"thislen",
"]",
"=",
"aseqs",
"[",
"idx",
",",
":",
"thislen",
"]",
"==",
"\"-\"",
"if",
"istack",
":",
"allstack",
".",
"append",
"(",
"\"\\n\"",
".",
"join",
"(",
"istack",
")",
")",
"#LOGGER.debug(\"\\n\\nSTACK (%s)\\n%s\\n\", duples[ldx], \"\\n\".join(istack))",
"## cleanup",
"proc",
".",
"stdout",
".",
"close",
"(",
")",
"if",
"proc",
".",
"stderr",
":",
"proc",
".",
"stderr",
".",
"close",
"(",
")",
"proc",
".",
"stdin",
".",
"close",
"(",
")",
"proc",
".",
"wait",
"(",
")",
"#LOGGER.info(\"\\n\\nALLSTACK %s\\n\", \"\\n\".join(i) for i in allstack[:5]])",
"## write to file after",
"odx",
"=",
"chunk",
".",
"rsplit",
"(",
"\"_\"",
")",
"[",
"-",
"1",
"]",
"alignfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"\"align_{}.fa\"",
".",
"format",
"(",
"odx",
")",
")",
"with",
"open",
"(",
"alignfile",
",",
"'wb'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n//\\n//\\n\"",
".",
"join",
"(",
"allstack",
")",
"+",
"\"\\n\"",
")",
"os",
".",
"remove",
"(",
"chunk",
")",
"## save indels array to tmp dir",
"ifile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"\"indels_{}.tmp.npy\"",
".",
"format",
"(",
"odx",
")",
")",
"np",
".",
"save",
"(",
"ifile",
",",
"indels",
")",
"dfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"\"duples_{}.tmp.npy\"",
".",
"format",
"(",
"odx",
")",
")",
"np",
".",
"save",
"(",
"dfile",
",",
"duples",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
multi_muscle_align
|
Sends the cluster bits to nprocessors for muscle alignment. They return
with indel.h5 handles to be concatenated into a joint h5.
|
ipyrad/assemble/cluster_across.py
|
def multi_muscle_align(data, samples, ipyclient):
"""
Sends the cluster bits to nprocessors for muscle alignment. They return
with indel.h5 handles to be concatenated into a joint h5.
"""
LOGGER.info("starting alignments")
## get client
lbview = ipyclient.load_balanced_view()
start = time.time()
printstr = " aligning clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
## submit clustbits as jobs to engines. The chunkfiles are removed when they
## are finished so this job can even be restarted if it was half finished,
## though that is probably rare.
path = os.path.join(data.tmpdir, data.name + ".chunk_*")
clustbits = glob.glob(path)
jobs = {}
for idx in xrange(len(clustbits)):
args = [data, samples, clustbits[idx]]
jobs[idx] = lbview.apply(persistent_popen_align3, *args)
allwait = len(jobs)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
## print progress while bits are aligning
while 1:
finished = [i.ready() for i in jobs.values()]
fwait = sum(finished)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(allwait, fwait, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if all(finished):
break
## check for errors in muscle_align_across
keys = jobs.keys()
for idx in keys:
if not jobs[idx].successful():
LOGGER.error("error in persistent_popen_align %s", jobs[idx].exception())
raise IPyradWarningExit("error in step 6 {}".format(jobs[idx].exception()))
del jobs[idx]
print("")
|
def multi_muscle_align(data, samples, ipyclient):
"""
Sends the cluster bits to nprocessors for muscle alignment. They return
with indel.h5 handles to be concatenated into a joint h5.
"""
LOGGER.info("starting alignments")
## get client
lbview = ipyclient.load_balanced_view()
start = time.time()
printstr = " aligning clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
## submit clustbits as jobs to engines. The chunkfiles are removed when they
## are finished so this job can even be restarted if it was half finished,
## though that is probably rare.
path = os.path.join(data.tmpdir, data.name + ".chunk_*")
clustbits = glob.glob(path)
jobs = {}
for idx in xrange(len(clustbits)):
args = [data, samples, clustbits[idx]]
jobs[idx] = lbview.apply(persistent_popen_align3, *args)
allwait = len(jobs)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
## print progress while bits are aligning
while 1:
finished = [i.ready() for i in jobs.values()]
fwait = sum(finished)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(allwait, fwait, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if all(finished):
break
## check for errors in muscle_align_across
keys = jobs.keys()
for idx in keys:
if not jobs[idx].successful():
LOGGER.error("error in persistent_popen_align %s", jobs[idx].exception())
raise IPyradWarningExit("error in step 6 {}".format(jobs[idx].exception()))
del jobs[idx]
print("")
|
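Note: multi_muscle_align fans the chunk files out to an ipyparallel load-balanced view and polls the AsyncResult handles while drawing a progress bar. A hedged sketch of that fan-out/poll loop follows; it assumes a running ipcluster, and align_one_chunk plus the "tmp-chunks" glob pattern are placeholders for illustration.

import glob, time
import ipyparallel as ipp

def align_one_chunk(path):
    # placeholder for the real per-chunk work (persistent_popen_align3)
    return path

client = ipp.Client()
lbview = client.load_balanced_view()
chunks = sorted(glob.glob("tmp-chunks/*.chunk_*"))     # hypothetical chunk paths
jobs = {i: lbview.apply(align_one_chunk, chunk) for i, chunk in enumerate(chunks)}

total, finished = len(jobs), 0
while finished < total:
    ready = [i for i, job in jobs.items() if job.ready()]
    for i in ready:
        jobs.pop(i).get()          # .get() re-raises any remote exception
        finished += 1
    print("{}/{} chunks aligned".format(finished, total))
    time.sleep(0.5)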
[
"Sends",
"the",
"cluster",
"bits",
"to",
"nprocessors",
"for",
"muscle",
"alignment",
".",
"They",
"return",
"with",
"indel",
".",
"h5",
"handles",
"to",
"be",
"concatenated",
"into",
"a",
"joint",
"h5",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L294-L338
|
[
"def",
"multi_muscle_align",
"(",
"data",
",",
"samples",
",",
"ipyclient",
")",
":",
"LOGGER",
".",
"info",
"(",
"\"starting alignments\"",
")",
"## get client",
"lbview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"printstr",
"=",
"\" aligning clusters | {} | s6 |\"",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"20",
",",
"0",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"## submit clustbits as jobs to engines. The chunkfiles are removed when they",
"## are finished so this job can even be restarted if it was half finished, ",
"## though that is probably rare. ",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"data",
".",
"name",
"+",
"\".chunk_*\"",
")",
"clustbits",
"=",
"glob",
".",
"glob",
"(",
"path",
")",
"jobs",
"=",
"{",
"}",
"for",
"idx",
"in",
"xrange",
"(",
"len",
"(",
"clustbits",
")",
")",
":",
"args",
"=",
"[",
"data",
",",
"samples",
",",
"clustbits",
"[",
"idx",
"]",
"]",
"jobs",
"[",
"idx",
"]",
"=",
"lbview",
".",
"apply",
"(",
"persistent_popen_align3",
",",
"*",
"args",
")",
"allwait",
"=",
"len",
"(",
"jobs",
")",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"20",
",",
"0",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"## print progress while bits are aligning",
"while",
"1",
":",
"finished",
"=",
"[",
"i",
".",
"ready",
"(",
")",
"for",
"i",
"in",
"jobs",
".",
"values",
"(",
")",
"]",
"fwait",
"=",
"sum",
"(",
"finished",
")",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"allwait",
",",
"fwait",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"all",
"(",
"finished",
")",
":",
"break",
"## check for errors in muscle_align_across",
"keys",
"=",
"jobs",
".",
"keys",
"(",
")",
"for",
"idx",
"in",
"keys",
":",
"if",
"not",
"jobs",
"[",
"idx",
"]",
".",
"successful",
"(",
")",
":",
"LOGGER",
".",
"error",
"(",
"\"error in persistent_popen_align %s\"",
",",
"jobs",
"[",
"idx",
"]",
".",
"exception",
"(",
")",
")",
"raise",
"IPyradWarningExit",
"(",
"\"error in step 6 {}\"",
".",
"format",
"(",
"jobs",
"[",
"idx",
"]",
".",
"exception",
"(",
")",
")",
")",
"del",
"jobs",
"[",
"idx",
"]",
"print",
"(",
"\"\"",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
concatclusts
|
concatenates sorted aligned cluster tmpfiles and removes them.
|
ipyrad/assemble/cluster_across.py
|
def concatclusts(outhandle, alignbits):
""" concatenates sorted aligned cluster tmpfiles and removes them."""
with gzip.open(outhandle, 'wb') as out:
for fname in alignbits:
with open(fname) as infile:
out.write(infile.read()+"//\n//\n")
|
def concatclusts(outhandle, alignbits):
""" concatenates sorted aligned cluster tmpfiles and removes them."""
with gzip.open(outhandle, 'wb') as out:
for fname in alignbits:
with open(fname) as infile:
out.write(infile.read()+"//\n//\n")
|
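Note: a hypothetical usage of concatclusts as defined above, assuming the per-chunk files follow the align_*.fa naming used elsewhere in this module; the "tmpdir"/"across" paths and output name are made up for illustration.

import glob, os

# sort the per-chunk alignment files numerically by their chunk index (strip ".fa")
alignbits = sorted(glob.glob(os.path.join("tmpdir", "align_*.fa")),
                   key=lambda x: int(x.rsplit("_", 1)[-1][:-3]))
concatclusts(os.path.join("across", "project_catclust.gz"), alignbits)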
[
"concatenates",
"sorted",
"aligned",
"cluster",
"tmpfiles",
"and",
"removes",
"them",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L342-L347
|
[
"def",
"concatclusts",
"(",
"outhandle",
",",
"alignbits",
")",
":",
"with",
"gzip",
".",
"open",
"(",
"outhandle",
",",
"'wb'",
")",
"as",
"out",
":",
"for",
"fname",
"in",
"alignbits",
":",
"with",
"open",
"(",
"fname",
")",
"as",
"infile",
":",
"out",
".",
"write",
"(",
"infile",
".",
"read",
"(",
")",
"+",
"\"//\\n//\\n\"",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
build_indels
|
Builds the indels array and catclust.gz file from the aligned clusters.
Building catclust is very fast. Entering indels into h5 array is a bit
slow but can probably be sped up. (todo). NOT currently parallelized.
|
ipyrad/assemble/cluster_across.py
|
def build_indels(data, samples, ipyclient):
"""
Builds the indels array and catclust.gz file from the aligned clusters.
Building catclust is very fast. Entering indels into h5 array is a bit
slow but can probably be sped up. (todo). NOT currently parallelized.
"""
## progress bars
lbview = ipyclient.load_balanced_view()
start = time.time()
printstr = " database indels | {} | s6 |"
njobs = len(glob.glob(os.path.join(data.tmpdir, "align_*.fa"))) + 1
## build tmparrs
async = lbview.apply(build_tmp_h5, *(data, samples))
## track progress
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
ready = bool(async.ready())
progressbar(njobs, ready, printstr.format(elapsed), spacer=data._spacer)
if ready:
break
else:
time.sleep(0.1)
## check for errors
if not async.successful():
raise IPyradWarningExit(async.result())
## start subfunc
async = lbview.apply(sub_build_indels, *(data, samples))
prog = 1
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if async.stdout:
prog = int(async.stdout.split()[-1])+1
progressbar(njobs, prog, printstr.format(elapsed), spacer=data._spacer)
if async.ready():
break
else:
time.sleep(0.1)
## check for errors
if not async.successful():
raise IPyradWarningExit(async.result())
print("")
## prepare for next substep by removing the singlecat result files if
## they exist.
snames = [i.name for i in samples]
snames.sort()
smpios = [os.path.join(data.dirs.across, i+'.tmp.h5') for i in snames]
for smpio in smpios:
if os.path.exists(smpio):
os.remove(smpio)
|
def build_indels(data, samples, ipyclient):
"""
Builds the indels array and catclust.gz file from the aligned clusters.
Building catclust is very fast. Entering indels into h5 array is a bit
slow but can probably be sped up. (todo). NOT currently parallelized.
"""
## progress bars
lbview = ipyclient.load_balanced_view()
start = time.time()
printstr = " database indels | {} | s6 |"
njobs = len(glob.glob(os.path.join(data.tmpdir, "align_*.fa"))) + 1
## build tmparrs
async = lbview.apply(build_tmp_h5, *(data, samples))
## track progress
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
ready = bool(async.ready())
progressbar(njobs, ready, printstr.format(elapsed), spacer=data._spacer)
if ready:
break
else:
time.sleep(0.1)
## check for errors
if not async.successful():
raise IPyradWarningExit(async.result())
## start subfunc
async = lbview.apply(sub_build_indels, *(data, samples))
prog = 1
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if async.stdout:
prog = int(async.stdout.split()[-1])+1
progressbar(njobs, prog, printstr.format(elapsed), spacer=data._spacer)
if async.ready():
break
else:
time.sleep(0.1)
## check for errors
if not async.successful():
raise IPyradWarningExit(async.result())
print("")
## prepare for next substep by removing the singlecat result files if
## they exist.
snames = [i.name for i in samples]
snames.sort()
smpios = [os.path.join(data.dirs.across, i+'.tmp.h5') for i in snames]
for smpio in smpios:
if os.path.exists(smpio):
os.remove(smpio)
|
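Note: build_indels reports progress by re-parsing the stdout captured from the remote job (sub_build_indels prints a running counter). A sketch of that convention, assuming async_result came from an lbview.apply(...) call like the one above:

import time

def track_stdout_progress(async_result, njobs, poll=0.1):
    """Poll a remote job, reading the last counter it printed from captured stdout."""
    prog = 1
    while not async_result.ready():
        if async_result.stdout:
            prog = int(async_result.stdout.split()[-1]) + 1
        print("progress: {}/{}".format(prog, njobs))
        time.sleep(poll)
    return async_result.get()      # re-raises if the remote call failed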
[
"Builds",
"the",
"indels",
"array",
"and",
"catclust",
".",
"gz",
"file",
"from",
"the",
"aligned",
"clusters",
".",
"Building",
"catclust",
"is",
"very",
"fast",
".",
"Entering",
"indels",
"into",
"h5",
"array",
"is",
"a",
"bit",
"slow",
"but",
"can",
"probably",
"be",
"sped",
"up",
".",
"(",
"todo",
")",
".",
"NOT",
"currently",
"parallelized",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L352-L408
|
[
"def",
"build_indels",
"(",
"data",
",",
"samples",
",",
"ipyclient",
")",
":",
"## progress bars",
"lbview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"printstr",
"=",
"\" database indels | {} | s6 |\"",
"njobs",
"=",
"len",
"(",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"\"align_*.fa\"",
")",
")",
")",
"+",
"1",
"## build tmparrs",
"async",
"=",
"lbview",
".",
"apply",
"(",
"build_tmp_h5",
",",
"*",
"(",
"data",
",",
"samples",
")",
")",
"## track progress",
"while",
"1",
":",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"ready",
"=",
"bool",
"(",
"async",
".",
"ready",
"(",
")",
")",
"progressbar",
"(",
"njobs",
",",
"ready",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"if",
"ready",
":",
"break",
"else",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"## check for errors",
"if",
"not",
"async",
".",
"successful",
"(",
")",
":",
"raise",
"IPyradWarningExit",
"(",
"async",
".",
"result",
"(",
")",
")",
"## start subfunc",
"async",
"=",
"lbview",
".",
"apply",
"(",
"sub_build_indels",
",",
"*",
"(",
"data",
",",
"samples",
")",
")",
"prog",
"=",
"1",
"while",
"1",
":",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"if",
"async",
".",
"stdout",
":",
"prog",
"=",
"int",
"(",
"async",
".",
"stdout",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
")",
"+",
"1",
"progressbar",
"(",
"njobs",
",",
"prog",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"if",
"async",
".",
"ready",
"(",
")",
":",
"break",
"else",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"## check for errors",
"if",
"not",
"async",
".",
"successful",
"(",
")",
":",
"raise",
"IPyradWarningExit",
"(",
"async",
".",
"result",
"(",
")",
")",
"print",
"(",
"\"\"",
")",
"## prepare for next substep by removing the singlecat result files if ",
"## they exist. ",
"snames",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"samples",
"]",
"snames",
".",
"sort",
"(",
")",
"smpios",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"i",
"+",
"'.tmp.h5'",
")",
"for",
"i",
"in",
"snames",
"]",
"for",
"smpio",
"in",
"smpios",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"smpio",
")",
":",
"os",
".",
"remove",
"(",
"smpio",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
sub_build_indels
|
sub func in `build_indels()`.
|
ipyrad/assemble/cluster_across.py
|
def sub_build_indels(data, samples):
""" sub func in `build_indels()`. """
## get file handles
indelfiles = glob.glob(os.path.join(data.tmpdir, "indels_*.tmp.npy"))
alignbits = glob.glob(os.path.join(data.tmpdir, "align_*.fa"))
## sort into input order by chunk names
indelfiles.sort(key=lambda x: int(x.rsplit("_", 1)[-1][:-8]))
alignbits.sort(key=lambda x: int(x.rsplit("_", 1)[-1][:-3]))
LOGGER.info("indelfiles %s", indelfiles)
LOGGER.info("alignbits %s", alignbits)
chunksize = int(indelfiles[0].rsplit("_", 1)[-1][:-8])
## concatenate finished seq clusters into a tmp file
outhandle = os.path.join(data.dirs.across, data.name+"_catclust.gz")
concatclusts(outhandle, alignbits)
## get dims for full indel array
maxlen = data._hackersonly["max_fragment_length"] + 20
nloci = get_nloci(data)
LOGGER.info("maxlen inside build is %s", maxlen)
LOGGER.info("nloci for indels %s", nloci)
## INIT TEMP INDEL ARRAY
## build an indel array for ALL loci in cat.clust.gz,
## chunked so that individual samples can be pulled out
ipath = os.path.join(data.dirs.across, data.name+".tmp.indels.hdf5")
with h5py.File(ipath, 'w') as io5:
iset = io5.create_dataset(
"indels",
shape=(len(samples), nloci, maxlen),
dtype=np.bool_,
chunks=(1, chunksize, maxlen))
## again make sure names are ordered right
samples.sort(key=lambda x: x.name)
#iset.attrs["chunksize"] = (1, data.nloci, maxlen)
iset.attrs["samples"] = [i.name for i in samples]
## enter all tmpindel arrays into full indel array
done = 0
init = 0
for indf in indelfiles:
end = int(indf.rsplit("_", 1)[-1][:-8])
inarr = np.load(indf)
LOGGER.info('inarr shape %s', inarr.shape)
LOGGER.info('iset shape %s', iset.shape)
iset[:, init:end, :] = inarr[:, :end-init]
init += end-init
done += 1
print(done)
|
def sub_build_indels(data, samples):
""" sub func in `build_indels()`. """
## get file handles
indelfiles = glob.glob(os.path.join(data.tmpdir, "indels_*.tmp.npy"))
alignbits = glob.glob(os.path.join(data.tmpdir, "align_*.fa"))
## sort into input order by chunk names
indelfiles.sort(key=lambda x: int(x.rsplit("_", 1)[-1][:-8]))
alignbits.sort(key=lambda x: int(x.rsplit("_", 1)[-1][:-3]))
LOGGER.info("indelfiles %s", indelfiles)
LOGGER.info("alignbits %s", alignbits)
chunksize = int(indelfiles[0].rsplit("_", 1)[-1][:-8])
## concatenate finished seq clusters into a tmp file
outhandle = os.path.join(data.dirs.across, data.name+"_catclust.gz")
concatclusts(outhandle, alignbits)
## get dims for full indel array
maxlen = data._hackersonly["max_fragment_length"] + 20
nloci = get_nloci(data)
LOGGER.info("maxlen inside build is %s", maxlen)
LOGGER.info("nloci for indels %s", nloci)
## INIT TEMP INDEL ARRAY
## build an indel array for ALL loci in cat.clust.gz,
## chunked so that individual samples can be pulled out
ipath = os.path.join(data.dirs.across, data.name+".tmp.indels.hdf5")
with h5py.File(ipath, 'w') as io5:
iset = io5.create_dataset(
"indels",
shape=(len(samples), nloci, maxlen),
dtype=np.bool_,
chunks=(1, chunksize, maxlen))
## again make sure names are ordered right
samples.sort(key=lambda x: x.name)
#iset.attrs["chunksize"] = (1, data.nloci, maxlen)
iset.attrs["samples"] = [i.name for i in samples]
## enter all tmpindel arrays into full indel array
done = 0
init = 0
for indf in indelfiles:
end = int(indf.rsplit("_", 1)[-1][:-8])
inarr = np.load(indf)
LOGGER.info('inarr shape %s', inarr.shape)
LOGGER.info('iset shape %s', iset.shape)
iset[:, init:end, :] = inarr[:, :end-init]
init += end-init
done += 1
print(done)
|
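Note: the core of sub_build_indels is filling one large chunked boolean HDF5 dataset block-by-block from smaller per-chunk .npy arrays. A self-contained toy of that fill pattern, with made-up shapes and file names:

import numpy as np
import h5py

nsamples, nloci, maxlen, step = 4, 100, 60, 25

# fake per-chunk indel arrays, named by their end index as the real code expects
for end in range(step, nloci + 1, step):
    np.save("indels_{}.tmp.npy".format(end),
            np.zeros((nsamples, step, maxlen), dtype=np.bool_))

with h5py.File("tmp.indels.hdf5", "w") as io5:
    iset = io5.create_dataset("indels", shape=(nsamples, nloci, maxlen),
                              dtype=np.bool_, chunks=(1, step, maxlen))
    init = 0
    for end in range(step, nloci + 1, step):
        inarr = np.load("indels_{}.tmp.npy".format(end))
        iset[:, init:end, :] = inarr[:, :end - init]
        init = end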
[
"sub",
"func",
"in",
"build_indels",
"()",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L412-L464
|
[
"def",
"sub_build_indels",
"(",
"data",
",",
"samples",
")",
":",
"## get file handles",
"indelfiles",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"\"indels_*.tmp.npy\"",
")",
")",
"alignbits",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"\"align_*.fa\"",
")",
")",
"## sort into input order by chunk names",
"indelfiles",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"int",
"(",
"x",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"8",
"]",
")",
")",
"alignbits",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"int",
"(",
"x",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"3",
"]",
")",
")",
"LOGGER",
".",
"info",
"(",
"\"indelfiles %s\"",
",",
"indelfiles",
")",
"LOGGER",
".",
"info",
"(",
"\"alignbits %s\"",
",",
"alignbits",
")",
"chunksize",
"=",
"int",
"(",
"indelfiles",
"[",
"0",
"]",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"8",
"]",
")",
"## concatenate finished seq clusters into a tmp file",
"outhandle",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\"_catclust.gz\"",
")",
"concatclusts",
"(",
"outhandle",
",",
"alignbits",
")",
"## get dims for full indel array",
"maxlen",
"=",
"data",
".",
"_hackersonly",
"[",
"\"max_fragment_length\"",
"]",
"+",
"20",
"nloci",
"=",
"get_nloci",
"(",
"data",
")",
"LOGGER",
".",
"info",
"(",
"\"maxlen inside build is %s\"",
",",
"maxlen",
")",
"LOGGER",
".",
"info",
"(",
"\"nloci for indels %s\"",
",",
"nloci",
")",
"## INIT TEMP INDEL ARRAY",
"## build an indel array for ALL loci in cat.clust.gz,",
"## chunked so that individual samples can be pulled out",
"ipath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".tmp.indels.hdf5\"",
")",
"with",
"h5py",
".",
"File",
"(",
"ipath",
",",
"'w'",
")",
"as",
"io5",
":",
"iset",
"=",
"io5",
".",
"create_dataset",
"(",
"\"indels\"",
",",
"shape",
"=",
"(",
"len",
"(",
"samples",
")",
",",
"nloci",
",",
"maxlen",
")",
",",
"dtype",
"=",
"np",
".",
"bool_",
",",
"chunks",
"=",
"(",
"1",
",",
"chunksize",
",",
"maxlen",
")",
")",
"## again make sure names are ordered right",
"samples",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"name",
")",
"#iset.attrs[\"chunksize\"] = (1, data.nloci, maxlen)",
"iset",
".",
"attrs",
"[",
"\"samples\"",
"]",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"samples",
"]",
"## enter all tmpindel arrays into full indel array",
"done",
"=",
"0",
"init",
"=",
"0",
"for",
"indf",
"in",
"indelfiles",
":",
"end",
"=",
"int",
"(",
"indf",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"8",
"]",
")",
"inarr",
"=",
"np",
".",
"load",
"(",
"indf",
")",
"LOGGER",
".",
"info",
"(",
"'inarr shape %s'",
",",
"inarr",
".",
"shape",
")",
"LOGGER",
".",
"info",
"(",
"'iset shape %s'",
",",
"iset",
".",
"shape",
")",
"iset",
"[",
":",
",",
"init",
":",
"end",
",",
":",
"]",
"=",
"inarr",
"[",
":",
",",
":",
"end",
"-",
"init",
"]",
"init",
"+=",
"end",
"-",
"init",
"done",
"+=",
"1",
"print",
"(",
"done",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
call_cluster
|
distributes 'cluster()' function to an ipyclient to make sure it runs
on a high memory node.
|
ipyrad/assemble/cluster_across.py
|
def call_cluster(data, noreverse, ipyclient):
"""
distributes 'cluster()' function to an ipyclient to make sure it runs
on a high memory node.
"""
## Find host with the most engines, for now just using first.
lbview = ipyclient.load_balanced_view()
## request engine data, skips busy engines.
asyncs = {}
for eid in ipyclient.ids:
engine = ipyclient[eid]
if not engine.outstanding:
asyncs[eid] = engine.apply(socket.gethostname)
## get results
hosts = {}
for key in asyncs:
hosts[key] = asyncs[key].get()
## count them
results = {}
for eid, hostname in hosts.items():
if hostname in results:
results[hostname].append(eid)
else:
results[hostname] = [eid]
## which is largest
hosts = sorted(results.items(), key=lambda x: len(x[1]), reverse=True)
_, eids = hosts[0]
bighost = ipyclient[eids[0]]
## nthreads is len eids, or ipcluster.threads, unless ipcluster.threads
## is really small, then we assume threads should not apply here.
## ipyrad -p params.txt -s 6 -c 20 would give:
    ## min(20, max(2, 10)) = 10
## while
## ipyrad -p params.txt -s 6 -c 20 -t 4 would give:
## min(20, max(4, 10)) = 10
## and
## ipyrad -p params.txt -s 6 -c 20 -t 15 would give:
## min(20, max(15, 10)) = 15
## and
## ipyrad -p params.txt -s 6 -c 16 --MPI (on 2 X 8-core nodes) would give:
## min(8, max(2, 10)) = 8
nthreads = min(len(eids), max(data._ipcluster["threads"], 10))
## submit job to the host with the most
async = bighost.apply(cluster, *(data, noreverse, nthreads))
#async = lbview.apply(cluster, *(data, noreverse, nthreads))
## track progress
prog = 0
start = time.time()
printstr = " clustering across | {} | s6 |"
while 1:
if async.stdout:
prog = int(async.stdout.split()[-1])
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(100, prog, printstr.format(elapsed), spacer=data._spacer)
if async.ready():
progressbar(100, prog, printstr.format(elapsed), spacer=data._spacer)
print("")
break
else:
time.sleep(0.5)
## store log result
ipyclient.wait()
data.stats_files.s6 = os.path.join(data.dirs.across, "s6_cluster_stats.txt")
|
def call_cluster(data, noreverse, ipyclient):
"""
distributes 'cluster()' function to an ipyclient to make sure it runs
on a high memory node.
"""
## Find host with the most engines, for now just using first.
lbview = ipyclient.load_balanced_view()
## request engine data, skips busy engines.
asyncs = {}
for eid in ipyclient.ids:
engine = ipyclient[eid]
if not engine.outstanding:
asyncs[eid] = engine.apply(socket.gethostname)
## get results
hosts = {}
for key in asyncs:
hosts[key] = asyncs[key].get()
## count them
results = {}
for eid, hostname in hosts.items():
if hostname in results:
results[hostname].append(eid)
else:
results[hostname] = [eid]
## which is largest
hosts = sorted(results.items(), key=lambda x: len(x[1]), reverse=True)
_, eids = hosts[0]
bighost = ipyclient[eids[0]]
## nthreads is len eids, or ipcluster.threads, unless ipcluster.threads
## is really small, then we assume threads should not apply here.
## ipyrad -p params.txt -s 6 -c 20 would give:
    ## min(20, max(2, 10)) = 10
## while
## ipyrad -p params.txt -s 6 -c 20 -t 4 would give:
## min(20, max(4, 10)) = 10
## and
## ipyrad -p params.txt -s 6 -c 20 -t 15 would give:
## min(20, max(15, 10)) = 15
## and
## ipyrad -p params.txt -s 6 -c 16 --MPI (on 2 X 8-core nodes) would give:
## min(8, max(2, 10)) = 8
nthreads = min(len(eids), max(data._ipcluster["threads"], 10))
## submit job to the host with the most
async = bighost.apply(cluster, *(data, noreverse, nthreads))
#async = lbview.apply(cluster, *(data, noreverse, nthreads))
## track progress
prog = 0
start = time.time()
printstr = " clustering across | {} | s6 |"
while 1:
if async.stdout:
prog = int(async.stdout.split()[-1])
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(100, prog, printstr.format(elapsed), spacer=data._spacer)
if async.ready():
progressbar(100, prog, printstr.format(elapsed), spacer=data._spacer)
print("")
break
else:
time.sleep(0.5)
## store log result
ipyclient.wait()
data.stats_files.s6 = os.path.join(data.dirs.across, "s6_cluster_stats.txt")
|
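Note: call_cluster picks the host that owns the most engines before submitting the memory-hungry clustering job, then applies the nthreads rule. A hypothetical sketch of that grouping, assuming a running ipcluster and a default threads setting of 2 (the real value comes from data._ipcluster["threads"]):

import socket
from collections import defaultdict
import ipyparallel as ipp

client = ipp.Client()
hosts = {eid: client[eid].apply(socket.gethostname) for eid in client.ids}

by_host = defaultdict(list)
for eid, job in hosts.items():
    by_host[job.get()].append(eid)

bigname, eids = max(by_host.items(), key=lambda kv: len(kv[1]))
bighost = client[eids[0]]                  # engine to receive the big job

threads_param = 2                          # assumed default for data._ipcluster["threads"]
nthreads = min(len(eids), max(threads_param, 10))
print(bigname, nthreads)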
[
"distributes",
"cluster",
"()",
"function",
"to",
"an",
"ipyclient",
"to",
"make",
"sure",
"it",
"runs",
"on",
"a",
"high",
"memory",
"node",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L468-L537
|
[
"def",
"call_cluster",
"(",
"data",
",",
"noreverse",
",",
"ipyclient",
")",
":",
"## Find host with the most engines, for now just using first.",
"lbview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
")",
"## request engine data, skips busy engines. ",
"asyncs",
"=",
"{",
"}",
"for",
"eid",
"in",
"ipyclient",
".",
"ids",
":",
"engine",
"=",
"ipyclient",
"[",
"eid",
"]",
"if",
"not",
"engine",
".",
"outstanding",
":",
"asyncs",
"[",
"eid",
"]",
"=",
"engine",
".",
"apply",
"(",
"socket",
".",
"gethostname",
")",
"## get results",
"hosts",
"=",
"{",
"}",
"for",
"key",
"in",
"asyncs",
":",
"hosts",
"[",
"key",
"]",
"=",
"asyncs",
"[",
"key",
"]",
".",
"get",
"(",
")",
"## count them",
"results",
"=",
"{",
"}",
"for",
"eid",
",",
"hostname",
"in",
"hosts",
".",
"items",
"(",
")",
":",
"if",
"hostname",
"in",
"results",
":",
"results",
"[",
"hostname",
"]",
".",
"append",
"(",
"eid",
")",
"else",
":",
"results",
"[",
"hostname",
"]",
"=",
"[",
"eid",
"]",
"## which is largest",
"hosts",
"=",
"sorted",
"(",
"results",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"len",
"(",
"x",
"[",
"1",
"]",
")",
",",
"reverse",
"=",
"True",
")",
"_",
",",
"eids",
"=",
"hosts",
"[",
"0",
"]",
"bighost",
"=",
"ipyclient",
"[",
"eids",
"[",
"0",
"]",
"]",
"## nthreads is len eids, or ipcluster.threads, unless ipcluster.threads ",
"## is really small, then we assume threads should not apply here.",
"## ipyrad -p params.txt -s 6 -c 20 would give:",
"## min(20, max(2, 10)) = 8",
"## while ",
"## ipyrad -p params.txt -s 6 -c 20 -t 4 would give:",
"## min(20, max(4, 10)) = 10",
"## and ",
"## ipyrad -p params.txt -s 6 -c 20 -t 15 would give:",
"## min(20, max(15, 10)) = 15",
"## and",
"## ipyrad -p params.txt -s 6 -c 16 --MPI (on 2 X 8-core nodes) would give:",
"## min(8, max(2, 10)) = 8",
"nthreads",
"=",
"min",
"(",
"len",
"(",
"eids",
")",
",",
"max",
"(",
"data",
".",
"_ipcluster",
"[",
"\"threads\"",
"]",
",",
"10",
")",
")",
"## submit job to the host with the most",
"async",
"=",
"bighost",
".",
"apply",
"(",
"cluster",
",",
"*",
"(",
"data",
",",
"noreverse",
",",
"nthreads",
")",
")",
"#async = lbview.apply(cluster, *(data, noreverse, nthreads))",
"## track progress",
"prog",
"=",
"0",
"start",
"=",
"time",
".",
"time",
"(",
")",
"printstr",
"=",
"\" clustering across | {} | s6 |\"",
"while",
"1",
":",
"if",
"async",
".",
"stdout",
":",
"prog",
"=",
"int",
"(",
"async",
".",
"stdout",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
")",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"100",
",",
"prog",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"if",
"async",
".",
"ready",
"(",
")",
":",
"progressbar",
"(",
"100",
",",
"prog",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"print",
"(",
"\"\"",
")",
"break",
"else",
":",
"time",
".",
"sleep",
"(",
"0.5",
")",
"## store log result",
"ipyclient",
".",
"wait",
"(",
")",
"data",
".",
"stats_files",
".",
"s6",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"\"s6_cluster_stats.txt\"",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
cluster
|
Calls vsearch for clustering across samples.
|
ipyrad/assemble/cluster_across.py
|
def cluster(data, noreverse, nthreads):
"""
Calls vsearch for clustering across samples.
"""
## input and output file handles
cathaplos = os.path.join(data.dirs.across, data.name+"_catshuf.tmp")
uhaplos = os.path.join(data.dirs.across, data.name+".utemp")
hhaplos = os.path.join(data.dirs.across, data.name+".htemp")
logfile = os.path.join(data.dirs.across, "s6_cluster_stats.txt")
## parameters that vary by datatype
## (too low of cov values yield too many poor alignments)
strand = "plus"
cov = 0.75 ##0.90
if data.paramsdict["datatype"] in ["gbs", "2brad"]:
strand = "both"
cov = 0.60
elif data.paramsdict["datatype"] == "pairgbs":
strand = "both"
cov = 0.75 ##0.90
## nthreads is calculated in 'call_cluster()'
cmd = [ipyrad.bins.vsearch,
"-cluster_smallmem", cathaplos,
"-strand", strand,
"-query_cov", str(cov),
"-minsl", str(0.5),
"-id", str(data.paramsdict["clust_threshold"]),
"-userout", uhaplos,
"-notmatched", hhaplos,
"-userfields", "query+target+qstrand",
"-maxaccepts", "1",
"-maxrejects", "0",
"-fasta_width", "0",
"-threads", str(nthreads), #"0",
"-fulldp",
"-usersort",
"-log", logfile]
## override reverse clustering option
if noreverse:
strand = "plus" # -leftjust "
try:
## this seems to start vsearch on a different pid than the engine
## and so it's hard to kill...
LOGGER.info(cmd)
(dog, owner) = pty.openpty()
proc = sps.Popen(cmd, stdout=owner, stderr=owner, close_fds=True)
prog = 0
newprog = 0
while 1:
isdat = select.select([dog], [], [], 0)
if isdat[0]:
dat = os.read(dog, 80192)
else:
dat = ""
if "Clustering" in dat:
try:
newprog = int(dat.split()[-1][:-1])
## may raise value error when it gets to the end
except ValueError:
pass
## break if done
## catches end chunk of printing if clustering went really fast
elif "Clusters:" in dat:
LOGGER.info("ended vsearch tracking loop")
break
else:
time.sleep(0.1)
## print progress
if newprog != prog:
print(newprog)
prog = newprog
## another catcher to let vsearch cleanup after clustering is done
proc.wait()
print(100)
except KeyboardInterrupt:
LOGGER.info("interrupted vsearch here: %s", proc.pid)
os.kill(proc.pid, 2)
raise KeyboardInterrupt()
except sps.CalledProcessError as inst:
raise IPyradWarningExit("""
Error in vsearch: \n{}\n{}""".format(inst, sps.STDOUT))
except OSError as inst:
raise IPyradWarningExit("""
Failed to allocate pty: \n{}""".format(inst))
finally:
data.stats_files.s6 = logfile
|
def cluster(data, noreverse, nthreads):
"""
Calls vsearch for clustering across samples.
"""
## input and output file handles
cathaplos = os.path.join(data.dirs.across, data.name+"_catshuf.tmp")
uhaplos = os.path.join(data.dirs.across, data.name+".utemp")
hhaplos = os.path.join(data.dirs.across, data.name+".htemp")
logfile = os.path.join(data.dirs.across, "s6_cluster_stats.txt")
## parameters that vary by datatype
## (too low of cov values yield too many poor alignments)
strand = "plus"
cov = 0.75 ##0.90
if data.paramsdict["datatype"] in ["gbs", "2brad"]:
strand = "both"
cov = 0.60
elif data.paramsdict["datatype"] == "pairgbs":
strand = "both"
cov = 0.75 ##0.90
## nthreads is calculated in 'call_cluster()'
cmd = [ipyrad.bins.vsearch,
"-cluster_smallmem", cathaplos,
"-strand", strand,
"-query_cov", str(cov),
"-minsl", str(0.5),
"-id", str(data.paramsdict["clust_threshold"]),
"-userout", uhaplos,
"-notmatched", hhaplos,
"-userfields", "query+target+qstrand",
"-maxaccepts", "1",
"-maxrejects", "0",
"-fasta_width", "0",
"-threads", str(nthreads), #"0",
"-fulldp",
"-usersort",
"-log", logfile]
## override reverse clustering option
if noreverse:
strand = "plus" # -leftjust "
try:
## this seems to start vsearch on a different pid than the engine
## and so it's hard to kill...
LOGGER.info(cmd)
(dog, owner) = pty.openpty()
proc = sps.Popen(cmd, stdout=owner, stderr=owner, close_fds=True)
prog = 0
newprog = 0
while 1:
isdat = select.select([dog], [], [], 0)
if isdat[0]:
dat = os.read(dog, 80192)
else:
dat = ""
if "Clustering" in dat:
try:
newprog = int(dat.split()[-1][:-1])
## may raise value error when it gets to the end
except ValueError:
pass
## break if done
## catches end chunk of printing if clustering went really fast
elif "Clusters:" in dat:
LOGGER.info("ended vsearch tracking loop")
break
else:
time.sleep(0.1)
## print progress
if newprog != prog:
print(newprog)
prog = newprog
## another catcher to let vsearch cleanup after clustering is done
proc.wait()
print(100)
except KeyboardInterrupt:
LOGGER.info("interrupted vsearch here: %s", proc.pid)
os.kill(proc.pid, 2)
raise KeyboardInterrupt()
except sps.CalledProcessError as inst:
raise IPyradWarningExit("""
Error in vsearch: \n{}\n{}""".format(inst, sps.STDOUT))
except OSError as inst:
raise IPyradWarningExit("""
Failed to allocate pty: \n{}""".format(inst))
finally:
data.stats_files.s6 = logfile
|
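Note: cluster() watches vsearch through a pseudo-terminal so its progress lines can be read without blocking. A minimal sketch of that pty/select pattern; a bash loop printing "Clustering NN%" lines stands in for the vsearch binary, and the percentage parsing and exit check are simplified relative to the source.

import os, pty, select, time
import subprocess as sps

cmd = ["bash", "-c",
       "for i in 25 50 75 100; do echo Clustering ${i}%; sleep 0.2; done; echo Clusters: 4"]

dog, owner = pty.openpty()                 # master, slave
proc = sps.Popen(cmd, stdout=owner, stderr=owner, close_fds=True)

prog = 0
while True:
    isdat = select.select([dog], [], [], 0)
    dat = os.read(dog, 8192).decode() if isdat[0] else ""
    if "Clusters:" in dat or (not dat and proc.poll() is not None):
        break                              # final summary line seen, or child exited
    if "Clustering" in dat:
        try:
            prog = int(dat.split("%")[0].split()[-1])
            print(prog)
        except ValueError:
            pass
    else:
        time.sleep(0.1)

proc.wait()
os.close(owner)
os.close(dog)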
[
"Calls",
"vsearch",
"for",
"clustering",
"across",
"samples",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L541-L636
|
[
"def",
"cluster",
"(",
"data",
",",
"noreverse",
",",
"nthreads",
")",
":",
"## input and output file handles",
"cathaplos",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\"_catshuf.tmp\"",
")",
"uhaplos",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".utemp\"",
")",
"hhaplos",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".htemp\"",
")",
"logfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"\"s6_cluster_stats.txt\"",
")",
"## parameters that vary by datatype",
"## (too low of cov values yield too many poor alignments)",
"strand",
"=",
"\"plus\"",
"cov",
"=",
"0.75",
"##0.90",
"if",
"data",
".",
"paramsdict",
"[",
"\"datatype\"",
"]",
"in",
"[",
"\"gbs\"",
",",
"\"2brad\"",
"]",
":",
"strand",
"=",
"\"both\"",
"cov",
"=",
"0.60",
"elif",
"data",
".",
"paramsdict",
"[",
"\"datatype\"",
"]",
"==",
"\"pairgbs\"",
":",
"strand",
"=",
"\"both\"",
"cov",
"=",
"0.75",
"##0.90",
"## nthreads is calculated in 'call_cluster()'",
"cmd",
"=",
"[",
"ipyrad",
".",
"bins",
".",
"vsearch",
",",
"\"-cluster_smallmem\"",
",",
"cathaplos",
",",
"\"-strand\"",
",",
"strand",
",",
"\"-query_cov\"",
",",
"str",
"(",
"cov",
")",
",",
"\"-minsl\"",
",",
"str",
"(",
"0.5",
")",
",",
"\"-id\"",
",",
"str",
"(",
"data",
".",
"paramsdict",
"[",
"\"clust_threshold\"",
"]",
")",
",",
"\"-userout\"",
",",
"uhaplos",
",",
"\"-notmatched\"",
",",
"hhaplos",
",",
"\"-userfields\"",
",",
"\"query+target+qstrand\"",
",",
"\"-maxaccepts\"",
",",
"\"1\"",
",",
"\"-maxrejects\"",
",",
"\"0\"",
",",
"\"-fasta_width\"",
",",
"\"0\"",
",",
"\"-threads\"",
",",
"str",
"(",
"nthreads",
")",
",",
"#\"0\",",
"\"-fulldp\"",
",",
"\"-usersort\"",
",",
"\"-log\"",
",",
"logfile",
"]",
"## override reverse clustering option",
"if",
"noreverse",
":",
"strand",
"=",
"\"plus\"",
"# -leftjust \"",
"try",
":",
"## this seems to start vsearch on a different pid than the engine",
"## and so it's hard to kill... ",
"LOGGER",
".",
"info",
"(",
"cmd",
")",
"(",
"dog",
",",
"owner",
")",
"=",
"pty",
".",
"openpty",
"(",
")",
"proc",
"=",
"sps",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"owner",
",",
"stderr",
"=",
"owner",
",",
"close_fds",
"=",
"True",
")",
"prog",
"=",
"0",
"newprog",
"=",
"0",
"while",
"1",
":",
"isdat",
"=",
"select",
".",
"select",
"(",
"[",
"dog",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"0",
")",
"if",
"isdat",
"[",
"0",
"]",
":",
"dat",
"=",
"os",
".",
"read",
"(",
"dog",
",",
"80192",
")",
"else",
":",
"dat",
"=",
"\"\"",
"if",
"\"Clustering\"",
"in",
"dat",
":",
"try",
":",
"newprog",
"=",
"int",
"(",
"dat",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"1",
"]",
")",
"## may raise value error when it gets to the end",
"except",
"ValueError",
":",
"pass",
"## break if done",
"## catches end chunk of printing if clustering went really fast",
"elif",
"\"Clusters:\"",
"in",
"dat",
":",
"LOGGER",
".",
"info",
"(",
"\"ended vsearch tracking loop\"",
")",
"break",
"else",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"## print progress",
"if",
"newprog",
"!=",
"prog",
":",
"print",
"(",
"newprog",
")",
"prog",
"=",
"newprog",
"## another catcher to let vsearch cleanup after clustering is done",
"proc",
".",
"wait",
"(",
")",
"print",
"(",
"100",
")",
"except",
"KeyboardInterrupt",
":",
"LOGGER",
".",
"info",
"(",
"\"interrupted vsearch here: %s\"",
",",
"proc",
".",
"pid",
")",
"os",
".",
"kill",
"(",
"proc",
".",
"pid",
",",
"2",
")",
"raise",
"KeyboardInterrupt",
"(",
")",
"except",
"sps",
".",
"CalledProcessError",
"as",
"inst",
":",
"raise",
"IPyradWarningExit",
"(",
"\"\"\"\n Error in vsearch: \\n{}\\n{}\"\"\"",
".",
"format",
"(",
"inst",
",",
"sps",
".",
"STDOUT",
")",
")",
"except",
"OSError",
"as",
"inst",
":",
"raise",
"IPyradWarningExit",
"(",
"\"\"\"\n Failed to allocate pty: \\n{}\"\"\"",
".",
"format",
"(",
"inst",
")",
")",
"finally",
":",
"data",
".",
"stats_files",
".",
"s6",
"=",
"logfile"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
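The cluster() entry above watches vsearch through a pseudo-terminal: the child writes its progress to the pty slave, the parent polls the master with select() and parses the percentage out of lines containing "Clustering". Below is a minimal, POSIX-only sketch of that pty/select pattern; the demo command is a stand-in shell loop rather than vsearch, and the percent parsing is simplified.

import os
import pty
import select
import subprocess as sps
import time

def run_with_progress(cmd):
    """Run cmd attached to a pty and yield its output chunks as they appear."""
    # vsearch-style tools only print progress when stdout is a terminal,
    # so the child gets the slave end of a pseudo-terminal
    master, slave = pty.openpty()
    proc = sps.Popen(cmd, stdout=slave, stderr=slave, close_fds=True)
    os.close(slave)
    try:
        while proc.poll() is None:
            ready, _, _ = select.select([master], [], [], 0)
            if ready:
                try:
                    chunk = os.read(master, 8192)
                except OSError:          # pty closed on the child's side
                    break
                if chunk:
                    yield chunk.decode(errors="replace")
            else:
                time.sleep(0.1)
        # a production loop would also drain any output left after exit
    finally:
        proc.wait()
        os.close(master)

# toy usage: a shell loop standing in for vsearch's "Clustering NN%" lines
if __name__ == "__main__":
    demo = ["bash", "-c", "for i in 10 50 100; do echo Clustering $i%; sleep 0.2; done"]
    for text in run_with_progress(demo):
        for word in text.split():
            if word.endswith("%"):
                print("progress:", word.rstrip("%"))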
valid
|
build_h5_array
|
Sets up all of the h5 arrays that we will fill.
The catg array of prefiltered loci is 4-dimensional (Big), so one big
array would overload memory, we need to fill it in slices.
This will be done in multicat (singlecat) and fill_superseqs.
|
ipyrad/assemble/cluster_across.py
|
def build_h5_array(data, samples, nloci):
"""
Sets up all of the h5 arrays that we will fill.
The catg array of prefiltered loci is 4-dimensional (Big), so one big
array would overload memory, we need to fill it in slices.
This will be done in multicat (singlecat) and fill_superseqs.
"""
## sort to ensure samples will be in alphabetical order, tho they should be.
samples.sort(key=lambda x: x.name)
## get maxlen dim
maxlen = data._hackersonly["max_fragment_length"] + 20
LOGGER.info("maxlen inside build_h5_array is %s", maxlen)
LOGGER.info("nloci inside build_h5_array is %s", nloci)
## open new h5 handle
data.clust_database = os.path.join(data.dirs.across, data.name+".clust.hdf5")
io5 = h5py.File(data.clust_database, 'w')
## chunk to approximately 2 chunks per core
chunks = ((nloci // (data.cpus*2)) + (nloci % (data.cpus*2)))
## Number of elements in hdf5 chunk may not exceed 500MB
## This is probably not actually optimal, to have such
## enormous chunk sizes, could probably explore efficiency
## of smaller chunk sizes on very very large datasets
chunklen = chunks * len(samples) * maxlen * 4
while chunklen > int(500e6):
chunks = (chunks // 2) + (chunks % 2)
chunklen = chunks * len(samples) * maxlen * 4
LOGGER.info("chunks in build_h5_array: %s", chunks)
data.chunks = chunks
LOGGER.info("nloci is %s", nloci)
LOGGER.info("chunks is %s", data.chunks)
## INIT FULL CATG ARRAY
## store catgs with a .10 loci chunk size
supercatg = io5.create_dataset("catgs", (nloci, len(samples), maxlen, 4),
dtype=np.uint32,
chunks=(chunks, 1, maxlen, 4),
compression="gzip")
superseqs = io5.create_dataset("seqs", (nloci, len(samples), maxlen),
dtype="|S1",
#dtype=np.uint8,
chunks=(chunks, len(samples), maxlen),
compression='gzip')
superalls = io5.create_dataset("nalleles", (nloci, len(samples)),
dtype=np.uint8,
chunks=(chunks, len(samples)),
compression="gzip")
superchroms = io5.create_dataset("chroms", (nloci, 3),
dtype=np.int64,
chunks=(chunks, 3),
compression="gzip")
## allele count storage
supercatg.attrs["chunksize"] = (chunks, 1, maxlen, 4)
supercatg.attrs["samples"] = [i.name for i in samples]
superseqs.attrs["chunksize"] = (chunks, len(samples), maxlen)
superseqs.attrs["samples"] = [i.name for i in samples]
superalls.attrs["chunksize"] = (chunks, len(samples))
superalls.attrs["samples"] = [i.name for i in samples]
superchroms.attrs["chunksize"] = (chunks, len(samples))
superchroms.attrs["samples"] = [i.name for i in samples]
## array for pair splits locations, dup and ind filters
io5.create_dataset("splits", (nloci, ), dtype=np.uint16)
io5.create_dataset("duplicates", (nloci, ), dtype=np.bool_)
## close the big boy
io5.close()
|
def build_h5_array(data, samples, nloci):
"""
Sets up all of the h5 arrays that we will fill.
The catg array of prefiltered loci is 4-dimensional (Big), so one big
array would overload memory, we need to fill it in slices.
This will be done in multicat (singlecat) and fill_superseqs.
"""
## sort to ensure samples will be in alphabetical order, tho they should be.
samples.sort(key=lambda x: x.name)
## get maxlen dim
maxlen = data._hackersonly["max_fragment_length"] + 20
LOGGER.info("maxlen inside build_h5_array is %s", maxlen)
LOGGER.info("nloci inside build_h5_array is %s", nloci)
## open new h5 handle
data.clust_database = os.path.join(data.dirs.across, data.name+".clust.hdf5")
io5 = h5py.File(data.clust_database, 'w')
## chunk to approximately 2 chunks per core
chunks = ((nloci // (data.cpus*2)) + (nloci % (data.cpus*2)))
## Number of elements in hdf5 chunk may not exceed 500MB
## This is probably not actually optimal, to have such
## enormous chunk sizes, could probably explore efficiency
## of smaller chunk sizes on very very large datasets
chunklen = chunks * len(samples) * maxlen * 4
while chunklen > int(500e6):
chunks = (chunks // 2) + (chunks % 2)
chunklen = chunks * len(samples) * maxlen * 4
LOGGER.info("chunks in build_h5_array: %s", chunks)
data.chunks = chunks
LOGGER.info("nloci is %s", nloci)
LOGGER.info("chunks is %s", data.chunks)
## INIT FULL CATG ARRAY
## store catgs with a .10 loci chunk size
supercatg = io5.create_dataset("catgs", (nloci, len(samples), maxlen, 4),
dtype=np.uint32,
chunks=(chunks, 1, maxlen, 4),
compression="gzip")
superseqs = io5.create_dataset("seqs", (nloci, len(samples), maxlen),
dtype="|S1",
#dtype=np.uint8,
chunks=(chunks, len(samples), maxlen),
compression='gzip')
superalls = io5.create_dataset("nalleles", (nloci, len(samples)),
dtype=np.uint8,
chunks=(chunks, len(samples)),
compression="gzip")
superchroms = io5.create_dataset("chroms", (nloci, 3),
dtype=np.int64,
chunks=(chunks, 3),
compression="gzip")
## allele count storage
supercatg.attrs["chunksize"] = (chunks, 1, maxlen, 4)
supercatg.attrs["samples"] = [i.name for i in samples]
superseqs.attrs["chunksize"] = (chunks, len(samples), maxlen)
superseqs.attrs["samples"] = [i.name for i in samples]
superalls.attrs["chunksize"] = (chunks, len(samples))
superalls.attrs["samples"] = [i.name for i in samples]
superchroms.attrs["chunksize"] = (chunks, len(samples))
superchroms.attrs["samples"] = [i.name for i in samples]
## array for pair splits locations, dup and ind filters
io5.create_dataset("splits", (nloci, ), dtype=np.uint16)
io5.create_dataset("duplicates", (nloci, ), dtype=np.bool_)
## close the big boy
io5.close()
|
[
"Sets",
"up",
"all",
"of",
"the",
"h5",
"arrays",
"that",
"we",
"will",
"fill",
".",
"The",
"catg",
"array",
"of",
"prefiltered",
"loci",
"is",
"4",
"-",
"dimensional",
"(",
"Big",
")",
"so",
"one",
"big",
"array",
"would",
"overload",
"memory",
"we",
"need",
"to",
"fill",
"it",
"in",
"slices",
".",
"This",
"will",
"be",
"done",
"in",
"multicat",
"(",
"singlecat",
")",
"and",
"fill_superseqs",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L642-L714
|
[
"def",
"build_h5_array",
"(",
"data",
",",
"samples",
",",
"nloci",
")",
":",
"## sort to ensure samples will be in alphabetical order, tho they should be.",
"samples",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"name",
")",
"## get maxlen dim",
"maxlen",
"=",
"data",
".",
"_hackersonly",
"[",
"\"max_fragment_length\"",
"]",
"+",
"20",
"LOGGER",
".",
"info",
"(",
"\"maxlen inside build_h5_array is %s\"",
",",
"maxlen",
")",
"LOGGER",
".",
"info",
"(",
"\"nloci inside build_h5_array is %s\"",
",",
"nloci",
")",
"## open new h5 handle",
"data",
".",
"clust_database",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".clust.hdf5\"",
")",
"io5",
"=",
"h5py",
".",
"File",
"(",
"data",
".",
"clust_database",
",",
"'w'",
")",
"## chunk to approximately 2 chunks per core",
"chunks",
"=",
"(",
"(",
"nloci",
"//",
"(",
"data",
".",
"cpus",
"*",
"2",
")",
")",
"+",
"(",
"nloci",
"%",
"(",
"data",
".",
"cpus",
"*",
"2",
")",
")",
")",
"## Number of elements in hdf5 chunk may not exceed 500MB",
"## This is probably not actually optimal, to have such",
"## enormous chunk sizes, could probably explore efficiency",
"## of smaller chunk sizes on very very large datasets",
"chunklen",
"=",
"chunks",
"*",
"len",
"(",
"samples",
")",
"*",
"maxlen",
"*",
"4",
"while",
"chunklen",
">",
"int",
"(",
"500e6",
")",
":",
"chunks",
"=",
"(",
"chunks",
"//",
"2",
")",
"+",
"(",
"chunks",
"%",
"2",
")",
"chunklen",
"=",
"chunks",
"*",
"len",
"(",
"samples",
")",
"*",
"maxlen",
"*",
"4",
"LOGGER",
".",
"info",
"(",
"\"chunks in build_h5_array: %s\"",
",",
"chunks",
")",
"data",
".",
"chunks",
"=",
"chunks",
"LOGGER",
".",
"info",
"(",
"\"nloci is %s\"",
",",
"nloci",
")",
"LOGGER",
".",
"info",
"(",
"\"chunks is %s\"",
",",
"data",
".",
"chunks",
")",
"## INIT FULL CATG ARRAY",
"## store catgs with a .10 loci chunk size",
"supercatg",
"=",
"io5",
".",
"create_dataset",
"(",
"\"catgs\"",
",",
"(",
"nloci",
",",
"len",
"(",
"samples",
")",
",",
"maxlen",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"uint32",
",",
"chunks",
"=",
"(",
"chunks",
",",
"1",
",",
"maxlen",
",",
"4",
")",
",",
"compression",
"=",
"\"gzip\"",
")",
"superseqs",
"=",
"io5",
".",
"create_dataset",
"(",
"\"seqs\"",
",",
"(",
"nloci",
",",
"len",
"(",
"samples",
")",
",",
"maxlen",
")",
",",
"dtype",
"=",
"\"|S1\"",
",",
"#dtype=np.uint8,",
"chunks",
"=",
"(",
"chunks",
",",
"len",
"(",
"samples",
")",
",",
"maxlen",
")",
",",
"compression",
"=",
"'gzip'",
")",
"superalls",
"=",
"io5",
".",
"create_dataset",
"(",
"\"nalleles\"",
",",
"(",
"nloci",
",",
"len",
"(",
"samples",
")",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
",",
"chunks",
"=",
"(",
"chunks",
",",
"len",
"(",
"samples",
")",
")",
",",
"compression",
"=",
"\"gzip\"",
")",
"superchroms",
"=",
"io5",
".",
"create_dataset",
"(",
"\"chroms\"",
",",
"(",
"nloci",
",",
"3",
")",
",",
"dtype",
"=",
"np",
".",
"int64",
",",
"chunks",
"=",
"(",
"chunks",
",",
"3",
")",
",",
"compression",
"=",
"\"gzip\"",
")",
"## allele count storage",
"supercatg",
".",
"attrs",
"[",
"\"chunksize\"",
"]",
"=",
"(",
"chunks",
",",
"1",
",",
"maxlen",
",",
"4",
")",
"supercatg",
".",
"attrs",
"[",
"\"samples\"",
"]",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"samples",
"]",
"superseqs",
".",
"attrs",
"[",
"\"chunksize\"",
"]",
"=",
"(",
"chunks",
",",
"len",
"(",
"samples",
")",
",",
"maxlen",
")",
"superseqs",
".",
"attrs",
"[",
"\"samples\"",
"]",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"samples",
"]",
"superalls",
".",
"attrs",
"[",
"\"chunksize\"",
"]",
"=",
"(",
"chunks",
",",
"len",
"(",
"samples",
")",
")",
"superalls",
".",
"attrs",
"[",
"\"samples\"",
"]",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"samples",
"]",
"superchroms",
".",
"attrs",
"[",
"\"chunksize\"",
"]",
"=",
"(",
"chunks",
",",
"len",
"(",
"samples",
")",
")",
"superchroms",
".",
"attrs",
"[",
"\"samples\"",
"]",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"samples",
"]",
"## array for pair splits locations, dup and ind filters",
"io5",
".",
"create_dataset",
"(",
"\"splits\"",
",",
"(",
"nloci",
",",
")",
",",
"dtype",
"=",
"np",
".",
"uint16",
")",
"io5",
".",
"create_dataset",
"(",
"\"duplicates\"",
",",
"(",
"nloci",
",",
")",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"## close the big boy",
"io5",
".",
"close",
"(",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
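build_h5_array() above picks an HDF5 chunk length of roughly two chunks per core along the locus axis and then halves it until one chunk of the 4-D catg block stays under the 500e6-element cap used in the source (the comment calls this 500MB, while the code counts elements). A standalone sketch of that sizing rule with h5py, using invented dimensions:

import h5py
import numpy as np

def make_catg_store(path, nloci, nsamples, maxlen, ncores):
    # start near two chunks per core along the locus axis, as in the source
    chunks = (nloci // (ncores * 2)) + (nloci % (ncores * 2))
    # halve until one (chunks, nsamples, maxlen, 4) block stays under the cap
    nelems = chunks * nsamples * maxlen * 4
    while nelems > int(500e6):
        chunks = (chunks // 2) + (chunks % 2)
        nelems = chunks * nsamples * maxlen * 4
    with h5py.File(path, "w") as io5:
        catg = io5.create_dataset(
            "catgs", (nloci, nsamples, maxlen, 4),
            dtype=np.uint32,
            chunks=(chunks, 1, maxlen, 4),
            compression="gzip")
        catg.attrs["chunksize"] = (chunks, 1, maxlen, 4)
    return chunks

# e.g. 200000 loci, 48 samples, 190 bp (170 + 20 pad), 8 cores
print(make_catg_store("demo.clust.hdf5", 200000, 48, 190, 8))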
valid
|
fill_dups_arr
|
fills the duplicates array from the multi_muscle_align tmp files
|
ipyrad/assemble/cluster_across.py
|
def fill_dups_arr(data):
"""
fills the duplicates array from the multi_muscle_align tmp files
"""
## build the duplicates array
duplefiles = glob.glob(os.path.join(data.tmpdir, "duples_*.tmp.npy"))
duplefiles.sort(key=lambda x: int(x.rsplit("_", 1)[-1][:-8]))
## enter the duplicates filter into super h5 array
io5 = h5py.File(data.clust_database, 'r+')
dfilter = io5["duplicates"]
## enter all duple arrays into full duplicates array
init = 0
for dupf in duplefiles:
end = int(dupf.rsplit("_", 1)[-1][:-8])
inarr = np.load(dupf)
dfilter[init:end] = inarr
init += end-init
#os.remove(dupf)
#del inarr
## continued progress bar
LOGGER.info("all duplicates: %s", dfilter[:].sum())
io5.close()
|
def fill_dups_arr(data):
"""
fills the duplicates array from the multi_muscle_align tmp files
"""
## build the duplicates array
duplefiles = glob.glob(os.path.join(data.tmpdir, "duples_*.tmp.npy"))
duplefiles.sort(key=lambda x: int(x.rsplit("_", 1)[-1][:-8]))
## enter the duplicates filter into super h5 array
io5 = h5py.File(data.clust_database, 'r+')
dfilter = io5["duplicates"]
## enter all duple arrays into full duplicates array
init = 0
for dupf in duplefiles:
end = int(dupf.rsplit("_", 1)[-1][:-8])
inarr = np.load(dupf)
dfilter[init:end] = inarr
init += end-init
#os.remove(dupf)
#del inarr
## continued progress bar
LOGGER.info("all duplicates: %s", dfilter[:].sum())
io5.close()
|
[
"fills",
"the",
"duplicates",
"array",
"from",
"the",
"multi_muscle_align",
"tmp",
"files"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L718-L742
|
[
"def",
"fill_dups_arr",
"(",
"data",
")",
":",
"## build the duplicates array",
"duplefiles",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"\"duples_*.tmp.npy\"",
")",
")",
"duplefiles",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"int",
"(",
"x",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"8",
"]",
")",
")",
"## enter the duplicates filter into super h5 array",
"io5",
"=",
"h5py",
".",
"File",
"(",
"data",
".",
"clust_database",
",",
"'r+'",
")",
"dfilter",
"=",
"io5",
"[",
"\"duplicates\"",
"]",
"## enter all duple arrays into full duplicates array",
"init",
"=",
"0",
"for",
"dupf",
"in",
"duplefiles",
":",
"end",
"=",
"int",
"(",
"dupf",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"8",
"]",
")",
"inarr",
"=",
"np",
".",
"load",
"(",
"dupf",
")",
"dfilter",
"[",
"init",
":",
"end",
"]",
"=",
"inarr",
"init",
"+=",
"end",
"-",
"init",
"#os.remove(dupf)",
"#del inarr",
"## continued progress bar",
"LOGGER",
".",
"info",
"(",
"\"all duplicates: %s\"",
",",
"dfilter",
"[",
":",
"]",
".",
"sum",
"(",
")",
")",
"io5",
".",
"close",
"(",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
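fill_dups_arr() above streams per-chunk duples_*.tmp.npy files, sorted by the end index baked into their filenames, into consecutive slices of the "duplicates" dataset. A self-contained sketch of that slice-filling pattern, with toy file names and sizes:

import glob
import os
import re
import h5py
import numpy as np

def fill_from_chunks(h5path, dsetname, tmpdir, pattern="duples_*.tmp.npy"):
    """Write per-chunk .npy files into consecutive slices of one HDF5 dataset."""
    files = glob.glob(os.path.join(tmpdir, pattern))
    # each file name carries the global end index of its chunk,
    # e.g. duples_3.tmp.npy covers entries [init:3]
    endof = lambda f: int(re.search(r"_(\d+)\.tmp\.npy$", f).group(1))
    files.sort(key=endof)
    with h5py.File(h5path, "r+") as io5:
        dset = io5[dsetname]
        init = 0
        for fname in files:
            end = endof(fname)
            dset[init:end] = np.load(fname)
            init = end
    return init   # total entries filled

# toy round-trip
os.makedirs("tmpchunks", exist_ok=True)
np.save("tmpchunks/duples_3.tmp.npy", np.array([True, False, True]))
np.save("tmpchunks/duples_5.tmp.npy", np.array([False, False]))
with h5py.File("demo.hdf5", "w") as io5:
    io5.create_dataset("duplicates", (5,), dtype=np.bool_)
print(fill_from_chunks("demo.hdf5", "duplicates", "tmpchunks"))   # -> 5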
valid
|
build_tmp_h5
|
build tmp h5 arrays that can return quick access for nloci
|
ipyrad/assemble/cluster_across.py
|
def build_tmp_h5(data, samples):
""" build tmp h5 arrays that can return quick access for nloci"""
## get samples and names, sorted
snames = [i.name for i in samples]
snames.sort()
## Build an array for quickly indexing consens reads from catg files.
## save as a npy int binary file.
uhandle = os.path.join(data.dirs.across, data.name+".utemp.sort")
bseeds = os.path.join(data.dirs.across, data.name+".tmparrs.h5")
## send as first async1 job
get_seeds_and_hits(uhandle, bseeds, snames)
|
def build_tmp_h5(data, samples):
""" build tmp h5 arrays that can return quick access for nloci"""
## get samples and names, sorted
snames = [i.name for i in samples]
snames.sort()
## Build an array for quickly indexing consens reads from catg files.
## save as a npy int binary file.
uhandle = os.path.join(data.dirs.across, data.name+".utemp.sort")
bseeds = os.path.join(data.dirs.across, data.name+".tmparrs.h5")
## send as first async1 job
get_seeds_and_hits(uhandle, bseeds, snames)
|
[
"build",
"tmp",
"h5",
"arrays",
"that",
"can",
"return",
"quick",
"access",
"for",
"nloci"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L746-L758
|
[
"def",
"build_tmp_h5",
"(",
"data",
",",
"samples",
")",
":",
"## get samples and names, sorted",
"snames",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"samples",
"]",
"snames",
".",
"sort",
"(",
")",
"## Build an array for quickly indexing consens reads from catg files.",
"## save as a npy int binary file.",
"uhandle",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".utemp.sort\"",
")",
"bseeds",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".tmparrs.h5\"",
")",
"## send as first async1 job",
"get_seeds_and_hits",
"(",
"uhandle",
",",
"bseeds",
",",
"snames",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
get_nloci
|
return nloci from the tmp h5 arr
|
ipyrad/assemble/cluster_across.py
|
def get_nloci(data):
""" return nloci from the tmp h5 arr"""
bseeds = os.path.join(data.dirs.across, data.name+".tmparrs.h5")
with h5py.File(bseeds) as io5:
return io5["seedsarr"].shape[0]
|
def get_nloci(data):
""" return nloci from the tmp h5 arr"""
bseeds = os.path.join(data.dirs.across, data.name+".tmparrs.h5")
with h5py.File(bseeds) as io5:
return io5["seedsarr"].shape[0]
|
[
"return",
"nloci",
"from",
"the",
"tmp",
"h5",
"arr"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L762-L766
|
[
"def",
"get_nloci",
"(",
"data",
")",
":",
"bseeds",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".tmparrs.h5\"",
")",
"with",
"h5py",
".",
"File",
"(",
"bseeds",
")",
"as",
"io5",
":",
"return",
"io5",
"[",
"\"seedsarr\"",
"]",
".",
"shape",
"[",
"0",
"]"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
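build_tmp_h5() and get_nloci() above rely on the fact that h5py exposes a dataset's shape from file metadata, so the locus count can be recovered later without loading the seeds array. A minimal round-trip with an invented file name:

import h5py
import numpy as np

def write_tmparrs(path, seedsarr):
    # stash the seeds array once so later steps can reopen it cheaply
    with h5py.File(path, "w") as io5:
        io5.create_dataset("seedsarr", data=seedsarr, dtype=np.int64)

def read_nloci(path):
    # shape comes from HDF5 metadata; the array itself is never loaded
    with h5py.File(path, "r") as io5:
        return io5["seedsarr"].shape[0]

write_tmparrs("demo.tmparrs.h5", np.zeros((1234, 3), dtype=np.int64))
print(read_nloci("demo.tmparrs.h5"))   # -> 1234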
valid
|
get_seeds_and_hits
|
builds a seeds and hits (uarr) array of ints from the utemp.sort file.
Saves outputs to files ...
|
ipyrad/assemble/cluster_across.py
|
def get_seeds_and_hits(uhandle, bseeds, snames):
"""
builds a seeds and hits (uarr) array of ints from the utemp.sort file.
Saves outputs to files ...
"""
## Get max name length. Allow for trailing _ + up to 9 digits
## of numbers of loci (an astronomical number of unique loci)
maxlen_names = np.max(map(len, snames)) + 10
## read in the utemp.sort file
updf = np.loadtxt(uhandle, dtype="S".format(maxlen_names))
## Get seeds for all matches from usort
seeds = np.unique(updf[:, 1])
seedsarr = np.column_stack([
np.arange(len(seeds)),
[i.rsplit("_", 1)[0] for i in seeds],
[i.rsplit("_", 1)[1] for i in seeds]])
seedsarr[:, 1] = [snames.index(i) for i in seedsarr[:, 1]]
seedsarr = seedsarr.astype(np.int64)
LOGGER.info("got a seedsarr %s", seedsarr.shape)
## Get matches from usort and create an array for fast entry
uarr = np.zeros((updf.shape[0], 3), dtype=np.int64)
idx = -1
lastloc = None
for ldx in xrange(updf.shape[0]):
tloc = updf[ldx, 1]
if tloc != lastloc:
idx += 1
uarr[ldx, 0] = idx
lastloc = tloc
## create a column with sample index
uarr[:, 1] = [int(snames.index(i.rsplit("_", 1)[0])) for i in updf[:, 0]]
## create a column with only consens index for sample
uarr[:, 2] = [int(i.rsplit("_", 1)[1]) for i in updf[:, 0]]
uarr = uarr.astype(np.int64)
LOGGER.info("got a uarr %s", uarr.shape)
    ## save as h5 so we can grab by sample slices
with h5py.File(bseeds, 'w') as io5:
io5.create_dataset("seedsarr", data=seedsarr, dtype=np.int64)
io5.create_dataset("uarr", data=uarr, dtype=np.int64)
|
def get_seeds_and_hits(uhandle, bseeds, snames):
"""
builds a seeds and hits (uarr) array of ints from the utemp.sort file.
Saves outputs to files ...
"""
## Get max name length. Allow for trailing _ + up to 9 digits
## of numbers of loci (an astronomical number of unique loci)
maxlen_names = np.max(map(len, snames)) + 10
## read in the utemp.sort file
updf = np.loadtxt(uhandle, dtype="S".format(maxlen_names))
## Get seeds for all matches from usort
seeds = np.unique(updf[:, 1])
seedsarr = np.column_stack([
np.arange(len(seeds)),
[i.rsplit("_", 1)[0] for i in seeds],
[i.rsplit("_", 1)[1] for i in seeds]])
seedsarr[:, 1] = [snames.index(i) for i in seedsarr[:, 1]]
seedsarr = seedsarr.astype(np.int64)
LOGGER.info("got a seedsarr %s", seedsarr.shape)
## Get matches from usort and create an array for fast entry
uarr = np.zeros((updf.shape[0], 3), dtype=np.int64)
idx = -1
lastloc = None
for ldx in xrange(updf.shape[0]):
tloc = updf[ldx, 1]
if tloc != lastloc:
idx += 1
uarr[ldx, 0] = idx
lastloc = tloc
## create a column with sample index
uarr[:, 1] = [int(snames.index(i.rsplit("_", 1)[0])) for i in updf[:, 0]]
## create a column with only consens index for sample
uarr[:, 2] = [int(i.rsplit("_", 1)[1]) for i in updf[:, 0]]
uarr = uarr.astype(np.int64)
LOGGER.info("got a uarr %s", uarr.shape)
    ## save as h5 so we can grab by sample slices
with h5py.File(bseeds, 'w') as io5:
io5.create_dataset("seedsarr", data=seedsarr, dtype=np.int64)
io5.create_dataset("uarr", data=uarr, dtype=np.int64)
|
[
"builds",
"a",
"seeds",
"and",
"hits",
"(",
"uarr",
")",
"array",
"of",
"ints",
"from",
"the",
"utemp",
".",
"sort",
"file",
".",
"Saves",
"outputs",
"to",
"files",
"..."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L770-L811
|
[
"def",
"get_seeds_and_hits",
"(",
"uhandle",
",",
"bseeds",
",",
"snames",
")",
":",
"## Get max name length. Allow for trailing _ + up to 9 digits",
"## of numbers of loci (an astronomical number of unique loci)",
"maxlen_names",
"=",
"np",
".",
"max",
"(",
"map",
"(",
"len",
",",
"snames",
")",
")",
"+",
"10",
"## read in the utemp.sort file",
"updf",
"=",
"np",
".",
"loadtxt",
"(",
"uhandle",
",",
"dtype",
"=",
"\"S\"",
".",
"format",
"(",
"maxlen_names",
")",
")",
"## Get seeds for all matches from usort",
"seeds",
"=",
"np",
".",
"unique",
"(",
"updf",
"[",
":",
",",
"1",
"]",
")",
"seedsarr",
"=",
"np",
".",
"column_stack",
"(",
"[",
"np",
".",
"arange",
"(",
"len",
"(",
"seeds",
")",
")",
",",
"[",
"i",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"seeds",
"]",
",",
"[",
"i",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"1",
"]",
"for",
"i",
"in",
"seeds",
"]",
"]",
")",
"seedsarr",
"[",
":",
",",
"1",
"]",
"=",
"[",
"snames",
".",
"index",
"(",
"i",
")",
"for",
"i",
"in",
"seedsarr",
"[",
":",
",",
"1",
"]",
"]",
"seedsarr",
"=",
"seedsarr",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"LOGGER",
".",
"info",
"(",
"\"got a seedsarr %s\"",
",",
"seedsarr",
".",
"shape",
")",
"## Get matches from usort and create an array for fast entry",
"uarr",
"=",
"np",
".",
"zeros",
"(",
"(",
"updf",
".",
"shape",
"[",
"0",
"]",
",",
"3",
")",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"idx",
"=",
"-",
"1",
"lastloc",
"=",
"None",
"for",
"ldx",
"in",
"xrange",
"(",
"updf",
".",
"shape",
"[",
"0",
"]",
")",
":",
"tloc",
"=",
"updf",
"[",
"ldx",
",",
"1",
"]",
"if",
"tloc",
"!=",
"lastloc",
":",
"idx",
"+=",
"1",
"uarr",
"[",
"ldx",
",",
"0",
"]",
"=",
"idx",
"lastloc",
"=",
"tloc",
"## create a column with sample index",
"uarr",
"[",
":",
",",
"1",
"]",
"=",
"[",
"int",
"(",
"snames",
".",
"index",
"(",
"i",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"0",
"]",
")",
")",
"for",
"i",
"in",
"updf",
"[",
":",
",",
"0",
"]",
"]",
"## create a column with only consens index for sample",
"uarr",
"[",
":",
",",
"2",
"]",
"=",
"[",
"int",
"(",
"i",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"1",
"]",
")",
"for",
"i",
"in",
"updf",
"[",
":",
",",
"0",
"]",
"]",
"uarr",
"=",
"uarr",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"LOGGER",
".",
"info",
"(",
"\"got a uarr %s\"",
",",
"uarr",
".",
"shape",
")",
"## save as h5 to we can grab by sample slices",
"with",
"h5py",
".",
"File",
"(",
"bseeds",
",",
"'w'",
")",
"as",
"io5",
":",
"io5",
".",
"create_dataset",
"(",
"\"seedsarr\"",
",",
"data",
"=",
"seedsarr",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"io5",
".",
"create_dataset",
"(",
"\"uarr\"",
",",
"data",
"=",
"uarr",
",",
"dtype",
"=",
"np",
".",
"int64",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
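get_seeds_and_hits() above splits "sampleName_consensIndex" identifiers out of the utemp.sort table and recodes them as integer (locus, sample, consens) rows for both the seeds and the hits. Note that dtype="S".format(maxlen_names) in the source has no {} placeholder, so the computed name width is never inserted; "S{}".format(maxlen_names) was presumably intended. The sketch below does the same string surgery in plain Python on invented rows, keeping seeds in sorted order to mirror np.unique:

import numpy as np

def seeds_and_hits(usort_lines, snames):
    """Split 'sample_consensIdx<TAB>seed<TAB>strand' rows into int arrays."""
    pairs = [ln.split()[:2] for ln in usort_lines if ln.strip()]
    hits = [p[0] for p in pairs]       # query names, e.g. "1A_0_5"
    seeds = [p[1] for p in pairs]      # the seed each query matched
    useeds = sorted(set(seeds))        # sorted, mirroring np.unique
    # seedsarr: one row per seed -> (locus id, sample index, consens index)
    seedsarr = np.array(
        [(loc, snames.index(s.rsplit("_", 1)[0]), int(s.rsplit("_", 1)[1]))
         for loc, s in enumerate(useeds)], dtype=np.int64)
    # uarr: one row per hit -> (locus id of its seed, sample index, consens index)
    locid = {s: i for i, s in enumerate(useeds)}
    uarr = np.array(
        [(locid[sd], snames.index(h.rsplit("_", 1)[0]), int(h.rsplit("_", 1)[1]))
         for h, sd in zip(hits, seeds)], dtype=np.int64)
    return seedsarr, uarr

lines = ["1A_0_5\t1B_0_2\t+", "1C_0_9\t1B_0_2\t+", "1A_0_7\t1D_0_4\t-"]
seedsarr, uarr = seeds_and_hits(lines, ["1A_0", "1B_0", "1C_0", "1D_0"])
print(seedsarr)
print(uarr)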
valid
|
new_multicat
|
Calls 'singlecat()' for all samples to build index files.
|
ipyrad/assemble/cluster_across.py
|
def new_multicat(data, samples, ipyclient):
"""
Calls 'singlecat()' for all samples to build index files.
"""
## track progress
LOGGER.info("in the multicat")
start = time.time()
printstr = " indexing clusters | {} | s6 |"
## Build the large h5 array. This will write a new HDF5 file and overwrite
## existing data.
nloci = get_nloci(data)
build_h5_array(data, samples, nloci)
## parallel client (reserve engine 0 for data entry), if/else here in case
## user has only one engine.
if len(ipyclient) > 1:
filler = ipyclient.load_balanced_view(targets=[0])
smallview = ipyclient.load_balanced_view(targets=ipyclient.ids[1::2])
else:
filler = ipyclient.load_balanced_view(targets=[0])
smallview = ipyclient.load_balanced_view(targets=[0])
## First submit a sleeper job as temp_flag for cleanups
last_sample = 0
cleanups = {}
cleanups[last_sample] = filler.apply(time.sleep, 0.0)
## fill the duplicates filter array
async = smallview.apply(fill_dups_arr, data)
while 1:
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if async.ready():
break
if not async.successful():
raise IPyradWarningExit(async.result())
## Get all existing .tmp.h5 files. If files exist then assume that we are
## restarting an interrupted job. We need to check for each one whether it
    ## has finished being built, and whether it has been written to the
## large array yet.
snames = [i.name for i in samples]
snames.sort()
smpios = {i:os.path.join(data.dirs.across, i+'.tmp.h5') for i in snames}
## send 'singlecat()' jobs to engines
bseeds = os.path.join(data.dirs.across, data.name+".tmparrs.h5")
jobs = {}
for sample in samples:
sidx = snames.index(sample.name)
args = (data, sample, bseeds, sidx, nloci)
## Only build it if it doesn't already exist. Singlecat removes
## unfinished files if interrupted, so .tmp.h5 should not exist
## unless the file is ready to be entered.
if not os.path.exists(smpios[sample.name]):
jobs[sample.name] = smallview.apply(singlecat, *args)
## track progress of singlecat jobs and submit writing jobs for finished
## singlecat files (.tmp.h5).
alljobs = len(jobs)
while 1:
## check for finished jobs
curkeys = jobs.keys()
for key in curkeys:
async = jobs[key]
if async.ready():
if async.successful():
## submit cleanup for finished job
args = (data, data.samples[key], snames.index(key))
with filler.temp_flags(after=cleanups[last_sample]):
cleanups[key] = filler.apply(write_to_fullarr, *args)
last_sample = key
del jobs[key]
else:
if not async.successful():
raise IPyradWarningExit(async.result())
## print progress or break
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(alljobs, alljobs-len(jobs), printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if not jobs:
break
## add the dask_chroms func for reference data
if 'reference' in data.paramsdict["assembly_method"]:
with filler.temp_flags(after=cleanups.values()):
cleanups['ref'] = filler.apply(dask_chroms, *(data, samples))
## ------- print breakline between indexing and writing database ---------
print("")
    ## track progress of database building
start = time.time()
printstr = " building database | {} | s6 |"
while 1:
finished = [i for i in cleanups.values() if i.ready()]
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(len(cleanups), len(finished), printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
## break if one failed, or if finished
if not all([i.successful() for i in finished]):
break
if len(cleanups) == len(finished):
break
## check for errors
for job in cleanups:
if cleanups[job].ready():
if not cleanups[job].successful():
raise IPyradWarningExit((job, cleanups[job].result()))
|
def new_multicat(data, samples, ipyclient):
"""
Calls 'singlecat()' for all samples to build index files.
"""
## track progress
LOGGER.info("in the multicat")
start = time.time()
printstr = " indexing clusters | {} | s6 |"
## Build the large h5 array. This will write a new HDF5 file and overwrite
## existing data.
nloci = get_nloci(data)
build_h5_array(data, samples, nloci)
## parallel client (reserve engine 0 for data entry), if/else here in case
## user has only one engine.
if len(ipyclient) > 1:
filler = ipyclient.load_balanced_view(targets=[0])
smallview = ipyclient.load_balanced_view(targets=ipyclient.ids[1::2])
else:
filler = ipyclient.load_balanced_view(targets=[0])
smallview = ipyclient.load_balanced_view(targets=[0])
## First submit a sleeper job as temp_flag for cleanups
last_sample = 0
cleanups = {}
cleanups[last_sample] = filler.apply(time.sleep, 0.0)
## fill the duplicates filter array
async = smallview.apply(fill_dups_arr, data)
while 1:
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if async.ready():
break
if not async.successful():
raise IPyradWarningExit(async.result())
## Get all existing .tmp.h5 files. If files exist then assume that we are
## restarting an interrupted job. We need to check for each one whether it
    ## has finished being built, and whether it has been written to the
## large array yet.
snames = [i.name for i in samples]
snames.sort()
smpios = {i:os.path.join(data.dirs.across, i+'.tmp.h5') for i in snames}
## send 'singlecat()' jobs to engines
bseeds = os.path.join(data.dirs.across, data.name+".tmparrs.h5")
jobs = {}
for sample in samples:
sidx = snames.index(sample.name)
args = (data, sample, bseeds, sidx, nloci)
## Only build it if it doesn't already exist. Singlecat removes
## unfinished files if interrupted, so .tmp.h5 should not exist
## unless the file is ready to be entered.
if not os.path.exists(smpios[sample.name]):
jobs[sample.name] = smallview.apply(singlecat, *args)
## track progress of singlecat jobs and submit writing jobs for finished
## singlecat files (.tmp.h5).
alljobs = len(jobs)
while 1:
## check for finished jobs
curkeys = jobs.keys()
for key in curkeys:
async = jobs[key]
if async.ready():
if async.successful():
## submit cleanup for finished job
args = (data, data.samples[key], snames.index(key))
with filler.temp_flags(after=cleanups[last_sample]):
cleanups[key] = filler.apply(write_to_fullarr, *args)
last_sample = key
del jobs[key]
else:
if not async.successful():
raise IPyradWarningExit(async.result())
## print progress or break
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(alljobs, alljobs-len(jobs), printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if not jobs:
break
## add the dask_chroms func for reference data
if 'reference' in data.paramsdict["assembly_method"]:
with filler.temp_flags(after=cleanups.values()):
cleanups['ref'] = filler.apply(dask_chroms, *(data, samples))
## ------- print breakline between indexing and writing database ---------
print("")
    ## track progress of database building
start = time.time()
printstr = " building database | {} | s6 |"
while 1:
finished = [i for i in cleanups.values() if i.ready()]
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(len(cleanups), len(finished), printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
## break if one failed, or if finished
if not all([i.successful() for i in finished]):
break
if len(cleanups) == len(finished):
break
## check for errors
for job in cleanups:
if cleanups[job].ready():
if not cleanups[job].successful():
raise IPyradWarningExit((job, cleanups[job].result()))
|
[
"Calls",
"singlecat",
"()",
"for",
"all",
"samples",
"to",
"build",
"index",
"files",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L816-L930
|
[
"def",
"new_multicat",
"(",
"data",
",",
"samples",
",",
"ipyclient",
")",
":",
"## track progress",
"LOGGER",
".",
"info",
"(",
"\"in the multicat\"",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"printstr",
"=",
"\" indexing clusters | {} | s6 |\"",
"## Build the large h5 array. This will write a new HDF5 file and overwrite",
"## existing data. ",
"nloci",
"=",
"get_nloci",
"(",
"data",
")",
"build_h5_array",
"(",
"data",
",",
"samples",
",",
"nloci",
")",
"## parallel client (reserve engine 0 for data entry), if/else here in case",
"## user has only one engine.",
"if",
"len",
"(",
"ipyclient",
")",
">",
"1",
":",
"filler",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
"targets",
"=",
"[",
"0",
"]",
")",
"smallview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
"targets",
"=",
"ipyclient",
".",
"ids",
"[",
"1",
":",
":",
"2",
"]",
")",
"else",
":",
"filler",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
"targets",
"=",
"[",
"0",
"]",
")",
"smallview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
"targets",
"=",
"[",
"0",
"]",
")",
"## First submit a sleeper job as temp_flag for cleanups",
"last_sample",
"=",
"0",
"cleanups",
"=",
"{",
"}",
"cleanups",
"[",
"last_sample",
"]",
"=",
"filler",
".",
"apply",
"(",
"time",
".",
"sleep",
",",
"0.0",
")",
"## fill the duplicates filter array",
"async",
"=",
"smallview",
".",
"apply",
"(",
"fill_dups_arr",
",",
"data",
")",
"while",
"1",
":",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"20",
",",
"0",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"async",
".",
"ready",
"(",
")",
":",
"break",
"if",
"not",
"async",
".",
"successful",
"(",
")",
":",
"raise",
"IPyradWarningExit",
"(",
"async",
".",
"result",
"(",
")",
")",
"## Get all existing .tmp.h5 files. If files exist then assume that we are",
"## restarting an interrupted job. We need to check for each one whether it ",
"## has it finished being built, and whether it has been written to the ",
"## large array yet.",
"snames",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"samples",
"]",
"snames",
".",
"sort",
"(",
")",
"smpios",
"=",
"{",
"i",
":",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"i",
"+",
"'.tmp.h5'",
")",
"for",
"i",
"in",
"snames",
"}",
"## send 'singlecat()' jobs to engines",
"bseeds",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".tmparrs.h5\"",
")",
"jobs",
"=",
"{",
"}",
"for",
"sample",
"in",
"samples",
":",
"sidx",
"=",
"snames",
".",
"index",
"(",
"sample",
".",
"name",
")",
"args",
"=",
"(",
"data",
",",
"sample",
",",
"bseeds",
",",
"sidx",
",",
"nloci",
")",
"## Only build it if it doesn't already exist. Singlecat removes",
"## unfinished files if interrupted, so .tmp.h5 should not exist",
"## unless the file is ready to be entered. ",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"smpios",
"[",
"sample",
".",
"name",
"]",
")",
":",
"jobs",
"[",
"sample",
".",
"name",
"]",
"=",
"smallview",
".",
"apply",
"(",
"singlecat",
",",
"*",
"args",
")",
"## track progress of singlecat jobs and submit writing jobs for finished",
"## singlecat files (.tmp.h5).",
"alljobs",
"=",
"len",
"(",
"jobs",
")",
"while",
"1",
":",
"## check for finished jobs",
"curkeys",
"=",
"jobs",
".",
"keys",
"(",
")",
"for",
"key",
"in",
"curkeys",
":",
"async",
"=",
"jobs",
"[",
"key",
"]",
"if",
"async",
".",
"ready",
"(",
")",
":",
"if",
"async",
".",
"successful",
"(",
")",
":",
"## submit cleanup for finished job",
"args",
"=",
"(",
"data",
",",
"data",
".",
"samples",
"[",
"key",
"]",
",",
"snames",
".",
"index",
"(",
"key",
")",
")",
"with",
"filler",
".",
"temp_flags",
"(",
"after",
"=",
"cleanups",
"[",
"last_sample",
"]",
")",
":",
"cleanups",
"[",
"key",
"]",
"=",
"filler",
".",
"apply",
"(",
"write_to_fullarr",
",",
"*",
"args",
")",
"last_sample",
"=",
"key",
"del",
"jobs",
"[",
"key",
"]",
"else",
":",
"if",
"not",
"async",
".",
"successful",
"(",
")",
":",
"raise",
"IPyradWarningExit",
"(",
"async",
".",
"result",
"(",
")",
")",
"## print progress or break",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"alljobs",
",",
"alljobs",
"-",
"len",
"(",
"jobs",
")",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"not",
"jobs",
":",
"break",
"## add the dask_chroms func for reference data",
"if",
"'reference'",
"in",
"data",
".",
"paramsdict",
"[",
"\"assembly_method\"",
"]",
":",
"with",
"filler",
".",
"temp_flags",
"(",
"after",
"=",
"cleanups",
".",
"values",
"(",
")",
")",
":",
"cleanups",
"[",
"'ref'",
"]",
"=",
"filler",
".",
"apply",
"(",
"dask_chroms",
",",
"*",
"(",
"data",
",",
"samples",
")",
")",
"## ------- print breakline between indexing and writing database ---------",
"print",
"(",
"\"\"",
")",
"## track progress of databaseing",
"start",
"=",
"time",
".",
"time",
"(",
")",
"printstr",
"=",
"\" building database | {} | s6 |\"",
"while",
"1",
":",
"finished",
"=",
"[",
"i",
"for",
"i",
"in",
"cleanups",
".",
"values",
"(",
")",
"if",
"i",
".",
"ready",
"(",
")",
"]",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"len",
"(",
"cleanups",
")",
",",
"len",
"(",
"finished",
")",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"## break if one failed, or if finished",
"if",
"not",
"all",
"(",
"[",
"i",
".",
"successful",
"(",
")",
"for",
"i",
"in",
"finished",
"]",
")",
":",
"break",
"if",
"len",
"(",
"cleanups",
")",
"==",
"len",
"(",
"finished",
")",
":",
"break",
"## check for errors",
"for",
"job",
"in",
"cleanups",
":",
"if",
"cleanups",
"[",
"job",
"]",
".",
"ready",
"(",
")",
":",
"if",
"not",
"cleanups",
"[",
"job",
"]",
".",
"successful",
"(",
")",
":",
"raise",
"IPyradWarningExit",
"(",
"(",
"job",
",",
"cleanups",
"[",
"job",
"]",
".",
"result",
"(",
")",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
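new_multicat() above keeps engine 0 as a dedicated writer: each finished singlecat job triggers a write_to_fullarr submission wrapped in temp_flags(after=<previous write>), so writes into the shared HDF5 run strictly one after another while the remaining engines keep indexing. A stripped-down sketch of that submit-and-chain pattern; the index_one/write_one functions are placeholders rather than ipyrad functions, and it assumes an ipcluster is already running (e.g. ipcluster start -n 4):

import time
import ipyparallel as ipp

def index_one(name):        # stand-in for singlecat(); placeholder work
    return "indexed " + name

def write_one(name):        # stand-in for write_to_fullarr(); placeholder write
    return "wrote " + name

rc = ipp.Client()
writer = rc.load_balanced_view(targets=[0])              # engine 0 only writes
workers = rc.load_balanced_view(targets=rc.ids[1:] or [0])

jobs = {name: workers.apply(index_one, name) for name in ["A", "B", "C"]}
last = writer.apply(time.sleep, 0.0)                     # sleeper job anchors the chain
writes = {}
while jobs:
    for name in list(jobs):
        job = jobs[name]
        if job.ready():
            if not job.successful():
                raise RuntimeError(job.exception())
            # chain this write after the previous one so only a single
            # job ever touches the shared output at a time
            with writer.temp_flags(after=last):
                writes[name] = writer.apply(write_one, name)
            last = writes[name]
            del jobs[name]
    time.sleep(0.1)

print([res.get() for res in writes.values()])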
valid
|
multicat
|
Runs singlecat and cleanup jobs for each sample.
For each sample this fills its own hdf5 array with catg data & indels.
This is messy, could use simplifying.
|
ipyrad/assemble/cluster_across.py
|
def multicat(data, samples, ipyclient):
"""
Runs singlecat and cleanup jobs for each sample.
For each sample this fills its own hdf5 array with catg data & indels.
    This is messy, could use simplifying.
"""
## progress ticker
start = time.time()
printstr = " indexing clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(20, 0, printstr.format(elapsed))
## parallel client
lbview = ipyclient.load_balanced_view()
## First submit a sleeper job as temp_flag for cleanups
last_sample = 0
cleanups = {}
cleanups[last_sample] = lbview.apply(time.sleep, 0.0)
## get samples and names, sorted
snames = [i.name for i in samples]
snames.sort()
## Build an array for quickly indexing consens reads from catg files.
## save as a npy int binary file.
uhandle = os.path.join(data.dirs.across, data.name+".utemp.sort")
bseeds = os.path.join(data.dirs.across, data.name+".tmparrs.h5")
## send as first async1 job
async1 = lbview.apply(get_seeds_and_hits, *(uhandle, bseeds, snames))
async2 = lbview.apply(fill_dups_arr, data)
## progress bar for seed/hit sorting
while not (async1.ready() and async2.ready()):
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(20, 0, printstr.format(elapsed))
time.sleep(0.1)
if not async1.successful():
raise IPyradWarningExit("error in get_seeds: %s", async1.exception())
if not async2.successful():
raise IPyradWarningExit("error in fill_dups: %s", async2.exception())
## make a limited njobs view based on mem limits
## is using smallview necessary? (yes, it is for bad libraries)
smallview = ipyclient.load_balanced_view(targets=ipyclient.ids[::2])
## make sure there are no old tmp.h5 files
smpios = [os.path.join(data.dirs.across, sample.name+'.tmp.h5') \
for sample in samples]
for smpio in smpios:
if os.path.exists(smpio):
os.remove(smpio)
## send 'singlecat()' jobs to engines
jobs = {}
for sample in samples:
sidx = snames.index(sample.name)
jobs[sample.name] = smallview.apply(singlecat, *(data, sample, bseeds, sidx))
## check for finished and submit disk-writing job when finished
alljobs = len(jobs)
while 1:
## check for finished jobs
curkeys = jobs.keys()
for key in curkeys:
async = jobs[key]
if async.ready():
if async.successful():
## submit cleanup for finished job
args = (data, data.samples[key], snames.index(key))
with lbview.temp_flags(after=cleanups[last_sample]):
cleanups[key] = lbview.apply(write_to_fullarr, *args)
last_sample = key
del jobs[key]
else:
err = jobs[key].exception()
errmsg = "singlecat error: {} {}".format(key, err)
raise IPyradWarningExit(errmsg)
## print progress or break
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(alljobs, alljobs-len(jobs), printstr.format(elapsed))
time.sleep(0.1)
if not jobs:
break
## add the dask_chroms func for reference data
if 'reference' in data.paramsdict["assembly_method"]:
with lbview.temp_flags(after=cleanups.values()):
cleanups['ref'] = lbview.apply(dask_chroms, *(data, samples))
## wait for "write_to_fullarr" jobs to finish
print("")
start = time.time()
printstr = " building database | {} | s6 |"
while 1:
finished = [i for i in cleanups.values() if i.ready()]
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(len(cleanups), len(finished), printstr.format(elapsed))
time.sleep(0.1)
## break if one failed, or if finished
if not all([i.successful() for i in finished]):
break
if len(cleanups) == len(finished):
break
## check for errors
for job in cleanups:
if cleanups[job].ready():
if not cleanups[job].successful():
err = " error in write_to_fullarr ({}) {}"\
.format(job, cleanups[job].result())
LOGGER.error(err)
raise IPyradWarningExit(err)
## remove large indels array file and singlecat tmparr file
ifile = os.path.join(data.dirs.across, data.name+".tmp.indels.hdf5")
if os.path.exists(ifile):
os.remove(ifile)
if os.path.exists(bseeds):
os.remove(bseeds)
for sh5 in [os.path.join(data.dirs.across, i.name+".tmp.h5") for i in samples]:
os.remove(sh5)
## print final progress
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(10, 10, printstr.format(elapsed))
print("")
|
def multicat(data, samples, ipyclient):
"""
Runs singlecat and cleanup jobs for each sample.
For each sample this fills its own hdf5 array with catg data & indels.
    This is messy, could use simplifying.
"""
## progress ticker
start = time.time()
printstr = " indexing clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(20, 0, printstr.format(elapsed))
## parallel client
lbview = ipyclient.load_balanced_view()
## First submit a sleeper job as temp_flag for cleanups
last_sample = 0
cleanups = {}
cleanups[last_sample] = lbview.apply(time.sleep, 0.0)
## get samples and names, sorted
snames = [i.name for i in samples]
snames.sort()
## Build an array for quickly indexing consens reads from catg files.
## save as a npy int binary file.
uhandle = os.path.join(data.dirs.across, data.name+".utemp.sort")
bseeds = os.path.join(data.dirs.across, data.name+".tmparrs.h5")
## send as first async1 job
async1 = lbview.apply(get_seeds_and_hits, *(uhandle, bseeds, snames))
async2 = lbview.apply(fill_dups_arr, data)
## progress bar for seed/hit sorting
while not (async1.ready() and async2.ready()):
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(20, 0, printstr.format(elapsed))
time.sleep(0.1)
if not async1.successful():
raise IPyradWarningExit("error in get_seeds: %s", async1.exception())
if not async2.successful():
raise IPyradWarningExit("error in fill_dups: %s", async2.exception())
## make a limited njobs view based on mem limits
## is using smallview necessary? (yes, it is for bad libraries)
smallview = ipyclient.load_balanced_view(targets=ipyclient.ids[::2])
## make sure there are no old tmp.h5 files
smpios = [os.path.join(data.dirs.across, sample.name+'.tmp.h5') \
for sample in samples]
for smpio in smpios:
if os.path.exists(smpio):
os.remove(smpio)
## send 'singlecat()' jobs to engines
jobs = {}
for sample in samples:
sidx = snames.index(sample.name)
jobs[sample.name] = smallview.apply(singlecat, *(data, sample, bseeds, sidx))
## check for finished and submit disk-writing job when finished
alljobs = len(jobs)
while 1:
## check for finished jobs
curkeys = jobs.keys()
for key in curkeys:
async = jobs[key]
if async.ready():
if async.successful():
## submit cleanup for finished job
args = (data, data.samples[key], snames.index(key))
with lbview.temp_flags(after=cleanups[last_sample]):
cleanups[key] = lbview.apply(write_to_fullarr, *args)
last_sample = key
del jobs[key]
else:
err = jobs[key].exception()
errmsg = "singlecat error: {} {}".format(key, err)
raise IPyradWarningExit(errmsg)
## print progress or break
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(alljobs, alljobs-len(jobs), printstr.format(elapsed))
time.sleep(0.1)
if not jobs:
break
## add the dask_chroms func for reference data
if 'reference' in data.paramsdict["assembly_method"]:
with lbview.temp_flags(after=cleanups.values()):
cleanups['ref'] = lbview.apply(dask_chroms, *(data, samples))
## wait for "write_to_fullarr" jobs to finish
print("")
start = time.time()
printstr = " building database | {} | s6 |"
while 1:
finished = [i for i in cleanups.values() if i.ready()]
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(len(cleanups), len(finished), printstr.format(elapsed))
time.sleep(0.1)
## break if one failed, or if finished
if not all([i.successful() for i in finished]):
break
if len(cleanups) == len(finished):
break
## check for errors
for job in cleanups:
if cleanups[job].ready():
if not cleanups[job].successful():
err = " error in write_to_fullarr ({}) {}"\
.format(job, cleanups[job].result())
LOGGER.error(err)
raise IPyradWarningExit(err)
## remove large indels array file and singlecat tmparr file
ifile = os.path.join(data.dirs.across, data.name+".tmp.indels.hdf5")
if os.path.exists(ifile):
os.remove(ifile)
if os.path.exists(bseeds):
os.remove(bseeds)
for sh5 in [os.path.join(data.dirs.across, i.name+".tmp.h5") for i in samples]:
os.remove(sh5)
## print final progress
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(10, 10, printstr.format(elapsed))
print("")
|
[
"Runs",
"singlecat",
"and",
"cleanup",
"jobs",
"for",
"each",
"sample",
".",
"For",
"each",
"sample",
"this",
"fills",
"its",
"own",
"hdf5",
"array",
"with",
"catg",
"data",
"&",
"indels",
".",
"This",
"is",
"messy",
"could",
"use",
"simplifiying",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L934-L1062
|
[
"def",
"multicat",
"(",
"data",
",",
"samples",
",",
"ipyclient",
")",
":",
"## progress ticker",
"start",
"=",
"time",
".",
"time",
"(",
")",
"printstr",
"=",
"\" indexing clusters | {} | s6 |\"",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"20",
",",
"0",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
")",
"## parallel client",
"lbview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
")",
"## First submit a sleeper job as temp_flag for cleanups",
"last_sample",
"=",
"0",
"cleanups",
"=",
"{",
"}",
"cleanups",
"[",
"last_sample",
"]",
"=",
"lbview",
".",
"apply",
"(",
"time",
".",
"sleep",
",",
"0.0",
")",
"## get samples and names, sorted",
"snames",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"samples",
"]",
"snames",
".",
"sort",
"(",
")",
"## Build an array for quickly indexing consens reads from catg files.",
"## save as a npy int binary file.",
"uhandle",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".utemp.sort\"",
")",
"bseeds",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".tmparrs.h5\"",
")",
"## send as first async1 job",
"async1",
"=",
"lbview",
".",
"apply",
"(",
"get_seeds_and_hits",
",",
"*",
"(",
"uhandle",
",",
"bseeds",
",",
"snames",
")",
")",
"async2",
"=",
"lbview",
".",
"apply",
"(",
"fill_dups_arr",
",",
"data",
")",
"## progress bar for seed/hit sorting",
"while",
"not",
"(",
"async1",
".",
"ready",
"(",
")",
"and",
"async2",
".",
"ready",
"(",
")",
")",
":",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"20",
",",
"0",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"not",
"async1",
".",
"successful",
"(",
")",
":",
"raise",
"IPyradWarningExit",
"(",
"\"error in get_seeds: %s\"",
",",
"async1",
".",
"exception",
"(",
")",
")",
"if",
"not",
"async2",
".",
"successful",
"(",
")",
":",
"raise",
"IPyradWarningExit",
"(",
"\"error in fill_dups: %s\"",
",",
"async2",
".",
"exception",
"(",
")",
")",
"## make a limited njobs view based on mem limits ",
"## is using smallview necessary? (yes, it is for bad libraries)",
"smallview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
"targets",
"=",
"ipyclient",
".",
"ids",
"[",
":",
":",
"2",
"]",
")",
"## make sure there are no old tmp.h5 files ",
"smpios",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"sample",
".",
"name",
"+",
"'.tmp.h5'",
")",
"for",
"sample",
"in",
"samples",
"]",
"for",
"smpio",
"in",
"smpios",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"smpio",
")",
":",
"os",
".",
"remove",
"(",
"smpio",
")",
"## send 'singlecat()' jobs to engines",
"jobs",
"=",
"{",
"}",
"for",
"sample",
"in",
"samples",
":",
"sidx",
"=",
"snames",
".",
"index",
"(",
"sample",
".",
"name",
")",
"jobs",
"[",
"sample",
".",
"name",
"]",
"=",
"smallview",
".",
"apply",
"(",
"singlecat",
",",
"*",
"(",
"data",
",",
"sample",
",",
"bseeds",
",",
"sidx",
")",
")",
"## check for finished and submit disk-writing job when finished",
"alljobs",
"=",
"len",
"(",
"jobs",
")",
"while",
"1",
":",
"## check for finished jobs",
"curkeys",
"=",
"jobs",
".",
"keys",
"(",
")",
"for",
"key",
"in",
"curkeys",
":",
"async",
"=",
"jobs",
"[",
"key",
"]",
"if",
"async",
".",
"ready",
"(",
")",
":",
"if",
"async",
".",
"successful",
"(",
")",
":",
"## submit cleanup for finished job",
"args",
"=",
"(",
"data",
",",
"data",
".",
"samples",
"[",
"key",
"]",
",",
"snames",
".",
"index",
"(",
"key",
")",
")",
"with",
"lbview",
".",
"temp_flags",
"(",
"after",
"=",
"cleanups",
"[",
"last_sample",
"]",
")",
":",
"cleanups",
"[",
"key",
"]",
"=",
"lbview",
".",
"apply",
"(",
"write_to_fullarr",
",",
"*",
"args",
")",
"last_sample",
"=",
"key",
"del",
"jobs",
"[",
"key",
"]",
"else",
":",
"err",
"=",
"jobs",
"[",
"key",
"]",
".",
"exception",
"(",
")",
"errmsg",
"=",
"\"singlecat error: {} {}\"",
".",
"format",
"(",
"key",
",",
"err",
")",
"raise",
"IPyradWarningExit",
"(",
"errmsg",
")",
"## print progress or break",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"alljobs",
",",
"alljobs",
"-",
"len",
"(",
"jobs",
")",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"not",
"jobs",
":",
"break",
"## add the dask_chroms func for reference data",
"if",
"'reference'",
"in",
"data",
".",
"paramsdict",
"[",
"\"assembly_method\"",
"]",
":",
"with",
"lbview",
".",
"temp_flags",
"(",
"after",
"=",
"cleanups",
".",
"values",
"(",
")",
")",
":",
"cleanups",
"[",
"'ref'",
"]",
"=",
"lbview",
".",
"apply",
"(",
"dask_chroms",
",",
"*",
"(",
"data",
",",
"samples",
")",
")",
"## wait for \"write_to_fullarr\" jobs to finish",
"print",
"(",
"\"\"",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"printstr",
"=",
"\" building database | {} | s6 |\"",
"while",
"1",
":",
"finished",
"=",
"[",
"i",
"for",
"i",
"in",
"cleanups",
".",
"values",
"(",
")",
"if",
"i",
".",
"ready",
"(",
")",
"]",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"len",
"(",
"cleanups",
")",
",",
"len",
"(",
"finished",
")",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"## break if one failed, or if finished",
"if",
"not",
"all",
"(",
"[",
"i",
".",
"successful",
"(",
")",
"for",
"i",
"in",
"finished",
"]",
")",
":",
"break",
"if",
"len",
"(",
"cleanups",
")",
"==",
"len",
"(",
"finished",
")",
":",
"break",
"## check for errors",
"for",
"job",
"in",
"cleanups",
":",
"if",
"cleanups",
"[",
"job",
"]",
".",
"ready",
"(",
")",
":",
"if",
"not",
"cleanups",
"[",
"job",
"]",
".",
"successful",
"(",
")",
":",
"err",
"=",
"\" error in write_to_fullarr ({}) {}\"",
".",
"format",
"(",
"job",
",",
"cleanups",
"[",
"job",
"]",
".",
"result",
"(",
")",
")",
"LOGGER",
".",
"error",
"(",
"err",
")",
"raise",
"IPyradWarningExit",
"(",
"err",
")",
"## remove large indels array file and singlecat tmparr file",
"ifile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".tmp.indels.hdf5\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"ifile",
")",
":",
"os",
".",
"remove",
"(",
"ifile",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"bseeds",
")",
":",
"os",
".",
"remove",
"(",
"bseeds",
")",
"for",
"sh5",
"in",
"[",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"i",
".",
"name",
"+",
"\".tmp.h5\"",
")",
"for",
"i",
"in",
"samples",
"]",
":",
"os",
".",
"remove",
"(",
"sh5",
")",
"## print final progress",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"10",
",",
"10",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
")",
"print",
"(",
"\"\"",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
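multicat() above farms out singlecat() jobs (shown in the next entry) whose central move is a numpy scatter: this sample's catg rows are gathered by consens index and written to the global locus positions given by an index array. A toy version of that fancy-indexing step with invented shapes and values:

import numpy as np

# full: one row per cluster this sample appears in ->
# (global locus id, sample index, this sample's consens index)
full = np.array([[0, 2, 5],
                 [3, 2, 1],
                 [7, 2, 0]])
nloci, maxlen = 10, 6
catarr = np.arange(8 * maxlen * 4, dtype=np.uint32).reshape(8, maxlen, 4)

# gather this sample's consens rows, then scatter them into a zero-filled
# block shaped like this sample's slice of the global catg array
ocatg = np.zeros((nloci, maxlen, 4), dtype=catarr.dtype)
tmp = catarr[full[:, 2], :maxlen, :]
ocatg[full[:, 0], :tmp.shape[1], :] = tmp

print(np.array_equal(ocatg[3], catarr[1]))   # True: consens 1 landed at locus 3
print(ocatg[5].sum())                        # 0: this sample is absent from locus 5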
valid
|
singlecat
|
Orders catg data for each sample into the final locus order. This allows
all of the individual catgs to simply be combined later. They are also in
the same order as the indels array, so indels are inserted from the indel
array that is passed in.
|
ipyrad/assemble/cluster_across.py
|
def singlecat(data, sample, bseeds, sidx, nloci):
"""
Orders catg data for each sample into the final locus order. This allows
all of the individual catgs to simply be combined later. They are also in
the same order as the indels array, so indels are inserted from the indel
array that is passed in.
"""
LOGGER.info("in single cat here")
## enter ref data?
isref = 'reference' in data.paramsdict["assembly_method"]
## grab seeds and hits info for this sample
with h5py.File(bseeds, 'r') as io5:
## get hits just for this sample and sort them by sample order index
hits = io5["uarr"][:]
hits = hits[hits[:, 1] == sidx, :]
#hits = hits[hits[:, 2].argsort()]
## get seeds just for this sample and sort them by sample order index
seeds = io5["seedsarr"][:]
seeds = seeds[seeds[:, 1] == sidx, :]
#seeds = seeds[seeds[:, 2].argsort()]
full = np.concatenate((seeds, hits))
full = full[full[:, 0].argsort()]
## still using max+20 len limit, rare longer merged reads get trimmed
## we need to allow room for indels to be added too
maxlen = data._hackersonly["max_fragment_length"] + 20
## we'll fill a new catg and alleles arr for this sample in locus order,
## which is known from seeds and hits
ocatg = np.zeros((nloci, maxlen, 4), dtype=np.uint32)
onall = np.zeros(nloci, dtype=np.uint8)
ochrom = np.zeros((nloci, 3), dtype=np.int64)
## grab the sample's data and write to ocatg and onall
if not sample.files.database:
raise IPyradWarningExit("missing catg file - {}".format(sample.name))
with h5py.File(sample.files.database, 'r') as io5:
## get it and delete it
catarr = io5["catg"][:]
tmp = catarr[full[:, 2], :maxlen, :]
del catarr
ocatg[full[:, 0], :tmp.shape[1], :] = tmp
del tmp
## get it and delete it
nall = io5["nalleles"][:]
onall[full[:, 0]] = nall[full[:, 2]]
del nall
## fill the reference data
if isref:
chrom = io5["chroms"][:]
ochrom[full[:, 0]] = chrom[full[:, 2]]
del chrom
## get indel locations for this sample
ipath = os.path.join(data.dirs.across, data.name+".tmp.indels.hdf5")
with h5py.File(ipath, 'r') as ih5:
indels = ih5["indels"][sidx, :, :maxlen]
## insert indels into ocatg
newcatg = inserted_indels(indels, ocatg)
del ocatg, indels
## save individual tmp h5 data
smpio = os.path.join(data.dirs.across, sample.name+'.tmp.h5')
with h5py.File(smpio, 'w') as oh5:
oh5.create_dataset("icatg", data=newcatg, dtype=np.uint32)
oh5.create_dataset("inall", data=onall, dtype=np.uint8)
if isref:
oh5.create_dataset("ichrom", data=ochrom, dtype=np.int64)
|
def singlecat(data, sample, bseeds, sidx, nloci):
"""
Orders catg data for each sample into the final locus order. This allows
all of the individual catgs to simply be combined later. They are also in
the same order as the indels array, so indels are inserted from the indel
array that is passed in.
"""
LOGGER.info("in single cat here")
## enter ref data?
isref = 'reference' in data.paramsdict["assembly_method"]
## grab seeds and hits info for this sample
with h5py.File(bseeds, 'r') as io5:
## get hits just for this sample and sort them by sample order index
hits = io5["uarr"][:]
hits = hits[hits[:, 1] == sidx, :]
#hits = hits[hits[:, 2].argsort()]
## get seeds just for this sample and sort them by sample order index
seeds = io5["seedsarr"][:]
seeds = seeds[seeds[:, 1] == sidx, :]
#seeds = seeds[seeds[:, 2].argsort()]
full = np.concatenate((seeds, hits))
full = full[full[:, 0].argsort()]
## still using max+20 len limit, rare longer merged reads get trimmed
## we need to allow room for indels to be added too
maxlen = data._hackersonly["max_fragment_length"] + 20
## we'll fill a new catg and alleles arr for this sample in locus order,
## which is known from seeds and hits
ocatg = np.zeros((nloci, maxlen, 4), dtype=np.uint32)
onall = np.zeros(nloci, dtype=np.uint8)
ochrom = np.zeros((nloci, 3), dtype=np.int64)
## grab the sample's data and write to ocatg and onall
if not sample.files.database:
raise IPyradWarningExit("missing catg file - {}".format(sample.name))
with h5py.File(sample.files.database, 'r') as io5:
## get it and delete it
catarr = io5["catg"][:]
tmp = catarr[full[:, 2], :maxlen, :]
del catarr
ocatg[full[:, 0], :tmp.shape[1], :] = tmp
del tmp
## get it and delete it
nall = io5["nalleles"][:]
onall[full[:, 0]] = nall[full[:, 2]]
del nall
## fill the reference data
if isref:
chrom = io5["chroms"][:]
ochrom[full[:, 0]] = chrom[full[:, 2]]
del chrom
## get indel locations for this sample
ipath = os.path.join(data.dirs.across, data.name+".tmp.indels.hdf5")
with h5py.File(ipath, 'r') as ih5:
indels = ih5["indels"][sidx, :, :maxlen]
## insert indels into ocatg
newcatg = inserted_indels(indels, ocatg)
del ocatg, indels
## save individual tmp h5 data
smpio = os.path.join(data.dirs.across, sample.name+'.tmp.h5')
with h5py.File(smpio, 'w') as oh5:
oh5.create_dataset("icatg", data=newcatg, dtype=np.uint32)
oh5.create_dataset("inall", data=onall, dtype=np.uint8)
if isref:
oh5.create_dataset("ichrom", data=ochrom, dtype=np.int64)
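The core of singlecat is an index remap: each sample's locally ordered catg rows are scattered into a globally ordered array using (global locus, sample, local row) triples from the seeds/hits tables. Below is a minimal NumPy sketch of that scatter step with made-up toy shapes and values; it is not ipyrad's real data layout, only an illustration of the indexing pattern.

import numpy as np

# toy sizes: 5 global loci, reads up to 6 bp, 4 count channels (CATG)
nloci, maxlen = 5, 6
local_catg = np.arange(3 * maxlen * 4, dtype=np.uint32).reshape(3, maxlen, 4)

# hypothetical "full" map: column 0 = global locus id, column 2 = local row id
full = np.array([[4, 0, 0],
                 [1, 0, 1],
                 [3, 0, 2]])
full = full[full[:, 0].argsort()]           # sort by global locus order

ocatg = np.zeros((nloci, maxlen, 4), dtype=np.uint32)
ocatg[full[:, 0]] = local_catg[full[:, 2]]  # scatter local rows into global slots
print(ocatg[full[:, 0], 0, 0])              # local rows now sit at loci 1, 3, 4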
|
[
"Orders",
"catg",
"data",
"for",
"each",
"sample",
"into",
"the",
"final",
"locus",
"order",
".",
"This",
"allows",
"all",
"of",
"the",
"individual",
"catgs",
"to",
"simply",
"be",
"combined",
"later",
".",
"They",
"are",
"also",
"in",
"the",
"same",
"order",
"as",
"the",
"indels",
"array",
"so",
"indels",
"are",
"inserted",
"from",
"the",
"indel",
"array",
"that",
"is",
"passed",
"in",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1067-L1140
|
[
"def",
"singlecat",
"(",
"data",
",",
"sample",
",",
"bseeds",
",",
"sidx",
",",
"nloci",
")",
":",
"LOGGER",
".",
"info",
"(",
"\"in single cat here\"",
")",
"## enter ref data?",
"isref",
"=",
"'reference'",
"in",
"data",
".",
"paramsdict",
"[",
"\"assembly_method\"",
"]",
"## grab seeds and hits info for this sample",
"with",
"h5py",
".",
"File",
"(",
"bseeds",
",",
"'r'",
")",
"as",
"io5",
":",
"## get hits just for this sample and sort them by sample order index",
"hits",
"=",
"io5",
"[",
"\"uarr\"",
"]",
"[",
":",
"]",
"hits",
"=",
"hits",
"[",
"hits",
"[",
":",
",",
"1",
"]",
"==",
"sidx",
",",
":",
"]",
"#hits = hits[hits[:, 2].argsort()]",
"## get seeds just for this sample and sort them by sample order index",
"seeds",
"=",
"io5",
"[",
"\"seedsarr\"",
"]",
"[",
":",
"]",
"seeds",
"=",
"seeds",
"[",
"seeds",
"[",
":",
",",
"1",
"]",
"==",
"sidx",
",",
":",
"]",
"#seeds = seeds[seeds[:, 2].argsort()]",
"full",
"=",
"np",
".",
"concatenate",
"(",
"(",
"seeds",
",",
"hits",
")",
")",
"full",
"=",
"full",
"[",
"full",
"[",
":",
",",
"0",
"]",
".",
"argsort",
"(",
")",
"]",
"## still using max+20 len limit, rare longer merged reads get trimmed",
"## we need to allow room for indels to be added too",
"maxlen",
"=",
"data",
".",
"_hackersonly",
"[",
"\"max_fragment_length\"",
"]",
"+",
"20",
"## we'll fill a new catg and alleles arr for this sample in locus order,",
"## which is known from seeds and hits",
"ocatg",
"=",
"np",
".",
"zeros",
"(",
"(",
"nloci",
",",
"maxlen",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"onall",
"=",
"np",
".",
"zeros",
"(",
"nloci",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"ochrom",
"=",
"np",
".",
"zeros",
"(",
"(",
"nloci",
",",
"3",
")",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"## grab the sample's data and write to ocatg and onall",
"if",
"not",
"sample",
".",
"files",
".",
"database",
":",
"raise",
"IPyradWarningExit",
"(",
"\"missing catg file - {}\"",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
"with",
"h5py",
".",
"File",
"(",
"sample",
".",
"files",
".",
"database",
",",
"'r'",
")",
"as",
"io5",
":",
"## get it and delete it",
"catarr",
"=",
"io5",
"[",
"\"catg\"",
"]",
"[",
":",
"]",
"tmp",
"=",
"catarr",
"[",
"full",
"[",
":",
",",
"2",
"]",
",",
":",
"maxlen",
",",
":",
"]",
"del",
"catarr",
"ocatg",
"[",
"full",
"[",
":",
",",
"0",
"]",
",",
":",
"tmp",
".",
"shape",
"[",
"1",
"]",
",",
":",
"]",
"=",
"tmp",
"del",
"tmp",
"## get it and delete it",
"nall",
"=",
"io5",
"[",
"\"nalleles\"",
"]",
"[",
":",
"]",
"onall",
"[",
"full",
"[",
":",
",",
"0",
"]",
"]",
"=",
"nall",
"[",
"full",
"[",
":",
",",
"2",
"]",
"]",
"del",
"nall",
"## fill the reference data",
"if",
"isref",
":",
"chrom",
"=",
"io5",
"[",
"\"chroms\"",
"]",
"[",
":",
"]",
"ochrom",
"[",
"full",
"[",
":",
",",
"0",
"]",
"]",
"=",
"chrom",
"[",
"full",
"[",
":",
",",
"2",
"]",
"]",
"del",
"chrom",
"## get indel locations for this sample",
"ipath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".tmp.indels.hdf5\"",
")",
"with",
"h5py",
".",
"File",
"(",
"ipath",
",",
"'r'",
")",
"as",
"ih5",
":",
"indels",
"=",
"ih5",
"[",
"\"indels\"",
"]",
"[",
"sidx",
",",
":",
",",
":",
"maxlen",
"]",
"## insert indels into ocatg",
"newcatg",
"=",
"inserted_indels",
"(",
"indels",
",",
"ocatg",
")",
"del",
"ocatg",
",",
"indels",
"## save individual tmp h5 data",
"smpio",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"sample",
".",
"name",
"+",
"'.tmp.h5'",
")",
"with",
"h5py",
".",
"File",
"(",
"smpio",
",",
"'w'",
")",
"as",
"oh5",
":",
"oh5",
".",
"create_dataset",
"(",
"\"icatg\"",
",",
"data",
"=",
"newcatg",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"oh5",
".",
"create_dataset",
"(",
"\"inall\"",
",",
"data",
"=",
"onall",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"if",
"isref",
":",
"oh5",
".",
"create_dataset",
"(",
"\"ichrom\"",
",",
"data",
"=",
"ochrom",
",",
"dtype",
"=",
"np",
".",
"int64",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
write_to_fullarr
|
writes arrays to h5 disk
|
ipyrad/assemble/cluster_across.py
|
def write_to_fullarr(data, sample, sidx):
    """ writes arrays to h5 disk """
    ## enter ref data?
    #isref = 'reference' in data.paramsdict["assembly_method"]
    LOGGER.info("writing fullarr %s %s", sample.name, sidx)
    ## save big arrays to disk temporarily
    with h5py.File(data.clust_database, 'r+') as io5:
        ## open views into the arrays we plan to fill
        chunk = io5["catgs"].attrs["chunksize"][0]
        catg = io5["catgs"]
        nall = io5["nalleles"]
        ## adding an axis to newcatg makes it write about 1000X faster.
        smpio = os.path.join(data.dirs.across, sample.name+'.tmp.h5')
        with h5py.File(smpio) as indat:
            ## grab all of the data from this sample's arrays
            newcatg = indat["icatg"] #[:]
            onall = indat["inall"] #[:]
            ## enter it into the full array one chunk at a time
            for cidx in xrange(0, catg.shape[0], chunk):
                end = cidx + chunk
                catg[cidx:end, sidx:sidx+1, :] = np.expand_dims(newcatg[cidx:end, :], axis=1)
                nall[:, sidx:sidx+1] = np.expand_dims(onall, axis=1)
|
def write_to_fullarr(data, sample, sidx):
    """ writes arrays to h5 disk """
    ## enter ref data?
    #isref = 'reference' in data.paramsdict["assembly_method"]
    LOGGER.info("writing fullarr %s %s", sample.name, sidx)
    ## save big arrays to disk temporarily
    with h5py.File(data.clust_database, 'r+') as io5:
        ## open views into the arrays we plan to fill
        chunk = io5["catgs"].attrs["chunksize"][0]
        catg = io5["catgs"]
        nall = io5["nalleles"]
        ## adding an axis to newcatg makes it write about 1000X faster.
        smpio = os.path.join(data.dirs.across, sample.name+'.tmp.h5')
        with h5py.File(smpio) as indat:
            ## grab all of the data from this sample's arrays
            newcatg = indat["icatg"] #[:]
            onall = indat["inall"] #[:]
            ## enter it into the full array one chunk at a time
            for cidx in xrange(0, catg.shape[0], chunk):
                end = cidx + chunk
                catg[cidx:end, sidx:sidx+1, :] = np.expand_dims(newcatg[cidx:end, :], axis=1)
                nall[:, sidx:sidx+1] = np.expand_dims(onall, axis=1)
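write_to_fullarr streams one sample's (nloci, maxlen, 4) array into a single sample column of a much larger HDF5 dataset, one block of loci at a time, adding a length-1 sample axis so the shapes match. Here is a minimal, self-contained sketch of that chunked column write; the file name, dataset name, and shapes are toy assumptions, not ipyrad's real schema.

import numpy as np
import h5py

nloci, nsamples, maxlen = 100, 3, 8
chunk, sidx = 25, 1                                # write sample column 1 in blocks of 25 loci
newcatg = np.ones((nloci, maxlen, 4), dtype=np.uint32)

with h5py.File("toy_fullarr.h5", "w") as io5:
    catg = io5.create_dataset("catgs", (nloci, nsamples, maxlen, 4), dtype=np.uint32)
    for cidx in range(0, nloci, chunk):
        end = cidx + chunk
        # expand_dims gives shape (chunk, 1, maxlen, 4) to match the target slice
        catg[cidx:end, sidx:sidx+1, :] = np.expand_dims(newcatg[cidx:end], axis=1)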
|
[
"writes",
"arrays",
"to",
"h5",
"disk"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1150-L1176
|
[
"def",
"write_to_fullarr",
"(",
"data",
",",
"sample",
",",
"sidx",
")",
":",
"## enter ref data?",
"#isref = 'reference' in data.paramsdict[\"assembly_method\"]",
"LOGGER",
".",
"info",
"(",
"\"writing fullarr %s %s\"",
",",
"sample",
".",
"name",
",",
"sidx",
")",
"## save big arrays to disk temporarily",
"with",
"h5py",
".",
"File",
"(",
"data",
".",
"clust_database",
",",
"'r+'",
")",
"as",
"io5",
":",
"## open views into the arrays we plan to fill",
"chunk",
"=",
"io5",
"[",
"\"catgs\"",
"]",
".",
"attrs",
"[",
"\"chunksize\"",
"]",
"[",
"0",
"]",
"catg",
"=",
"io5",
"[",
"\"catgs\"",
"]",
"nall",
"=",
"io5",
"[",
"\"nalleles\"",
"]",
"## adding an axis to newcatg makes it write about 1000X faster.",
"smpio",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"sample",
".",
"name",
"+",
"'.tmp.h5'",
")",
"with",
"h5py",
".",
"File",
"(",
"smpio",
")",
"as",
"indat",
":",
"## grab all of the data from this sample's arrays",
"newcatg",
"=",
"indat",
"[",
"\"icatg\"",
"]",
"#[:]",
"onall",
"=",
"indat",
"[",
"\"inall\"",
"]",
"#[:]",
"## enter it into the full array one chunk at a time",
"for",
"cidx",
"in",
"xrange",
"(",
"0",
",",
"catg",
".",
"shape",
"[",
"0",
"]",
",",
"chunk",
")",
":",
"end",
"=",
"cidx",
"+",
"chunk",
"catg",
"[",
"cidx",
":",
"end",
",",
"sidx",
":",
"sidx",
"+",
"1",
",",
":",
"]",
"=",
"np",
".",
"expand_dims",
"(",
"newcatg",
"[",
"cidx",
":",
"end",
",",
":",
"]",
",",
"axis",
"=",
"1",
")",
"nall",
"[",
":",
",",
"sidx",
":",
"sidx",
"+",
"1",
"]",
"=",
"np",
".",
"expand_dims",
"(",
"onall",
",",
"axis",
"=",
"1",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
dask_chroms
|
A dask relay function to fill chroms for all samples
|
ipyrad/assemble/cluster_across.py
|
def dask_chroms(data, samples):
    """
    A dask relay function to fill chroms for all samples
    """
    ## example concatenating with dask
    h5s = [os.path.join(data.dirs.across, s.name+".tmp.h5") for s in samples]
    handles = [h5py.File(i) for i in h5s]
    dsets = [i['/ichrom'] for i in handles]
    arrays = [da.from_array(dset, chunks=(10000, 3)) for dset in dsets]
    stack = da.stack(arrays, axis=2)
    ## max chrom (should we check for variable hits? if so, things can get wonk)
    maxchrom = da.max(stack, axis=2)[:, 0]
    ## max pos
    maxpos = da.max(stack, axis=2)[:, 2]
    ## min pos
    mask = stack == 0
    stack[mask] = 9223372036854775807 ## max int64 value
    minpos = da.min(stack, axis=2)[:, 1]
    final = da.stack([maxchrom, minpos, maxpos], axis=1)
    final.to_hdf5(data.clust_database, "/chroms")
    ## close the h5 handles
    _ = [i.close() for i in handles]
|
def dask_chroms(data, samples):
    """
    A dask relay function to fill chroms for all samples
    """
    ## example concatenating with dask
    h5s = [os.path.join(data.dirs.across, s.name+".tmp.h5") for s in samples]
    handles = [h5py.File(i) for i in h5s]
    dsets = [i['/ichrom'] for i in handles]
    arrays = [da.from_array(dset, chunks=(10000, 3)) for dset in dsets]
    stack = da.stack(arrays, axis=2)
    ## max chrom (should we check for variable hits? if so, things can get wonk)
    maxchrom = da.max(stack, axis=2)[:, 0]
    ## max pos
    maxpos = da.max(stack, axis=2)[:, 2]
    ## min pos
    mask = stack == 0
    stack[mask] = 9223372036854775807 ## max int64 value
    minpos = da.min(stack, axis=2)[:, 1]
    final = da.stack([maxchrom, minpos, maxpos], axis=1)
    final.to_hdf5(data.clust_database, "/chroms")
    ## close the h5 handles
    _ = [i.close() for i in handles]
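The pattern here is stack-then-reduce: each per-sample (nloci, 3) chrom array becomes a lazy dask array, the arrays are stacked along a new sample axis, and max/min reductions collapse that axis. A small in-memory sketch of the same pattern follows; the toy arrays are invented, and it uses da.where rather than in-place assignment, which is the functional way to mask zeros on an immutable dask array.

import numpy as np
import dask.array as da

# two toy per-sample (nloci, 3) arrays; columns are (chrom, start, end), 0 = missing
s1 = np.array([[1, 10, 50], [0, 0, 0]], dtype=np.int64)
s2 = np.array([[1, 12, 48], [2, 5, 30]], dtype=np.int64)

arrays = [da.from_array(a, chunks=(2, 3)) for a in (s1, s2)]
stack = da.stack(arrays, axis=2)                   # shape (nloci, 3, nsamples)

maxchrom = da.max(stack, axis=2)[:, 0]
maxpos = da.max(stack, axis=2)[:, 2]
masked = da.where(stack == 0, np.iinfo(np.int64).max, stack)  # ignore missing entries
minpos = da.min(masked, axis=2)[:, 1]

final = da.stack([maxchrom, minpos, maxpos], axis=1)
print(final.compute())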
|
[
"A",
"dask",
"relay",
"function",
"to",
"fill",
"chroms",
"for",
"all",
"samples"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1180-L1206
|
[
"def",
"dask_chroms",
"(",
"data",
",",
"samples",
")",
":",
"## example concatenating with dask",
"h5s",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"s",
".",
"name",
"+",
"\".tmp.h5\"",
")",
"for",
"s",
"in",
"samples",
"]",
"handles",
"=",
"[",
"h5py",
".",
"File",
"(",
"i",
")",
"for",
"i",
"in",
"h5s",
"]",
"dsets",
"=",
"[",
"i",
"[",
"'/ichrom'",
"]",
"for",
"i",
"in",
"handles",
"]",
"arrays",
"=",
"[",
"da",
".",
"from_array",
"(",
"dset",
",",
"chunks",
"=",
"(",
"10000",
",",
"3",
")",
")",
"for",
"dset",
"in",
"dsets",
"]",
"stack",
"=",
"da",
".",
"stack",
"(",
"arrays",
",",
"axis",
"=",
"2",
")",
"## max chrom (should we check for variable hits? if so, things can get wonk)",
"maxchrom",
"=",
"da",
".",
"max",
"(",
"stack",
",",
"axis",
"=",
"2",
")",
"[",
":",
",",
"0",
"]",
"## max pos",
"maxpos",
"=",
"da",
".",
"max",
"(",
"stack",
",",
"axis",
"=",
"2",
")",
"[",
":",
",",
"2",
"]",
"## min pos",
"mask",
"=",
"stack",
"==",
"0",
"stack",
"[",
"mask",
"]",
"=",
"9223372036854775807",
"## max int64 value",
"minpos",
"=",
"da",
".",
"min",
"(",
"stack",
",",
"axis",
"=",
"2",
")",
"[",
":",
",",
"1",
"]",
"final",
"=",
"da",
".",
"stack",
"(",
"[",
"maxchrom",
",",
"minpos",
",",
"maxpos",
"]",
",",
"axis",
"=",
"1",
")",
"final",
".",
"to_hdf5",
"(",
"data",
".",
"clust_database",
",",
"\"/chroms\"",
")",
"## close the h5 handles",
"_",
"=",
"[",
"i",
".",
"close",
"(",
")",
"for",
"i",
"in",
"handles",
"]"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
inserted_indels
|
inserts indels into the catg array
|
ipyrad/assemble/cluster_across.py
|
def inserted_indels(indels, ocatg):
    """
    inserts indels into the catg array
    """
    ## return copy with indels inserted
    newcatg = np.zeros(ocatg.shape, dtype=np.uint32)
    ## iterate over loci and make extensions for indels
    for iloc in xrange(ocatg.shape[0]):
        ## get indels indices
        indidx = np.where(indels[iloc, :])[0]
        if np.any(indidx):
            ## which new (empty) rows will be added
            allrows = np.arange(ocatg.shape[1])
            mask = np.ones(allrows.shape[0], dtype=np.bool_)
            for idx in indidx:
                mask[idx] = False
            not_idx = allrows[mask == 1]
            ## fill in new data into all other spots
            newcatg[iloc][not_idx] = ocatg[iloc, :not_idx.shape[0]]
        else:
            newcatg[iloc] = ocatg[iloc]
    return newcatg
|
def inserted_indels(indels, ocatg):
    """
    inserts indels into the catg array
    """
    ## return copy with indels inserted
    newcatg = np.zeros(ocatg.shape, dtype=np.uint32)
    ## iterate over loci and make extensions for indels
    for iloc in xrange(ocatg.shape[0]):
        ## get indels indices
        indidx = np.where(indels[iloc, :])[0]
        if np.any(indidx):
            ## which new (empty) rows will be added
            allrows = np.arange(ocatg.shape[1])
            mask = np.ones(allrows.shape[0], dtype=np.bool_)
            for idx in indidx:
                mask[idx] = False
            not_idx = allrows[mask == 1]
            ## fill in new data into all other spots
            newcatg[iloc][not_idx] = ocatg[iloc, :not_idx.shape[0]]
        else:
            newcatg[iloc] = ocatg[iloc]
    return newcatg
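In effect, each locus's base counts are shifted to the right wherever the across-sample alignment introduced a gap, leaving zeroed positions at the indel sites. The same mask trick on a single toy locus (invented values, one dimension only for readability):

import numpy as np

maxlen = 6
counts = np.array([10, 11, 12, 13, 14, 15], dtype=np.uint32)   # toy per-site depths
indels = np.array([0, 0, 1, 0, 0, 0], dtype=bool)              # gap inserted at site 2

new = np.zeros(maxlen, dtype=np.uint32)
keep = np.arange(maxlen)[~indels]          # positions that receive real data
new[keep] = counts[:keep.shape[0]]         # everything after the gap shifts right
print(new)                                 # [10 11  0 12 13 14]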
|
[
"inserts",
"indels",
"into",
"the",
"catg",
"array"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1214-L1237
|
[
"def",
"inserted_indels",
"(",
"indels",
",",
"ocatg",
")",
":",
"## return copy with indels inserted",
"newcatg",
"=",
"np",
".",
"zeros",
"(",
"ocatg",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"## iterate over loci and make extensions for indels",
"for",
"iloc",
"in",
"xrange",
"(",
"ocatg",
".",
"shape",
"[",
"0",
"]",
")",
":",
"## get indels indices",
"indidx",
"=",
"np",
".",
"where",
"(",
"indels",
"[",
"iloc",
",",
":",
"]",
")",
"[",
"0",
"]",
"if",
"np",
".",
"any",
"(",
"indidx",
")",
":",
"## which new (empty) rows will be added",
"allrows",
"=",
"np",
".",
"arange",
"(",
"ocatg",
".",
"shape",
"[",
"1",
"]",
")",
"mask",
"=",
"np",
".",
"ones",
"(",
"allrows",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"for",
"idx",
"in",
"indidx",
":",
"mask",
"[",
"idx",
"]",
"=",
"False",
"not_idx",
"=",
"allrows",
"[",
"mask",
"==",
"1",
"]",
"## fill in new data into all other spots",
"newcatg",
"[",
"iloc",
"]",
"[",
"not_idx",
"]",
"=",
"ocatg",
"[",
"iloc",
",",
":",
"not_idx",
".",
"shape",
"[",
"0",
"]",
"]",
"else",
":",
"newcatg",
"[",
"iloc",
"]",
"=",
"ocatg",
"[",
"iloc",
"]",
"return",
"newcatg"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
fill_superseqs
|
Fills the superseqs array with seq data from cat.clust
and fill the edges array with information about paired split locations.
|
ipyrad/assemble/cluster_across.py
|
def fill_superseqs(data, samples):
"""
Fills the superseqs array with seq data from cat.clust
and fill the edges array with information about paired split locations.
"""
## load super to get edges
io5 = h5py.File(data.clust_database, 'r+')
superseqs = io5["seqs"]
splits = io5["splits"]
## samples are already sorted
snames = [i.name for i in samples]
LOGGER.info("snames %s", snames)
## get maxlen again
maxlen = data._hackersonly["max_fragment_length"] + 20
LOGGER.info("maxlen inside fill_superseqs is %s", maxlen)
## data has to be entered in blocks
infile = os.path.join(data.dirs.across, data.name+"_catclust.gz")
clusters = gzip.open(infile, 'r')
pairdealer = itertools.izip(*[iter(clusters)]*2)
## iterate over clusters
chunks = superseqs.attrs["chunksize"]
chunksize = chunks[0]
done = 0
iloc = 0
cloc = 0
chunkseqs = np.zeros(chunks, dtype="|S1")
chunkedge = np.zeros(chunksize, dtype=np.uint16)
while 1:
try:
done, chunk = clustdealer(pairdealer, 1)
except IndexError:
raise IPyradWarningExit("clustfile formatting error in %s", chunk)
## if chunk is full put into superseqs and reset counter
if cloc == chunksize:
LOGGER.info("cloc chunk writing %s", cloc)
superseqs[iloc-cloc:iloc] = chunkseqs
splits[iloc-cloc:iloc] = chunkedge
## reset chunkseqs, chunkedge, cloc
cloc = 0
chunkseqs = np.zeros((chunksize, len(samples), maxlen), dtype="|S1")
chunkedge = np.zeros((chunksize), dtype=np.uint16)
## get seq and split it
if chunk:
try:
fill = np.zeros((len(samples), maxlen), dtype="|S1")
fill.fill("N")
piece = chunk[0].strip().split("\n")
names = piece[0::2]
seqs = np.array([list(i) for i in piece[1::2]])
## fill in the separator if it exists
separator = np.where(np.all(seqs == 'n', axis=0))[0]
if np.any(separator):
chunkedge[cloc] = separator.min()
## fill in the hits
## seqs will be (5,) IF the seqs are variable lengths, which
## can happen if it had duplicates AND there were indels, and
## so the indels did not get aligned
try:
shlen = seqs.shape[1]
except IndexError as inst:
shlen = min([len(x) for x in seqs])
for name, seq in zip(names, seqs):
sidx = snames.index(name.rsplit("_", 1)[0])
#fill[sidx, :shlen] = seq[:maxlen]
fill[sidx, :shlen] = seq[:shlen]
## PUT seqs INTO local ARRAY
chunkseqs[cloc] = fill
except Exception as inst:
LOGGER.info(inst)
LOGGER.info("\nfill: %s\nshlen %s\nmaxlen %s", fill.shape, shlen, maxlen)
LOGGER.info("dupe chunk \n{}".format("\n".join(chunk)))
## increase counters if there was a chunk
cloc += 1
iloc += 1
if done:
break
## write final leftover chunk
superseqs[iloc-cloc:,] = chunkseqs[:cloc]
splits[iloc-cloc:] = chunkedge[:cloc]
## close super
io5.close()
clusters.close()
## edges is filled with splits for paired data.
LOGGER.info("done filling superseqs")
|
def fill_superseqs(data, samples):
"""
Fills the superseqs array with seq data from cat.clust
and fill the edges array with information about paired split locations.
"""
## load super to get edges
io5 = h5py.File(data.clust_database, 'r+')
superseqs = io5["seqs"]
splits = io5["splits"]
## samples are already sorted
snames = [i.name for i in samples]
LOGGER.info("snames %s", snames)
## get maxlen again
maxlen = data._hackersonly["max_fragment_length"] + 20
LOGGER.info("maxlen inside fill_superseqs is %s", maxlen)
## data has to be entered in blocks
infile = os.path.join(data.dirs.across, data.name+"_catclust.gz")
clusters = gzip.open(infile, 'r')
pairdealer = itertools.izip(*[iter(clusters)]*2)
## iterate over clusters
chunks = superseqs.attrs["chunksize"]
chunksize = chunks[0]
done = 0
iloc = 0
cloc = 0
chunkseqs = np.zeros(chunks, dtype="|S1")
chunkedge = np.zeros(chunksize, dtype=np.uint16)
while 1:
try:
done, chunk = clustdealer(pairdealer, 1)
except IndexError:
raise IPyradWarningExit("clustfile formatting error in %s", chunk)
## if chunk is full put into superseqs and reset counter
if cloc == chunksize:
LOGGER.info("cloc chunk writing %s", cloc)
superseqs[iloc-cloc:iloc] = chunkseqs
splits[iloc-cloc:iloc] = chunkedge
## reset chunkseqs, chunkedge, cloc
cloc = 0
chunkseqs = np.zeros((chunksize, len(samples), maxlen), dtype="|S1")
chunkedge = np.zeros((chunksize), dtype=np.uint16)
## get seq and split it
if chunk:
try:
fill = np.zeros((len(samples), maxlen), dtype="|S1")
fill.fill("N")
piece = chunk[0].strip().split("\n")
names = piece[0::2]
seqs = np.array([list(i) for i in piece[1::2]])
## fill in the separator if it exists
separator = np.where(np.all(seqs == 'n', axis=0))[0]
if np.any(separator):
chunkedge[cloc] = separator.min()
## fill in the hits
## seqs will be (5,) IF the seqs are variable lengths, which
## can happen if it had duplicates AND there were indels, and
## so the indels did not get aligned
try:
shlen = seqs.shape[1]
except IndexError as inst:
shlen = min([len(x) for x in seqs])
for name, seq in zip(names, seqs):
sidx = snames.index(name.rsplit("_", 1)[0])
#fill[sidx, :shlen] = seq[:maxlen]
fill[sidx, :shlen] = seq[:shlen]
## PUT seqs INTO local ARRAY
chunkseqs[cloc] = fill
except Exception as inst:
LOGGER.info(inst)
LOGGER.info("\nfill: %s\nshlen %s\nmaxlen %s", fill.shape, shlen, maxlen)
LOGGER.info("dupe chunk \n{}".format("\n".join(chunk)))
## increase counters if there was a chunk
cloc += 1
iloc += 1
if done:
break
## write final leftover chunk
superseqs[iloc-cloc:,] = chunkseqs[:cloc]
splits[iloc-cloc:] = chunkedge[:cloc]
## close super
io5.close()
clusters.close()
## edges is filled with splits for paired data.
LOGGER.info("done filling superseqs")
|
[
"Fills",
"the",
"superseqs",
"array",
"with",
"seq",
"data",
"from",
"cat",
".",
"clust",
"and",
"fill",
"the",
"edges",
"array",
"with",
"information",
"about",
"paired",
"split",
"locations",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1241-L1341
|
[
"def",
"fill_superseqs",
"(",
"data",
",",
"samples",
")",
":",
"## load super to get edges",
"io5",
"=",
"h5py",
".",
"File",
"(",
"data",
".",
"clust_database",
",",
"'r+'",
")",
"superseqs",
"=",
"io5",
"[",
"\"seqs\"",
"]",
"splits",
"=",
"io5",
"[",
"\"splits\"",
"]",
"## samples are already sorted",
"snames",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"samples",
"]",
"LOGGER",
".",
"info",
"(",
"\"snames %s\"",
",",
"snames",
")",
"## get maxlen again",
"maxlen",
"=",
"data",
".",
"_hackersonly",
"[",
"\"max_fragment_length\"",
"]",
"+",
"20",
"LOGGER",
".",
"info",
"(",
"\"maxlen inside fill_superseqs is %s\"",
",",
"maxlen",
")",
"## data has to be entered in blocks",
"infile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\"_catclust.gz\"",
")",
"clusters",
"=",
"gzip",
".",
"open",
"(",
"infile",
",",
"'r'",
")",
"pairdealer",
"=",
"itertools",
".",
"izip",
"(",
"*",
"[",
"iter",
"(",
"clusters",
")",
"]",
"*",
"2",
")",
"## iterate over clusters",
"chunks",
"=",
"superseqs",
".",
"attrs",
"[",
"\"chunksize\"",
"]",
"chunksize",
"=",
"chunks",
"[",
"0",
"]",
"done",
"=",
"0",
"iloc",
"=",
"0",
"cloc",
"=",
"0",
"chunkseqs",
"=",
"np",
".",
"zeros",
"(",
"chunks",
",",
"dtype",
"=",
"\"|S1\"",
")",
"chunkedge",
"=",
"np",
".",
"zeros",
"(",
"chunksize",
",",
"dtype",
"=",
"np",
".",
"uint16",
")",
"while",
"1",
":",
"try",
":",
"done",
",",
"chunk",
"=",
"clustdealer",
"(",
"pairdealer",
",",
"1",
")",
"except",
"IndexError",
":",
"raise",
"IPyradWarningExit",
"(",
"\"clustfile formatting error in %s\"",
",",
"chunk",
")",
"## if chunk is full put into superseqs and reset counter",
"if",
"cloc",
"==",
"chunksize",
":",
"LOGGER",
".",
"info",
"(",
"\"cloc chunk writing %s\"",
",",
"cloc",
")",
"superseqs",
"[",
"iloc",
"-",
"cloc",
":",
"iloc",
"]",
"=",
"chunkseqs",
"splits",
"[",
"iloc",
"-",
"cloc",
":",
"iloc",
"]",
"=",
"chunkedge",
"## reset chunkseqs, chunkedge, cloc",
"cloc",
"=",
"0",
"chunkseqs",
"=",
"np",
".",
"zeros",
"(",
"(",
"chunksize",
",",
"len",
"(",
"samples",
")",
",",
"maxlen",
")",
",",
"dtype",
"=",
"\"|S1\"",
")",
"chunkedge",
"=",
"np",
".",
"zeros",
"(",
"(",
"chunksize",
")",
",",
"dtype",
"=",
"np",
".",
"uint16",
")",
"## get seq and split it",
"if",
"chunk",
":",
"try",
":",
"fill",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"samples",
")",
",",
"maxlen",
")",
",",
"dtype",
"=",
"\"|S1\"",
")",
"fill",
".",
"fill",
"(",
"\"N\"",
")",
"piece",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"names",
"=",
"piece",
"[",
"0",
":",
":",
"2",
"]",
"seqs",
"=",
"np",
".",
"array",
"(",
"[",
"list",
"(",
"i",
")",
"for",
"i",
"in",
"piece",
"[",
"1",
":",
":",
"2",
"]",
"]",
")",
"## fill in the separator if it exists",
"separator",
"=",
"np",
".",
"where",
"(",
"np",
".",
"all",
"(",
"seqs",
"==",
"'n'",
",",
"axis",
"=",
"0",
")",
")",
"[",
"0",
"]",
"if",
"np",
".",
"any",
"(",
"separator",
")",
":",
"chunkedge",
"[",
"cloc",
"]",
"=",
"separator",
".",
"min",
"(",
")",
"## fill in the hits",
"## seqs will be (5,) IF the seqs are variable lengths, which ",
"## can happen if it had duplicaes AND there were indels, and ",
"## so the indels did not get aligned",
"try",
":",
"shlen",
"=",
"seqs",
".",
"shape",
"[",
"1",
"]",
"except",
"IndexError",
"as",
"inst",
":",
"shlen",
"=",
"min",
"(",
"[",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"seqs",
"]",
")",
"for",
"name",
",",
"seq",
"in",
"zip",
"(",
"names",
",",
"seqs",
")",
":",
"sidx",
"=",
"snames",
".",
"index",
"(",
"name",
".",
"rsplit",
"(",
"\"_\"",
",",
"1",
")",
"[",
"0",
"]",
")",
"#fill[sidx, :shlen] = seq[:maxlen]",
"fill",
"[",
"sidx",
",",
":",
"shlen",
"]",
"=",
"seq",
"[",
":",
"shlen",
"]",
"## PUT seqs INTO local ARRAY",
"chunkseqs",
"[",
"cloc",
"]",
"=",
"fill",
"except",
"Exception",
"as",
"inst",
":",
"LOGGER",
".",
"info",
"(",
"inst",
")",
"LOGGER",
".",
"info",
"(",
"\"\\nfill: %s\\nshlen %s\\nmaxlen %s\"",
",",
"fill",
".",
"shape",
",",
"shlen",
",",
"maxlen",
")",
"LOGGER",
".",
"info",
"(",
"\"dupe chunk \\n{}\"",
".",
"format",
"(",
"\"\\n\"",
".",
"join",
"(",
"chunk",
")",
")",
")",
"## increase counters if there was a chunk",
"cloc",
"+=",
"1",
"iloc",
"+=",
"1",
"if",
"done",
":",
"break",
"## write final leftover chunk",
"superseqs",
"[",
"iloc",
"-",
"cloc",
":",
",",
"]",
"=",
"chunkseqs",
"[",
":",
"cloc",
"]",
"splits",
"[",
"iloc",
"-",
"cloc",
":",
"]",
"=",
"chunkedge",
"[",
":",
"cloc",
"]",
"## close super",
"io5",
".",
"close",
"(",
")",
"clusters",
".",
"close",
"(",
")",
"## edges is filled with splits for paired data.",
"LOGGER",
".",
"info",
"(",
"\"done filling superseqs\"",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
count_seeds
|
uses bash commands to quickly count N seeds from utemp file
|
ipyrad/assemble/cluster_across.py
|
def count_seeds(usort):
    """
    uses bash commands to quickly count N seeds from utemp file
    """
    with open(usort, 'r') as insort:
        cmd1 = ["cut", "-f", "2"]
        cmd2 = ["uniq"]
        cmd3 = ["wc"]
        proc1 = sps.Popen(cmd1, stdin=insort, stdout=sps.PIPE, close_fds=True)
        proc2 = sps.Popen(cmd2, stdin=proc1.stdout, stdout=sps.PIPE, close_fds=True)
        proc3 = sps.Popen(cmd3, stdin=proc2.stdout, stdout=sps.PIPE, close_fds=True)
        res = proc3.communicate()
        nseeds = int(res[0].split()[0])
        proc1.stdout.close()
        proc2.stdout.close()
        proc3.stdout.close()
    return nseeds
|
def count_seeds(usort):
    """
    uses bash commands to quickly count N seeds from utemp file
    """
    with open(usort, 'r') as insort:
        cmd1 = ["cut", "-f", "2"]
        cmd2 = ["uniq"]
        cmd3 = ["wc"]
        proc1 = sps.Popen(cmd1, stdin=insort, stdout=sps.PIPE, close_fds=True)
        proc2 = sps.Popen(cmd2, stdin=proc1.stdout, stdout=sps.PIPE, close_fds=True)
        proc3 = sps.Popen(cmd3, stdin=proc2.stdout, stdout=sps.PIPE, close_fds=True)
        res = proc3.communicate()
        nseeds = int(res[0].split()[0])
        proc1.stdout.close()
        proc2.stdout.close()
        proc3.stdout.close()
    return nseeds
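The subprocess pipeline is equivalent to running cut -f 2 <usort | uniq | wc and reading the first number (the line count), which works because the file is already sorted by its second column. A pure-Python equivalent, written here only as a sketch for comparison (assuming a whitespace-separated, seed-sorted file):

def count_seeds_py(usort):
    """Count distinct seeds in column 2 of a whitespace-separated, seed-sorted file."""
    nseeds, last = 0, None
    with open(usort) as infile:
        for line in infile:
            seed = line.split()[1]
            if seed != last:        # file is sorted by seed, so uniq == change detection
                nseeds += 1
                last = seed
    return nseeds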
|
[
"uses",
"bash",
"commands",
"to",
"quickly",
"count",
"N",
"seeds",
"from",
"utemp",
"file"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1348-L1364
|
[
"def",
"count_seeds",
"(",
"usort",
")",
":",
"with",
"open",
"(",
"usort",
",",
"'r'",
")",
"as",
"insort",
":",
"cmd1",
"=",
"[",
"\"cut\"",
",",
"\"-f\"",
",",
"\"2\"",
"]",
"cmd2",
"=",
"[",
"\"uniq\"",
"]",
"cmd3",
"=",
"[",
"\"wc\"",
"]",
"proc1",
"=",
"sps",
".",
"Popen",
"(",
"cmd1",
",",
"stdin",
"=",
"insort",
",",
"stdout",
"=",
"sps",
".",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"proc2",
"=",
"sps",
".",
"Popen",
"(",
"cmd2",
",",
"stdin",
"=",
"proc1",
".",
"stdout",
",",
"stdout",
"=",
"sps",
".",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"proc3",
"=",
"sps",
".",
"Popen",
"(",
"cmd3",
",",
"stdin",
"=",
"proc2",
".",
"stdout",
",",
"stdout",
"=",
"sps",
".",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"res",
"=",
"proc3",
".",
"communicate",
"(",
")",
"nseeds",
"=",
"int",
"(",
"res",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"proc1",
".",
"stdout",
".",
"close",
"(",
")",
"proc2",
".",
"stdout",
".",
"close",
"(",
")",
"proc3",
".",
"stdout",
".",
"close",
"(",
")",
"return",
"nseeds"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
sort_seeds
|
sort seeds from cluster results
|
ipyrad/assemble/cluster_across.py
|
def sort_seeds(uhandle, usort):
    """ sort seeds from cluster results"""
    cmd = ["sort", "-k", "2", uhandle, "-o", usort]
    proc = sps.Popen(cmd, close_fds=True)
    proc.communicate()
|
def sort_seeds(uhandle, usort):
    """ sort seeds from cluster results"""
    cmd = ["sort", "-k", "2", uhandle, "-o", usort]
    proc = sps.Popen(cmd, close_fds=True)
    proc.communicate()
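This simply shells out to GNU sort so the .utemp match table is ordered by its seed field, letting downstream code stream one seed's hits at a time without loading everything into memory. A rough in-Python equivalent is sketched below; it is only suitable for small files, which is presumably why the real code defers to the external sort:

def sort_seeds_py(uhandle, usort):
    """Sort a whitespace-separated match table by its second column (small files only)."""
    with open(uhandle) as infile:
        lines = infile.readlines()
    lines.sort(key=lambda line: line.split()[1])
    with open(usort, "w") as out:
        out.writelines(lines)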
|
[
"sort",
"seeds",
"from",
"cluster",
"results"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1368-L1372
|
[
"def",
"sort_seeds",
"(",
"uhandle",
",",
"usort",
")",
":",
"cmd",
"=",
"[",
"\"sort\"",
",",
"\"-k\"",
",",
"\"2\"",
",",
"uhandle",
",",
"\"-o\"",
",",
"usort",
"]",
"proc",
"=",
"sps",
".",
"Popen",
"(",
"cmd",
",",
"close_fds",
"=",
"True",
")",
"proc",
".",
"communicate",
"(",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
build_clustbits
|
Reconstitutes clusters from .utemp and htemp files and writes them
to chunked files for aligning in muscle.
|
ipyrad/assemble/cluster_across.py
|
def build_clustbits(data, ipyclient, force):
"""
Reconstitutes clusters from .utemp and htemp files and writes them
to chunked files for aligning in muscle.
"""
## If you run this step then we clear all tmp .fa and .indel.h5 files
if os.path.exists(data.tmpdir):
shutil.rmtree(data.tmpdir)
os.mkdir(data.tmpdir)
## parallel client
lbview = ipyclient.load_balanced_view()
start = time.time()
printstr = " building clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
uhandle = os.path.join(data.dirs.across, data.name+".utemp")
usort = os.path.join(data.dirs.across, data.name+".utemp.sort")
async1 = ""
## skip usorting if not force and already exists
if not os.path.exists(usort) or force:
## send sort job to engines. Sorted seeds allows us to work through
## the utemp file one locus at a time instead of reading all into mem.
LOGGER.info("building reads file -- loading utemp file into mem")
async1 = lbview.apply(sort_seeds, *(uhandle, usort))
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
if async1.ready():
break
else:
time.sleep(0.1)
## send count seeds job to engines.
async2 = lbview.apply(count_seeds, usort)
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 1, printstr.format(elapsed), spacer=data._spacer)
if async2.ready():
break
else:
time.sleep(0.1)
## wait for both to finish while printing progress timer
nseeds = async2.result()
## send the clust bit building job to work and track progress
async3 = lbview.apply(sub_build_clustbits, *(data, usort, nseeds))
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 2, printstr.format(elapsed), spacer=data._spacer)
if async3.ready():
break
else:
time.sleep(0.1)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 3, printstr.format(elapsed), spacer=data._spacer)
print("")
## check for errors
for job in [async1, async2, async3]:
try:
if not job.successful():
raise IPyradWarningExit(job.result())
except AttributeError:
## If we skip usorting then async1 == "" so the call to
## successful() raises, but we can ignore it.
pass
|
def build_clustbits(data, ipyclient, force):
"""
Reconstitutes clusters from .utemp and htemp files and writes them
to chunked files for aligning in muscle.
"""
## If you run this step then we clear all tmp .fa and .indel.h5 files
if os.path.exists(data.tmpdir):
shutil.rmtree(data.tmpdir)
os.mkdir(data.tmpdir)
## parallel client
lbview = ipyclient.load_balanced_view()
start = time.time()
printstr = " building clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
uhandle = os.path.join(data.dirs.across, data.name+".utemp")
usort = os.path.join(data.dirs.across, data.name+".utemp.sort")
async1 = ""
## skip usorting if not force and already exists
if not os.path.exists(usort) or force:
## send sort job to engines. Sorted seeds allows us to work through
## the utemp file one locus at a time instead of reading all into mem.
LOGGER.info("building reads file -- loading utemp file into mem")
async1 = lbview.apply(sort_seeds, *(uhandle, usort))
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
if async1.ready():
break
else:
time.sleep(0.1)
## send count seeds job to engines.
async2 = lbview.apply(count_seeds, usort)
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 1, printstr.format(elapsed), spacer=data._spacer)
if async2.ready():
break
else:
time.sleep(0.1)
## wait for both to finish while printing progress timer
nseeds = async2.result()
## send the clust bit building job to work and track progress
async3 = lbview.apply(sub_build_clustbits, *(data, usort, nseeds))
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 2, printstr.format(elapsed), spacer=data._spacer)
if async3.ready():
break
else:
time.sleep(0.1)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 3, printstr.format(elapsed), spacer=data._spacer)
print("")
## check for errors
for job in [async1, async2, async3]:
try:
if not job.successful():
raise IPyradWarningExit(job.result())
except AttributeError:
## If we skip usorting then async1 == "" so the call to
## successful() raises, but we can ignore it.
pass
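build_clustbits is mostly a progress-polling wrapper around three jobs submitted to an ipyparallel load-balanced view. The general submit/poll/collect pattern it relies on looks like the sketch below; this assumes an ipcluster is already running and uses a throwaway function name, so it illustrates the pattern rather than ipyrad's actual jobs.

import time
import ipyparallel as ipp

def slow_square(x):
    import time                     # imports must exist on the remote engine
    time.sleep(1)
    return x * x

ipyclient = ipp.Client()            # connect to a running ipcluster
lbview = ipyclient.load_balanced_view()
job = lbview.apply(slow_square, 7)  # returns an AsyncResult immediately

while not job.ready():              # poll; a progress bar would be redrawn here
    time.sleep(0.1)
print(job.result())                 # 49; re-raises the remote exception on failure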
|
[
"Reconstitutes",
"clusters",
"from",
".",
"utemp",
"and",
"htemp",
"files",
"and",
"writes",
"them",
"to",
"chunked",
"files",
"for",
"aligning",
"in",
"muscle",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1376-L1447
|
[
"def",
"build_clustbits",
"(",
"data",
",",
"ipyclient",
",",
"force",
")",
":",
"## If you run this step then we clear all tmp .fa and .indel.h5 files",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"data",
".",
"tmpdir",
")",
":",
"shutil",
".",
"rmtree",
"(",
"data",
".",
"tmpdir",
")",
"os",
".",
"mkdir",
"(",
"data",
".",
"tmpdir",
")",
"## parallel client",
"lbview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"printstr",
"=",
"\" building clusters | {} | s6 |\"",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"3",
",",
"0",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"uhandle",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".utemp\"",
")",
"usort",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".utemp.sort\"",
")",
"async1",
"=",
"\"\"",
"## skip usorting if not force and already exists",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"usort",
")",
"or",
"force",
":",
"## send sort job to engines. Sorted seeds allows us to work through",
"## the utemp file one locus at a time instead of reading all into mem.",
"LOGGER",
".",
"info",
"(",
"\"building reads file -- loading utemp file into mem\"",
")",
"async1",
"=",
"lbview",
".",
"apply",
"(",
"sort_seeds",
",",
"*",
"(",
"uhandle",
",",
"usort",
")",
")",
"while",
"1",
":",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"3",
",",
"0",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"if",
"async1",
".",
"ready",
"(",
")",
":",
"break",
"else",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"## send count seeds job to engines.",
"async2",
"=",
"lbview",
".",
"apply",
"(",
"count_seeds",
",",
"usort",
")",
"while",
"1",
":",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"3",
",",
"1",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"if",
"async2",
".",
"ready",
"(",
")",
":",
"break",
"else",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"## wait for both to finish while printing progress timer",
"nseeds",
"=",
"async2",
".",
"result",
"(",
")",
"## send the clust bit building job to work and track progress",
"async3",
"=",
"lbview",
".",
"apply",
"(",
"sub_build_clustbits",
",",
"*",
"(",
"data",
",",
"usort",
",",
"nseeds",
")",
")",
"while",
"1",
":",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"3",
",",
"2",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"if",
"async3",
".",
"ready",
"(",
")",
":",
"break",
"else",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"3",
",",
"3",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"print",
"(",
"\"\"",
")",
"## check for errors",
"for",
"job",
"in",
"[",
"async1",
",",
"async2",
",",
"async3",
"]",
":",
"try",
":",
"if",
"not",
"job",
".",
"successful",
"(",
")",
":",
"raise",
"IPyradWarningExit",
"(",
"job",
".",
"result",
"(",
")",
")",
"except",
"AttributeError",
":",
"## If we skip usorting then async1 == \"\" so the call to",
"## successful() raises, but we can ignore it.",
"pass"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
sub_build_clustbits
|
A subfunction of build_clustbits to allow progress tracking. This func
splits the unaligned clusters into bits for aligning on separate cores.
|
ipyrad/assemble/cluster_across.py
|
def sub_build_clustbits(data, usort, nseeds):
"""
A subfunction of build_clustbits to allow progress tracking. This func
splits the unaligned clusters into bits for aligning on separate cores.
"""
## load FULL concat fasta file into a dict. This could cause RAM issues.
## this file has iupac codes in it, not ambigs resolved, and is gzipped.
LOGGER.info("loading full _catcons file into memory")
allcons = {}
conshandle = os.path.join(data.dirs.across, data.name+"_catcons.tmp")
with gzip.open(conshandle, 'rb') as iocons:
cons = itertools.izip(*[iter(iocons)]*2)
for namestr, seq in cons:
nnn, sss = [i.strip() for i in namestr, seq]
allcons[nnn[1:]] = sss
## set optim to approximately 4 chunks per core. Smaller allows for a bit
## cleaner looking progress bar. 40 cores will make 160 files.
optim = ((nseeds // (data.cpus*4)) + (nseeds % (data.cpus*4)))
LOGGER.info("building clustbits, optim=%s, nseeds=%s, cpus=%s",
optim, nseeds, data.cpus)
## iterate through usort grabbing seeds and matches
with open(usort, 'rb') as insort:
## iterator, seed null, and seqlist null
isort = iter(insort)
loci = 0
lastseed = 0
fseqs = []
seqlist = []
seqsize = 0
while 1:
## grab the next line
try:
hit, seed, ori = isort.next().strip().split()
except StopIteration:
break
try:
## if same seed, append match
if seed != lastseed:
## store the last fseq, count it, and clear it
if fseqs:
seqlist.append("\n".join(fseqs))
seqsize += 1
fseqs = []
## occasionally write to file
if seqsize >= optim:
if seqlist:
loci += seqsize
with open(os.path.join(data.tmpdir,
data.name+".chunk_{}".format(loci)), 'w') as clustsout:
LOGGER.debug("writing chunk - seqsize {} loci {} {}".format(seqsize, loci, clustsout.name))
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## reset list and counter
seqlist = []
seqsize = 0
## store the new seed on top of fseq
fseqs.append(">{}\n{}".format(seed, allcons[seed]))
lastseed = seed
## add match to the seed
seq = allcons[hit]
## revcomp if orientation is reversed
if ori == "-":
seq = fullcomp(seq)[::-1]
fseqs.append(">{}\n{}".format(hit, seq))
except KeyError as inst:
## Caught bad seed or hit? Log and continue.
LOGGER.error("Bad Seed/Hit: seqsize {}\tloci {}\tseed {}\thit {}".format(seqsize, loci, seed, hit))
## write whatever is left over to the clusts file
if fseqs:
seqlist.append("\n".join(fseqs))
seqsize += 1
loci += seqsize
if seqlist:
with open(os.path.join(data.tmpdir,
data.name+".chunk_{}".format(loci)), 'w') as clustsout:
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## final progress and cleanup
del allcons
clustbits = glob.glob(os.path.join(data.tmpdir, data.name+".chunk_*"))
## return stuff
return clustbits, loci
|
def sub_build_clustbits(data, usort, nseeds):
"""
A subfunction of build_clustbits to allow progress tracking. This func
splits the unaligned clusters into bits for aligning on separate cores.
"""
## load FULL concat fasta file into a dict. This could cause RAM issues.
## this file has iupac codes in it, not ambigs resolved, and is gzipped.
LOGGER.info("loading full _catcons file into memory")
allcons = {}
conshandle = os.path.join(data.dirs.across, data.name+"_catcons.tmp")
with gzip.open(conshandle, 'rb') as iocons:
cons = itertools.izip(*[iter(iocons)]*2)
for namestr, seq in cons:
nnn, sss = [i.strip() for i in namestr, seq]
allcons[nnn[1:]] = sss
## set optim to approximately 4 chunks per core. Smaller allows for a bit
## cleaner looking progress bar. 40 cores will make 160 files.
optim = ((nseeds // (data.cpus*4)) + (nseeds % (data.cpus*4)))
LOGGER.info("building clustbits, optim=%s, nseeds=%s, cpus=%s",
optim, nseeds, data.cpus)
## iterate through usort grabbing seeds and matches
with open(usort, 'rb') as insort:
## iterator, seed null, and seqlist null
isort = iter(insort)
loci = 0
lastseed = 0
fseqs = []
seqlist = []
seqsize = 0
while 1:
## grab the next line
try:
hit, seed, ori = isort.next().strip().split()
except StopIteration:
break
try:
## if same seed, append match
if seed != lastseed:
## store the last fseq, count it, and clear it
if fseqs:
seqlist.append("\n".join(fseqs))
seqsize += 1
fseqs = []
## occasionally write to file
if seqsize >= optim:
if seqlist:
loci += seqsize
with open(os.path.join(data.tmpdir,
data.name+".chunk_{}".format(loci)), 'w') as clustsout:
LOGGER.debug("writing chunk - seqsize {} loci {} {}".format(seqsize, loci, clustsout.name))
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## reset list and counter
seqlist = []
seqsize = 0
## store the new seed on top of fseq
fseqs.append(">{}\n{}".format(seed, allcons[seed]))
lastseed = seed
## add match to the seed
seq = allcons[hit]
## revcomp if orientation is reversed
if ori == "-":
seq = fullcomp(seq)[::-1]
fseqs.append(">{}\n{}".format(hit, seq))
except KeyError as inst:
## Caught bad seed or hit? Log and continue.
LOGGER.error("Bad Seed/Hit: seqsize {}\tloci {}\tseed {}\thit {}".format(seqsize, loci, seed, hit))
## write whatever is left over to the clusts file
if fseqs:
seqlist.append("\n".join(fseqs))
seqsize += 1
loci += seqsize
if seqlist:
with open(os.path.join(data.tmpdir,
data.name+".chunk_{}".format(loci)), 'w') as clustsout:
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## final progress and cleanup
del allcons
clustbits = glob.glob(os.path.join(data.tmpdir, data.name+".chunk_*"))
## return stuff
return clustbits, loci
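Because the input is sorted by seed, all hits for a seed are adjacent, so the chunk builder is essentially a group-by on the seed column that emits each group as a small FASTA block separated by "//" lines. A compact sketch of that grouping logic with itertools.groupby is shown below; the consensus dict and match rows are toy values, and the real function additionally handles reverse-complementing and writing chunked output files.

import itertools

allcons = {"seedA": "AAAA", "hit1": "AAAT", "seedB": "CCCC", "hit2": "CCCG"}
usort_rows = [("hit1", "seedA", "+"), ("hit2", "seedB", "+")]   # (hit, seed, orientation)

clusters = []
for seed, rows in itertools.groupby(usort_rows, key=lambda row: row[1]):
    fseqs = [">{}\n{}".format(seed, allcons[seed])]             # seed goes on top
    fseqs += [">{}\n{}".format(hit, allcons[hit]) for hit, _, _ in rows]
    clusters.append("\n".join(fseqs))

print("\n//\n//\n".join(clusters) + "\n//\n//\n")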
|
[
"A",
"subfunction",
"of",
"build_clustbits",
"to",
"allow",
"progress",
"tracking",
".",
"This",
"func",
"splits",
"the",
"unaligned",
"clusters",
"into",
"bits",
"for",
"aligning",
"on",
"separate",
"cores",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1450-L1539
|
[
"def",
"sub_build_clustbits",
"(",
"data",
",",
"usort",
",",
"nseeds",
")",
":",
"## load FULL concat fasta file into a dict. This could cause RAM issues.",
"## this file has iupac codes in it, not ambigs resolved, and is gzipped.",
"LOGGER",
".",
"info",
"(",
"\"loading full _catcons file into memory\"",
")",
"allcons",
"=",
"{",
"}",
"conshandle",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\"_catcons.tmp\"",
")",
"with",
"gzip",
".",
"open",
"(",
"conshandle",
",",
"'rb'",
")",
"as",
"iocons",
":",
"cons",
"=",
"itertools",
".",
"izip",
"(",
"*",
"[",
"iter",
"(",
"iocons",
")",
"]",
"*",
"2",
")",
"for",
"namestr",
",",
"seq",
"in",
"cons",
":",
"nnn",
",",
"sss",
"=",
"[",
"i",
".",
"strip",
"(",
")",
"for",
"i",
"in",
"namestr",
",",
"seq",
"]",
"allcons",
"[",
"nnn",
"[",
"1",
":",
"]",
"]",
"=",
"sss",
"## set optim to approximately 4 chunks per core. Smaller allows for a bit",
"## cleaner looking progress bar. 40 cores will make 160 files.",
"optim",
"=",
"(",
"(",
"nseeds",
"//",
"(",
"data",
".",
"cpus",
"*",
"4",
")",
")",
"+",
"(",
"nseeds",
"%",
"(",
"data",
".",
"cpus",
"*",
"4",
")",
")",
")",
"LOGGER",
".",
"info",
"(",
"\"building clustbits, optim=%s, nseeds=%s, cpus=%s\"",
",",
"optim",
",",
"nseeds",
",",
"data",
".",
"cpus",
")",
"## iterate through usort grabbing seeds and matches",
"with",
"open",
"(",
"usort",
",",
"'rb'",
")",
"as",
"insort",
":",
"## iterator, seed null, and seqlist null",
"isort",
"=",
"iter",
"(",
"insort",
")",
"loci",
"=",
"0",
"lastseed",
"=",
"0",
"fseqs",
"=",
"[",
"]",
"seqlist",
"=",
"[",
"]",
"seqsize",
"=",
"0",
"while",
"1",
":",
"## grab the next line",
"try",
":",
"hit",
",",
"seed",
",",
"ori",
"=",
"isort",
".",
"next",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"except",
"StopIteration",
":",
"break",
"try",
":",
"## if same seed, append match",
"if",
"seed",
"!=",
"lastseed",
":",
"## store the last fseq, count it, and clear it",
"if",
"fseqs",
":",
"seqlist",
".",
"append",
"(",
"\"\\n\"",
".",
"join",
"(",
"fseqs",
")",
")",
"seqsize",
"+=",
"1",
"fseqs",
"=",
"[",
"]",
"## occasionally write to file",
"if",
"seqsize",
">=",
"optim",
":",
"if",
"seqlist",
":",
"loci",
"+=",
"seqsize",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"data",
".",
"name",
"+",
"\".chunk_{}\"",
".",
"format",
"(",
"loci",
")",
")",
",",
"'w'",
")",
"as",
"clustsout",
":",
"LOGGER",
".",
"debug",
"(",
"\"writing chunk - seqsize {} loci {} {}\"",
".",
"format",
"(",
"seqsize",
",",
"loci",
",",
"clustsout",
".",
"name",
")",
")",
"clustsout",
".",
"write",
"(",
"\"\\n//\\n//\\n\"",
".",
"join",
"(",
"seqlist",
")",
"+",
"\"\\n//\\n//\\n\"",
")",
"## reset list and counter",
"seqlist",
"=",
"[",
"]",
"seqsize",
"=",
"0",
"## store the new seed on top of fseq",
"fseqs",
".",
"append",
"(",
"\">{}\\n{}\"",
".",
"format",
"(",
"seed",
",",
"allcons",
"[",
"seed",
"]",
")",
")",
"lastseed",
"=",
"seed",
"## add match to the seed",
"seq",
"=",
"allcons",
"[",
"hit",
"]",
"## revcomp if orientation is reversed",
"if",
"ori",
"==",
"\"-\"",
":",
"seq",
"=",
"fullcomp",
"(",
"seq",
")",
"[",
":",
":",
"-",
"1",
"]",
"fseqs",
".",
"append",
"(",
"\">{}\\n{}\"",
".",
"format",
"(",
"hit",
",",
"seq",
")",
")",
"except",
"KeyError",
"as",
"inst",
":",
"## Caught bad seed or hit? Log and continue.",
"LOGGER",
".",
"error",
"(",
"\"Bad Seed/Hit: seqsize {}\\tloci {}\\tseed {}\\thit {}\"",
".",
"format",
"(",
"seqsize",
",",
"loci",
",",
"seed",
",",
"hit",
")",
")",
"## write whatever is left over to the clusts file",
"if",
"fseqs",
":",
"seqlist",
".",
"append",
"(",
"\"\\n\"",
".",
"join",
"(",
"fseqs",
")",
")",
"seqsize",
"+=",
"1",
"loci",
"+=",
"seqsize",
"if",
"seqlist",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"data",
".",
"name",
"+",
"\".chunk_{}\"",
".",
"format",
"(",
"loci",
")",
")",
",",
"'w'",
")",
"as",
"clustsout",
":",
"clustsout",
".",
"write",
"(",
"\"\\n//\\n//\\n\"",
".",
"join",
"(",
"seqlist",
")",
"+",
"\"\\n//\\n//\\n\"",
")",
"## final progress and cleanup",
"del",
"allcons",
"clustbits",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"data",
".",
"name",
"+",
"\".chunk_*\"",
")",
")",
"## return stuff",
"return",
"clustbits",
",",
"loci"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
build_input_file
|
[This is run on an ipengine]
Make a concatenated consens file with sampled alleles (no RSWYMK/rswymk).
Orders reads by length and shuffles randomly within length classes
|
ipyrad/assemble/cluster_across.py
|
def build_input_file(data, samples, randomseed):
"""
[This is run on an ipengine]
Make a concatenated consens file with sampled alleles (no RSWYMK/rswymk).
Orders reads by length and shuffles randomly within length classes
"""
## get all of the consens handles for samples that have consens reads
## this is better than using sample.files.consens for selecting files
## b/c if they were moved we only have to edit data.dirs.consens
## scratch the statement above, people shouldn't be moving files,
## they should be using merge/branch, and so sample.files.consens
## is needed to keep track of samples from different dirs if they
## are later merged into the same assembly.
#conshandles = [os.path.join(data.dirs.consens, sample.name+".consens.gz") \
# for sample in samples if \
# sample.stats.reads_consens]
conshandles = [sample.files.consens[0] \
for sample in samples if \
sample.stats.reads_consens]
conshandles.sort()
assert conshandles, "no consensus files found"
## concatenate all of the gzipped consens files
cmd = ['cat'] + conshandles
#allcons = os.path.join(data.dirs.consens, data.name+"_catcons.tmp")
allcons = os.path.join(data.dirs.across, data.name+"_catcons.tmp")
LOGGER.debug(" ".join(cmd))
with open(allcons, 'w') as output:
call = sps.Popen(cmd, stdout=output, close_fds=True)
call.communicate()
## a string of sed substitutions for temporarily replacing hetero sites
## skips lines with '>', so it doesn't affect taxon names
subs = ["/>/!s/W/A/g", "/>/!s/w/A/g", "/>/!s/R/A/g", "/>/!s/r/A/g",
"/>/!s/M/A/g", "/>/!s/m/A/g", "/>/!s/K/T/g", "/>/!s/k/T/g",
"/>/!s/S/C/g", "/>/!s/s/C/g", "/>/!s/Y/C/g", "/>/!s/y/C/g"]
subs = ";".join(subs)
## impute pseudo-haplo information to avoid mismatch at hetero sites
## the read data with hetero sites is put back into clustered data later.
## pipe passed data from gunzip to sed.
cmd1 = ["gunzip", "-c", allcons]
cmd2 = ["sed", subs]
LOGGER.debug(" ".join(cmd1))
proc1 = sps.Popen(cmd1, stdout=sps.PIPE, close_fds=True)
allhaps = allcons.replace("_catcons.tmp", "_cathaps.tmp")
with open(allhaps, 'w') as output:
LOGGER.debug(" ".join(cmd2))
proc2 = sps.Popen(cmd2, stdin=proc1.stdout, stdout=output, close_fds=True)
proc2.communicate()
proc1.stdout.close()
## now sort the file using vsearch
allsort = allcons.replace("_catcons.tmp", "_catsort.tmp")
cmd1 = [ipyrad.bins.vsearch,
"--sortbylength", allhaps,
"--fasta_width", "0",
"--output", allsort]
LOGGER.debug(" ".join(cmd1))
proc1 = sps.Popen(cmd1, close_fds=True)
proc1.communicate()
## shuffle sequences within size classes. Tested seed (8/31/2016)
## shuffling works repeatably with seed.
random.seed(randomseed)
## open an iterator to the length-sorted file and grab two lines at a time
allshuf = allcons.replace("_catcons.tmp", "_catshuf.tmp")
outdat = open(allshuf, 'w')
indat = open(allsort, 'r')
idat = itertools.izip(iter(indat), iter(indat))
done = 0
chunk = [idat.next()]
while not done:
## grab 2-lines until they become shorter (unless there's only one)
oldlen = len(chunk[-1][-1])
while 1:
try:
dat = idat.next()
except StopIteration:
done = 1
break
if len(dat[-1]) == oldlen:
chunk.append(dat)
else:
## send the last chunk off to be processed
random.shuffle(chunk)
outdat.write("".join(itertools.chain(*chunk)))
## start new chunk
chunk = [dat]
break
## do the last chunk
random.shuffle(chunk)
outdat.write("".join(itertools.chain(*chunk)))
indat.close()
outdat.close()
|
def build_input_file(data, samples, randomseed):
"""
[This is run on an ipengine]
Make a concatenated consens file with sampled alleles (no RSWYMK/rswymk).
Orders reads by length and shuffles randomly within length classes
"""
## get all of the consens handles for samples that have consens reads
## this is better than using sample.files.consens for selecting files
## b/c if they were moved we only have to edit data.dirs.consens
## scratch the statement above, people shouldn't be moving files,
## they should be using merge/branch, and so sample.files.consens
## is needed to keep track of samples from different dirs if they
## are later merged into the same assembly.
#conshandles = [os.path.join(data.dirs.consens, sample.name+".consens.gz") \
# for sample in samples if \
# sample.stats.reads_consens]
conshandles = [sample.files.consens[0] \
for sample in samples if \
sample.stats.reads_consens]
conshandles.sort()
assert conshandles, "no consensus files found"
## concatenate all of the gzipped consens files
cmd = ['cat'] + conshandles
#allcons = os.path.join(data.dirs.consens, data.name+"_catcons.tmp")
allcons = os.path.join(data.dirs.across, data.name+"_catcons.tmp")
LOGGER.debug(" ".join(cmd))
with open(allcons, 'w') as output:
call = sps.Popen(cmd, stdout=output, close_fds=True)
call.communicate()
## a string of sed substitutions for temporarily replacing hetero sites
## skips lines with '>', so it doesn't affect taxon names
subs = ["/>/!s/W/A/g", "/>/!s/w/A/g", "/>/!s/R/A/g", "/>/!s/r/A/g",
"/>/!s/M/A/g", "/>/!s/m/A/g", "/>/!s/K/T/g", "/>/!s/k/T/g",
"/>/!s/S/C/g", "/>/!s/s/C/g", "/>/!s/Y/C/g", "/>/!s/y/C/g"]
subs = ";".join(subs)
## impute pseudo-haplo information to avoid mismatch at hetero sites
## the read data with hetero sites is put back into clustered data later.
## pipe passed data from gunzip to sed.
cmd1 = ["gunzip", "-c", allcons]
cmd2 = ["sed", subs]
LOGGER.debug(" ".join(cmd1))
proc1 = sps.Popen(cmd1, stdout=sps.PIPE, close_fds=True)
allhaps = allcons.replace("_catcons.tmp", "_cathaps.tmp")
with open(allhaps, 'w') as output:
LOGGER.debug(" ".join(cmd2))
proc2 = sps.Popen(cmd2, stdin=proc1.stdout, stdout=output, close_fds=True)
proc2.communicate()
proc1.stdout.close()
## now sort the file using vsearch
allsort = allcons.replace("_catcons.tmp", "_catsort.tmp")
cmd1 = [ipyrad.bins.vsearch,
"--sortbylength", allhaps,
"--fasta_width", "0",
"--output", allsort]
LOGGER.debug(" ".join(cmd1))
proc1 = sps.Popen(cmd1, close_fds=True)
proc1.communicate()
## shuffle sequences within size classes. Tested seed (8/31/2016)
## shuffling works repeatably with seed.
random.seed(randomseed)
    ## open an iterator to the length-sorted file and grab two lines at a time
allshuf = allcons.replace("_catcons.tmp", "_catshuf.tmp")
outdat = open(allshuf, 'w')
indat = open(allsort, 'r')
idat = itertools.izip(iter(indat), iter(indat))
done = 0
chunk = [idat.next()]
while not done:
## grab 2-lines until they become shorter (unless there's only one)
oldlen = len(chunk[-1][-1])
while 1:
try:
dat = idat.next()
except StopIteration:
done = 1
break
if len(dat[-1]) == oldlen:
chunk.append(dat)
else:
## send the last chunk off to be processed
random.shuffle(chunk)
outdat.write("".join(itertools.chain(*chunk)))
## start new chunk
chunk = [dat]
break
## do the last chunk
random.shuffle(chunk)
outdat.write("".join(itertools.chain(*chunk)))
indat.close()
outdat.close()
|
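For readers porting this step: a minimal Python 3 sketch of the shuffle-within-length-classes loop at the end of build_input_file above. The function and argument names are illustrative; it assumes the two-line-per-record FASTA layout produced by vsearch --fasta_width 0, and it replaces the Python 2 itertools.izip/.next() idioms with zip() and itertools.groupby.

import itertools
import random

def shuffle_within_length_classes(insort, outshuf, seed=42):
    # Read (header, sequence) pairs from a length-sorted, 2-line FASTA,
    # group consecutive records with equal sequence length, shuffle each
    # group, and write them out with the length ordering preserved.
    random.seed(seed)
    with open(insort) as indat, open(outshuf, "w") as outdat:
        pairs = zip(indat, indat)
        for _, group in itertools.groupby(pairs, key=lambda rec: len(rec[1])):
            chunk = list(group)
            random.shuffle(chunk)
            outdat.write("".join(itertools.chain(*chunk)))
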
[
"[",
"This",
"is",
"run",
"on",
"an",
"ipengine",
"]",
"Make",
"a",
"concatenated",
"consens",
"file",
"with",
"sampled",
"alleles",
"(",
"no",
"RSWYMK",
"/",
"rswymk",
")",
".",
"Orders",
"reads",
"by",
"length",
"and",
"shuffles",
"randomly",
"within",
"length",
"classes"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1543-L1643
|
[
"def",
"build_input_file",
"(",
"data",
",",
"samples",
",",
"randomseed",
")",
":",
"## get all of the consens handles for samples that have consens reads",
"## this is better than using sample.files.consens for selecting files",
"## b/c if they were moved we only have to edit data.dirs.consens",
"## scratch the statement above, people shouldn't be moving files, ",
"## they should be using merge/branch, and so sample.files.consens",
"## is needed to keep track of samples from different dirs if they",
"## are later merged into the same assembly.",
"#conshandles = [os.path.join(data.dirs.consens, sample.name+\".consens.gz\") \\",
"# for sample in samples if \\",
"# sample.stats.reads_consens]",
"conshandles",
"=",
"[",
"sample",
".",
"files",
".",
"consens",
"[",
"0",
"]",
"for",
"sample",
"in",
"samples",
"if",
"sample",
".",
"stats",
".",
"reads_consens",
"]",
"conshandles",
".",
"sort",
"(",
")",
"assert",
"conshandles",
",",
"\"no consensus files found\"",
"## concatenate all of the gzipped consens files",
"cmd",
"=",
"[",
"'cat'",
"]",
"+",
"conshandles",
"#allcons = os.path.join(data.dirs.consens, data.name+\"_catcons.tmp\")",
"allcons",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\"_catcons.tmp\"",
")",
"LOGGER",
".",
"debug",
"(",
"\" \"",
".",
"join",
"(",
"cmd",
")",
")",
"with",
"open",
"(",
"allcons",
",",
"'w'",
")",
"as",
"output",
":",
"call",
"=",
"sps",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"output",
",",
"close_fds",
"=",
"True",
")",
"call",
".",
"communicate",
"(",
")",
"## a string of sed substitutions for temporarily replacing hetero sites",
"## skips lines with '>', so it doesn't affect taxon names",
"subs",
"=",
"[",
"\"/>/!s/W/A/g\"",
",",
"\"/>/!s/w/A/g\"",
",",
"\"/>/!s/R/A/g\"",
",",
"\"/>/!s/r/A/g\"",
",",
"\"/>/!s/M/A/g\"",
",",
"\"/>/!s/m/A/g\"",
",",
"\"/>/!s/K/T/g\"",
",",
"\"/>/!s/k/T/g\"",
",",
"\"/>/!s/S/C/g\"",
",",
"\"/>/!s/s/C/g\"",
",",
"\"/>/!s/Y/C/g\"",
",",
"\"/>/!s/y/C/g\"",
"]",
"subs",
"=",
"\";\"",
".",
"join",
"(",
"subs",
")",
"## impute pseudo-haplo information to avoid mismatch at hetero sites",
"## the read data with hetero sites is put back into clustered data later.",
"## pipe passed data from gunzip to sed.",
"cmd1",
"=",
"[",
"\"gunzip\"",
",",
"\"-c\"",
",",
"allcons",
"]",
"cmd2",
"=",
"[",
"\"sed\"",
",",
"subs",
"]",
"LOGGER",
".",
"debug",
"(",
"\" \"",
".",
"join",
"(",
"cmd1",
")",
")",
"proc1",
"=",
"sps",
".",
"Popen",
"(",
"cmd1",
",",
"stdout",
"=",
"sps",
".",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"allhaps",
"=",
"allcons",
".",
"replace",
"(",
"\"_catcons.tmp\"",
",",
"\"_cathaps.tmp\"",
")",
"with",
"open",
"(",
"allhaps",
",",
"'w'",
")",
"as",
"output",
":",
"LOGGER",
".",
"debug",
"(",
"\" \"",
".",
"join",
"(",
"cmd2",
")",
")",
"proc2",
"=",
"sps",
".",
"Popen",
"(",
"cmd2",
",",
"stdin",
"=",
"proc1",
".",
"stdout",
",",
"stdout",
"=",
"output",
",",
"close_fds",
"=",
"True",
")",
"proc2",
".",
"communicate",
"(",
")",
"proc1",
".",
"stdout",
".",
"close",
"(",
")",
"## now sort the file using vsearch",
"allsort",
"=",
"allcons",
".",
"replace",
"(",
"\"_catcons.tmp\"",
",",
"\"_catsort.tmp\"",
")",
"cmd1",
"=",
"[",
"ipyrad",
".",
"bins",
".",
"vsearch",
",",
"\"--sortbylength\"",
",",
"allhaps",
",",
"\"--fasta_width\"",
",",
"\"0\"",
",",
"\"--output\"",
",",
"allsort",
"]",
"LOGGER",
".",
"debug",
"(",
"\" \"",
".",
"join",
"(",
"cmd1",
")",
")",
"proc1",
"=",
"sps",
".",
"Popen",
"(",
"cmd1",
",",
"close_fds",
"=",
"True",
")",
"proc1",
".",
"communicate",
"(",
")",
"## shuffle sequences within size classes. Tested seed (8/31/2016)",
"## shuffling works repeatably with seed.",
"random",
".",
"seed",
"(",
"randomseed",
")",
"## open an iterator to lengthsorted file and grab two lines at at time",
"allshuf",
"=",
"allcons",
".",
"replace",
"(",
"\"_catcons.tmp\"",
",",
"\"_catshuf.tmp\"",
")",
"outdat",
"=",
"open",
"(",
"allshuf",
",",
"'w'",
")",
"indat",
"=",
"open",
"(",
"allsort",
",",
"'r'",
")",
"idat",
"=",
"itertools",
".",
"izip",
"(",
"iter",
"(",
"indat",
")",
",",
"iter",
"(",
"indat",
")",
")",
"done",
"=",
"0",
"chunk",
"=",
"[",
"idat",
".",
"next",
"(",
")",
"]",
"while",
"not",
"done",
":",
"## grab 2-lines until they become shorter (unless there's only one)",
"oldlen",
"=",
"len",
"(",
"chunk",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
")",
"while",
"1",
":",
"try",
":",
"dat",
"=",
"idat",
".",
"next",
"(",
")",
"except",
"StopIteration",
":",
"done",
"=",
"1",
"break",
"if",
"len",
"(",
"dat",
"[",
"-",
"1",
"]",
")",
"==",
"oldlen",
":",
"chunk",
".",
"append",
"(",
"dat",
")",
"else",
":",
"## send the last chunk off to be processed",
"random",
".",
"shuffle",
"(",
"chunk",
")",
"outdat",
".",
"write",
"(",
"\"\"",
".",
"join",
"(",
"itertools",
".",
"chain",
"(",
"*",
"chunk",
")",
")",
")",
"## start new chunk",
"chunk",
"=",
"[",
"dat",
"]",
"break",
"## do the last chunk",
"random",
".",
"shuffle",
"(",
"chunk",
")",
"outdat",
".",
"write",
"(",
"\"\"",
".",
"join",
"(",
"itertools",
".",
"chain",
"(",
"*",
"chunk",
")",
")",
")",
"indat",
".",
"close",
"(",
")",
"outdat",
".",
"close",
"(",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
clean_and_build_concat
|
STEP 6-1:
Clears dirs and databases and calls 'build_input_file()'
|
ipyrad/assemble/cluster_across.py
|
def clean_and_build_concat(data, samples, randomseed, ipyclient):
"""
STEP 6-1:
Clears dirs and databases and calls 'build_input_file()'
"""
## but check for new clust database name if this is a new branch
cleanup_tempfiles(data)
catclust = os.path.join(data.dirs.across, data.name+"_catclust.gz")
if os.path.exists(catclust):
os.remove(catclust)
if os.path.exists(data.clust_database):
os.remove(data.clust_database)
## get parallel view
start = time.time()
printstr = " concat/shuffle input | {} | s6 |"
## make a vsearch input fasta file with all samples reads concat
async = ipyclient[0].apply(build_input_file, *[data, samples, randomseed])
while 1:
ready = int(async.ready())
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(1, ready, printstr.format(elapsed), spacer=data._spacer)
if ready:
break
else:
time.sleep(0.1)
print("")
## store that this step was successful
if not async.successful():
raise IPyradWarningExit(async.result())
|
def clean_and_build_concat(data, samples, randomseed, ipyclient):
"""
STEP 6-1:
Clears dirs and databases and calls 'build_input_file()'
"""
## but check for new clust database name if this is a new branch
cleanup_tempfiles(data)
catclust = os.path.join(data.dirs.across, data.name+"_catclust.gz")
if os.path.exists(catclust):
os.remove(catclust)
if os.path.exists(data.clust_database):
os.remove(data.clust_database)
## get parallel view
start = time.time()
printstr = " concat/shuffle input | {} | s6 |"
## make a vsearch input fasta file with all samples reads concat
async = ipyclient[0].apply(build_input_file, *[data, samples, randomseed])
while 1:
ready = int(async.ready())
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(1, ready, printstr.format(elapsed), spacer=data._spacer)
if ready:
break
else:
time.sleep(0.1)
print("")
## store that this step was successful
if not async.successful():
raise IPyradWarningExit(async.result())
|
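A porting note on clean_and_build_concat above: its result variable is named async, which became a reserved keyword in Python 3.7, so a Python 3 port needs a rename. The sketch below is a generic stand-in for that polling loop (the helper name and the plain print-based progress line are illustrative; the original uses its own progressbar helper). It works with any object exposing .ready() and .result(), such as an ipyparallel AsyncResult, whose .result() re-raises exceptions from the remote engine.

import datetime
import time

def wait_for(job, label, spacer="  ", poll=0.1):
    # Poll a remote job, printing elapsed time until it finishes, then
    # return its result (which, for ipyparallel, re-raises remote errors).
    start = time.time()
    while not job.ready():
        elapsed = datetime.timedelta(seconds=int(time.time() - start))
        print("{}{} | {} |".format(spacer, label, elapsed), end="\r")
        time.sleep(poll)
    print("")
    return job.result()
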
[
"STEP",
"6",
"-",
"1",
":",
"Clears",
"dirs",
"and",
"databases",
"and",
"calls",
"build_input_file",
"()"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1647-L1678
|
[
"def",
"clean_and_build_concat",
"(",
"data",
",",
"samples",
",",
"randomseed",
",",
"ipyclient",
")",
":",
"## but check for new clust database name if this is a new branch",
"cleanup_tempfiles",
"(",
"data",
")",
"catclust",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\"_catclust.gz\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"catclust",
")",
":",
"os",
".",
"remove",
"(",
"catclust",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"data",
".",
"clust_database",
")",
":",
"os",
".",
"remove",
"(",
"data",
".",
"clust_database",
")",
"## get parallel view",
"start",
"=",
"time",
".",
"time",
"(",
")",
"printstr",
"=",
"\" concat/shuffle input | {} | s6 |\"",
"## make a vsearch input fasta file with all samples reads concat",
"async",
"=",
"ipyclient",
"[",
"0",
"]",
".",
"apply",
"(",
"build_input_file",
",",
"*",
"[",
"data",
",",
"samples",
",",
"randomseed",
"]",
")",
"while",
"1",
":",
"ready",
"=",
"int",
"(",
"async",
".",
"ready",
"(",
")",
")",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"1",
",",
"ready",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"if",
"ready",
":",
"break",
"else",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"print",
"(",
"\"\"",
")",
"## store that this step was successful",
"if",
"not",
"async",
".",
"successful",
"(",
")",
":",
"raise",
"IPyradWarningExit",
"(",
"async",
".",
"result",
"(",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
run
|
For step 6 the run function is subdivided a bit so that users with really
difficult assemblies can possibly interrupt and restart the step from a
checkpoint.
Substeps that are run:
1. build concat consens file,
2. cluster all consens,
3. split clusters into bits,
4. align bits,
5. build indel array
6. build h5 array.
7. Enter seq data & cleanup
|
ipyrad/assemble/cluster_across.py
|
def run(data, samples, noreverse, force, randomseed, ipyclient, **kwargs):
"""
    For step 6 the run function is subdivided a bit so that users with really
difficult assemblies can possibly interrupt and restart the step from a
checkpoint.
Substeps that are run:
1. build concat consens file,
2. cluster all consens,
3. split clusters into bits,
4. align bits,
5. build indel array
6. build h5 array.
7. Enter seq data & cleanup
"""
## if force then set checkpoint to zero and run all substeps for just
## the user specified steps.
if force:
data._checkpoint = 0
if kwargs.get('substeps'):
substeps = kwargs.get('substeps')
else:
substeps = range(1, 8)
## if {data}._checkpoint attribute exists then find the checkpoint where
## this assembly left off (unless force) and build step list from there.
else:
if kwargs.get('substeps'):
substeps = kwargs.get('substeps')
else:
if hasattr(data, '_checkpoint'):
substeps = range(max(1, data._checkpoint), 8)
else:
data._checkpoint = 0
substeps = range(1, 8)
    ## build substeps list to subset which functions need to be run
if isinstance(substeps, (int, float, str)):
substeps = [substeps]
substeps = [int(i) for i in substeps]
## print continuation message
if substeps[0] != 1:
print("{}Continuing from checkpoint 6.{}"\
.format(data._spacer, substeps[0]))
LOGGER.info("checkpoint = %s", data._checkpoint)
LOGGER.info("substeps = %s", substeps)
## Set variables on data that are needed for all steps;
data.dirs.across = os.path.realpath(
os.path.join(data.paramsdict["project_dir"], data.name+"_across"))
data.tmpdir = os.path.join(data.dirs.across, data.name+"-tmpalign")
data.clust_database = os.path.join(data.dirs.across, data.name+".clust.hdf5")
if not os.path.exists(data.dirs.across):
os.mkdir(data.dirs.across)
if not os.path.exists(data.tmpdir):
os.mkdir(data.tmpdir)
data.cpus = data._ipcluster["cores"]
if not data.cpus:
data.cpus = len(ipyclient)
## STEP 6-1: Clean database and build input concat file for clustering
if 1 in substeps:
clean_and_build_concat(data, samples, randomseed, ipyclient)
data._checkpoint = 1
## STEP 6-2: Cluster across w/ vsearch; uses all threads on largest host
if 2 in substeps:
call_cluster(data, noreverse, ipyclient)
data._checkpoint = 2
## builds consens cluster bits and writes them to the tmp directory. These
## will not be deleted until either step 6-6 is complete, or the force flag
## is used. This will clear the tmpdir if it is run.
if 3 in substeps:
build_clustbits(data, ipyclient, force)
data._checkpoint = 3
## muscle align the cluster bits and create tmp hdf5 indel arrays for the
## next step. These will not be deleted until...
if 4 in substeps:
multi_muscle_align(data, samples, ipyclient)
data._checkpoint = 4
## fill the indel array with the indel tmp arrays from aligning step.
if 5 in substeps:
build_indels(data, samples, ipyclient)
data._checkpoint = 5
if 6 in substeps:
## builds the final HDF5 array which includes three main keys
## /catg -- contains all indiv catgs and has indels inserted
## .attr['samples'] = [samples]
## /filters -- filled for dups, left empty for others until step 7.
## .attr['filters'] = [f1, f2, f3, f4, f5]
## /seqs -- contains the clustered sequence data as string arrays
## .attr['samples'] = [samples]
## /edges -- gets the paired split locations for now.
## /snps -- left empty for now
## FILL SUPERCATG and fills dupfilter, indfilter, and nalleles
## this function calls singlecat() on each sample and enters their
## resulting arrays into the superarray. If all singlecats are built
## then it will continue to enter them into the database.
LOGGER.info("multicat -- building full database")
new_multicat(data, samples, ipyclient)
data._checkpoint = 6
if 7 in substeps:
## FILL SUPERSEQS and fills edges(splits) for paired-end data
fill_superseqs(data, samples)
data._checkpoint = 7
## remove files but not dir (used in step 1 too)
cleanup_tempfiles(data)
## remove the tmpdir
if os.path.exists(data.tmpdir):
shutil.rmtree(data.tmpdir)
## set sample states
for sample in samples:
sample.stats.state = 6
print("")
|
def run(data, samples, noreverse, force, randomseed, ipyclient, **kwargs):
"""
    For step 6 the run function is subdivided a bit so that users with really
difficult assemblies can possibly interrupt and restart the step from a
checkpoint.
Substeps that are run:
1. build concat consens file,
2. cluster all consens,
3. split clusters into bits,
4. align bits,
5. build indel array
6. build h5 array.
7. Enter seq data & cleanup
"""
## if force then set checkpoint to zero and run all substeps for just
## the user specified steps.
if force:
data._checkpoint = 0
if kwargs.get('substeps'):
substeps = kwargs.get('substeps')
else:
substeps = range(1, 8)
## if {data}._checkpoint attribute exists then find the checkpoint where
## this assembly left off (unless force) and build step list from there.
else:
if kwargs.get('substeps'):
substeps = kwargs.get('substeps')
else:
if hasattr(data, '_checkpoint'):
substeps = range(max(1, data._checkpoint), 8)
else:
data._checkpoint = 0
substeps = range(1, 8)
    ## build substeps list to subset which functions need to be run
if isinstance(substeps, (int, float, str)):
substeps = [substeps]
substeps = [int(i) for i in substeps]
## print continuation message
if substeps[0] != 1:
print("{}Continuing from checkpoint 6.{}"\
.format(data._spacer, substeps[0]))
LOGGER.info("checkpoint = %s", data._checkpoint)
LOGGER.info("substeps = %s", substeps)
## Set variables on data that are needed for all steps;
data.dirs.across = os.path.realpath(
os.path.join(data.paramsdict["project_dir"], data.name+"_across"))
data.tmpdir = os.path.join(data.dirs.across, data.name+"-tmpalign")
data.clust_database = os.path.join(data.dirs.across, data.name+".clust.hdf5")
if not os.path.exists(data.dirs.across):
os.mkdir(data.dirs.across)
if not os.path.exists(data.tmpdir):
os.mkdir(data.tmpdir)
data.cpus = data._ipcluster["cores"]
if not data.cpus:
data.cpus = len(ipyclient)
## STEP 6-1: Clean database and build input concat file for clustering
if 1 in substeps:
clean_and_build_concat(data, samples, randomseed, ipyclient)
data._checkpoint = 1
## STEP 6-2: Cluster across w/ vsearch; uses all threads on largest host
if 2 in substeps:
call_cluster(data, noreverse, ipyclient)
data._checkpoint = 2
## builds consens cluster bits and writes them to the tmp directory. These
## will not be deleted until either step 6-6 is complete, or the force flag
## is used. This will clear the tmpdir if it is run.
if 3 in substeps:
build_clustbits(data, ipyclient, force)
data._checkpoint = 3
## muscle align the cluster bits and create tmp hdf5 indel arrays for the
## next step. These will not be deleted until...
if 4 in substeps:
multi_muscle_align(data, samples, ipyclient)
data._checkpoint = 4
## fill the indel array with the indel tmp arrays from aligning step.
if 5 in substeps:
build_indels(data, samples, ipyclient)
data._checkpoint = 5
if 6 in substeps:
## builds the final HDF5 array which includes three main keys
## /catg -- contains all indiv catgs and has indels inserted
## .attr['samples'] = [samples]
## /filters -- filled for dups, left empty for others until step 7.
## .attr['filters'] = [f1, f2, f3, f4, f5]
## /seqs -- contains the clustered sequence data as string arrays
## .attr['samples'] = [samples]
## /edges -- gets the paired split locations for now.
## /snps -- left empty for now
## FILL SUPERCATG and fills dupfilter, indfilter, and nalleles
## this function calls singlecat() on each sample and enters their
## resulting arrays into the superarray. If all singlecats are built
## then it will continue to enter them into the database.
LOGGER.info("multicat -- building full database")
new_multicat(data, samples, ipyclient)
data._checkpoint = 6
if 7 in substeps:
## FILL SUPERSEQS and fills edges(splits) for paired-end data
fill_superseqs(data, samples)
data._checkpoint = 7
## remove files but not dir (used in step 1 too)
cleanup_tempfiles(data)
## remove the tmpdir
if os.path.exists(data.tmpdir):
shutil.rmtree(data.tmpdir)
## set sample states
for sample in samples:
sample.stats.state = 6
print("")
|
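The checkpoint/force/substeps branching at the top of run() can be hard to follow, so here is a small stand-alone sketch of the same decision (the helper name is illustrative, and the checkpoint is passed in explicitly instead of being read from data._checkpoint).

def select_substeps(force, checkpoint, substeps=None, nsteps=7):
    # 'force' restarts from substep 1; otherwise resume from the stored
    # checkpoint; an explicit substeps argument always takes precedence.
    if substeps is None:
        if force:
            substeps = range(1, nsteps + 1)
        else:
            substeps = range(max(1, checkpoint), nsteps + 1)
    if isinstance(substeps, (int, float, str)):
        substeps = [substeps]
    return [int(i) for i in substeps]

# e.g. select_substeps(force=False, checkpoint=4)  ->  [4, 5, 6, 7]
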
[
"For",
"step",
"6",
"the",
"run",
"function",
"is",
"sub",
"divided",
"a",
"bit",
"so",
"that",
"users",
"with",
"really",
"difficult",
"assemblies",
"can",
"possibly",
"interrupt",
"and",
"restart",
"the",
"step",
"from",
"a",
"checkpoint",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1682-L1805
|
[
"def",
"run",
"(",
"data",
",",
"samples",
",",
"noreverse",
",",
"force",
",",
"randomseed",
",",
"ipyclient",
",",
"*",
"*",
"kwargs",
")",
":",
"## if force then set checkpoint to zero and run all substeps for just",
"## the user specified steps. ",
"if",
"force",
":",
"data",
".",
"_checkpoint",
"=",
"0",
"if",
"kwargs",
".",
"get",
"(",
"'substeps'",
")",
":",
"substeps",
"=",
"kwargs",
".",
"get",
"(",
"'substeps'",
")",
"else",
":",
"substeps",
"=",
"range",
"(",
"1",
",",
"8",
")",
"## if {data}._checkpoint attribute exists then find the checkpoint where",
"## this assembly left off (unless force) and build step list from there.",
"else",
":",
"if",
"kwargs",
".",
"get",
"(",
"'substeps'",
")",
":",
"substeps",
"=",
"kwargs",
".",
"get",
"(",
"'substeps'",
")",
"else",
":",
"if",
"hasattr",
"(",
"data",
",",
"'_checkpoint'",
")",
":",
"substeps",
"=",
"range",
"(",
"max",
"(",
"1",
",",
"data",
".",
"_checkpoint",
")",
",",
"8",
")",
"else",
":",
"data",
".",
"_checkpoint",
"=",
"0",
"substeps",
"=",
"range",
"(",
"1",
",",
"8",
")",
"## build substeps list to subset which funtions need to be run",
"if",
"isinstance",
"(",
"substeps",
",",
"(",
"int",
",",
"float",
",",
"str",
")",
")",
":",
"substeps",
"=",
"[",
"substeps",
"]",
"substeps",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"substeps",
"]",
"## print continuation message",
"if",
"substeps",
"[",
"0",
"]",
"!=",
"1",
":",
"print",
"(",
"\"{}Continuing from checkpoint 6.{}\"",
".",
"format",
"(",
"data",
".",
"_spacer",
",",
"substeps",
"[",
"0",
"]",
")",
")",
"LOGGER",
".",
"info",
"(",
"\"checkpoint = %s\"",
",",
"data",
".",
"_checkpoint",
")",
"LOGGER",
".",
"info",
"(",
"\"substeps = %s\"",
",",
"substeps",
")",
"## Set variables on data that are needed for all steps;",
"data",
".",
"dirs",
".",
"across",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"paramsdict",
"[",
"\"project_dir\"",
"]",
",",
"data",
".",
"name",
"+",
"\"_across\"",
")",
")",
"data",
".",
"tmpdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\"-tmpalign\"",
")",
"data",
".",
"clust_database",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".clust.hdf5\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"data",
".",
"dirs",
".",
"across",
")",
":",
"os",
".",
"mkdir",
"(",
"data",
".",
"dirs",
".",
"across",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"data",
".",
"tmpdir",
")",
":",
"os",
".",
"mkdir",
"(",
"data",
".",
"tmpdir",
")",
"data",
".",
"cpus",
"=",
"data",
".",
"_ipcluster",
"[",
"\"cores\"",
"]",
"if",
"not",
"data",
".",
"cpus",
":",
"data",
".",
"cpus",
"=",
"len",
"(",
"ipyclient",
")",
"## STEP 6-1: Clean database and build input concat file for clustering",
"if",
"1",
"in",
"substeps",
":",
"clean_and_build_concat",
"(",
"data",
",",
"samples",
",",
"randomseed",
",",
"ipyclient",
")",
"data",
".",
"_checkpoint",
"=",
"1",
"## STEP 6-2: Cluster across w/ vsearch; uses all threads on largest host ",
"if",
"2",
"in",
"substeps",
":",
"call_cluster",
"(",
"data",
",",
"noreverse",
",",
"ipyclient",
")",
"data",
".",
"_checkpoint",
"=",
"2",
"## builds consens cluster bits and writes them to the tmp directory. These",
"## will not be deleted until either step 6-6 is complete, or the force flag",
"## is used. This will clear the tmpdir if it is run.",
"if",
"3",
"in",
"substeps",
":",
"build_clustbits",
"(",
"data",
",",
"ipyclient",
",",
"force",
")",
"data",
".",
"_checkpoint",
"=",
"3",
"## muscle align the cluster bits and create tmp hdf5 indel arrays for the",
"## next step. These will not be deleted until...",
"if",
"4",
"in",
"substeps",
":",
"multi_muscle_align",
"(",
"data",
",",
"samples",
",",
"ipyclient",
")",
"data",
".",
"_checkpoint",
"=",
"4",
"## fill the indel array with the indel tmp arrays from aligning step.",
"if",
"5",
"in",
"substeps",
":",
"build_indels",
"(",
"data",
",",
"samples",
",",
"ipyclient",
")",
"data",
".",
"_checkpoint",
"=",
"5",
"if",
"6",
"in",
"substeps",
":",
"## builds the final HDF5 array which includes three main keys",
"## /catg -- contains all indiv catgs and has indels inserted",
"## .attr['samples'] = [samples]",
"## /filters -- filled for dups, left empty for others until step 7.",
"## .attr['filters'] = [f1, f2, f3, f4, f5]",
"## /seqs -- contains the clustered sequence data as string arrays",
"## .attr['samples'] = [samples]",
"## /edges -- gets the paired split locations for now.",
"## /snps -- left empty for now",
"## FILL SUPERCATG and fills dupfilter, indfilter, and nalleles",
"## this function calls singlecat() on each sample and enters their",
"## resulting arrays into the superarray. If all singlecats are built",
"## then it will continue to enter them into the database. ",
"LOGGER",
".",
"info",
"(",
"\"multicat -- building full database\"",
")",
"new_multicat",
"(",
"data",
",",
"samples",
",",
"ipyclient",
")",
"data",
".",
"_checkpoint",
"=",
"6",
"if",
"7",
"in",
"substeps",
":",
"## FILL SUPERSEQS and fills edges(splits) for paired-end data",
"fill_superseqs",
"(",
"data",
",",
"samples",
")",
"data",
".",
"_checkpoint",
"=",
"7",
"## remove files but not dir (used in step 1 too)",
"cleanup_tempfiles",
"(",
"data",
")",
"## remove the tmpdir",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"data",
".",
"tmpdir",
")",
":",
"shutil",
".",
"rmtree",
"(",
"data",
".",
"tmpdir",
")",
"## set sample states",
"for",
"sample",
"in",
"samples",
":",
"sample",
".",
"stats",
".",
"state",
"=",
"6",
"print",
"(",
"\"\"",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
cleanup_tempfiles
|
Function to remove older files. This is called either in substep 1 or after
the final substep so that tempfiles are retained for restarting interrupted
jobs until we're sure they're no longer needed.
|
ipyrad/assemble/cluster_across.py
|
def cleanup_tempfiles(data):
"""
Function to remove older files. This is called either in substep 1 or after
the final substep so that tempfiles are retained for restarting interrupted
jobs until we're sure they're no longer needed.
"""
## remove align-related tmp files
tmps1 = glob.glob(os.path.join(data.tmpdir, "*.fa"))
tmps2 = glob.glob(os.path.join(data.tmpdir, "*.npy"))
for tmp in tmps1 + tmps2:
if os.path.exists(tmp):
os.remove(tmp)
## remove cluster related files
removal = [
os.path.join(data.dirs.across, data.name+".utemp"),
os.path.join(data.dirs.across, data.name+".htemp"),
os.path.join(data.dirs.across, data.name+"_catcons.tmp"),
os.path.join(data.dirs.across, data.name+"_cathaps.tmp"),
os.path.join(data.dirs.across, data.name+"_catshuf.tmp"),
os.path.join(data.dirs.across, data.name+"_catsort.tmp"),
os.path.join(data.dirs.across, data.name+".tmparrs.h5"),
os.path.join(data.dirs.across, data.name+".tmp.indels.hdf5"),
]
for rfile in removal:
if os.path.exists(rfile):
os.remove(rfile)
## remove singlecat related h5 files
smpios = glob.glob(os.path.join(data.dirs.across, '*.tmp.h5'))
for smpio in smpios:
if os.path.exists(smpio):
os.remove(smpio)
|
def cleanup_tempfiles(data):
"""
Function to remove older files. This is called either in substep 1 or after
the final substep so that tempfiles are retained for restarting interrupted
jobs until we're sure they're no longer needed.
"""
## remove align-related tmp files
tmps1 = glob.glob(os.path.join(data.tmpdir, "*.fa"))
tmps2 = glob.glob(os.path.join(data.tmpdir, "*.npy"))
for tmp in tmps1 + tmps2:
if os.path.exists(tmp):
os.remove(tmp)
## remove cluster related files
removal = [
os.path.join(data.dirs.across, data.name+".utemp"),
os.path.join(data.dirs.across, data.name+".htemp"),
os.path.join(data.dirs.across, data.name+"_catcons.tmp"),
os.path.join(data.dirs.across, data.name+"_cathaps.tmp"),
os.path.join(data.dirs.across, data.name+"_catshuf.tmp"),
os.path.join(data.dirs.across, data.name+"_catsort.tmp"),
os.path.join(data.dirs.across, data.name+".tmparrs.h5"),
os.path.join(data.dirs.across, data.name+".tmp.indels.hdf5"),
]
for rfile in removal:
if os.path.exists(rfile):
os.remove(rfile)
## remove singlecat related h5 files
smpios = glob.glob(os.path.join(data.dirs.across, '*.tmp.h5'))
for smpio in smpios:
if os.path.exists(smpio):
os.remove(smpio)
|
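The repeated os.path.exists/os.remove pairs in cleanup_tempfiles above can be collapsed in Python 3 by suppressing FileNotFoundError; a minimal sketch with an illustrative helper name:

import contextlib
import os

def remove_quietly(paths):
    # Remove each file, silently skipping any that are already gone.
    for path in paths:
        with contextlib.suppress(FileNotFoundError):
            os.remove(path)
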
[
"Function",
"to",
"remove",
"older",
"files",
".",
"This",
"is",
"called",
"either",
"in",
"substep",
"1",
"or",
"after",
"the",
"final",
"substep",
"so",
"that",
"tempfiles",
"are",
"retained",
"for",
"restarting",
"interrupted",
"jobs",
"until",
"we",
"re",
"sure",
"they",
"re",
"no",
"longer",
"needed",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1809-L1842
|
[
"def",
"cleanup_tempfiles",
"(",
"data",
")",
":",
"## remove align-related tmp files",
"tmps1",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"\"*.fa\"",
")",
")",
"tmps2",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"tmpdir",
",",
"\"*.npy\"",
")",
")",
"for",
"tmp",
"in",
"tmps1",
"+",
"tmps2",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"tmp",
")",
":",
"os",
".",
"remove",
"(",
"tmp",
")",
"## remove cluster related files",
"removal",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".utemp\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".htemp\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\"_catcons.tmp\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\"_cathaps.tmp\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\"_catshuf.tmp\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\"_catsort.tmp\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".tmparrs.h5\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"data",
".",
"name",
"+",
"\".tmp.indels.hdf5\"",
")",
",",
"]",
"for",
"rfile",
"in",
"removal",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"rfile",
")",
":",
"os",
".",
"remove",
"(",
"rfile",
")",
"## remove singlecat related h5 files",
"smpios",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"across",
",",
"'*.tmp.h5'",
")",
")",
"for",
"smpio",
"in",
"smpios",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"smpio",
")",
":",
"os",
".",
"remove",
"(",
"smpio",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
assembly_cleanup
|
cleanup for assembly object
|
ipyrad/assemble/rawedit.py
|
def assembly_cleanup(data):
""" cleanup for assembly object """
## build s2 results data frame
data.stats_dfs.s2 = data._build_stat("s2")
data.stats_files.s2 = os.path.join(data.dirs.edits, 's2_rawedit_stats.txt')
## write stats for all samples
with io.open(data.stats_files.s2, 'w', encoding='utf-8') as outfile:
data.stats_dfs.s2.fillna(value=0).astype(np.int).to_string(outfile)
|
def assembly_cleanup(data):
""" cleanup for assembly object """
## build s2 results data frame
data.stats_dfs.s2 = data._build_stat("s2")
data.stats_files.s2 = os.path.join(data.dirs.edits, 's2_rawedit_stats.txt')
## write stats for all samples
with io.open(data.stats_files.s2, 'w', encoding='utf-8') as outfile:
data.stats_dfs.s2.fillna(value=0).astype(np.int).to_string(outfile)
|
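A hedged sketch of the stats-writing pattern in assembly_cleanup above (the function name, argument names, and default filename are illustrative): fill missing per-sample values with 0, cast counts to plain int rather than np.int (which is removed in recent NumPy releases), and write pandas' fixed-width text table.

import io
import os
import pandas as pd

def write_stats_table(stats_df, outdir, fname="s2_rawedit_stats.txt"):
    # stats_df is assumed to be a pandas DataFrame of per-sample counts.
    outpath = os.path.join(outdir, fname)
    with io.open(outpath, "w", encoding="utf-8") as outfile:
        stats_df.fillna(value=0).astype(int).to_string(outfile)
    return outpath

# e.g. write_stats_table(pd.DataFrame({"reads_raw": [1000, None]},
#                                     index=["1A_0", "1B_0"]), ".")
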
[
"cleanup",
"for",
"assembly",
"object"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L36-L45
|
[
"def",
"assembly_cleanup",
"(",
"data",
")",
":",
"## build s2 results data frame",
"data",
".",
"stats_dfs",
".",
"s2",
"=",
"data",
".",
"_build_stat",
"(",
"\"s2\"",
")",
"data",
".",
"stats_files",
".",
"s2",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"edits",
",",
"'s2_rawedit_stats.txt'",
")",
"## write stats for all samples",
"with",
"io",
".",
"open",
"(",
"data",
".",
"stats_files",
".",
"s2",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"outfile",
":",
"data",
".",
"stats_dfs",
".",
"s2",
".",
"fillna",
"(",
"value",
"=",
"0",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
".",
"to_string",
"(",
"outfile",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
parse_single_results
|
parse results from cutadapt into sample data
|
ipyrad/assemble/rawedit.py
|
def parse_single_results(data, sample, res1):
""" parse results from cutadapt into sample data"""
## set default values
#sample.stats_dfs.s2["reads_raw"] = 0
sample.stats_dfs.s2["trim_adapter_bp_read1"] = 0
sample.stats_dfs.s2["trim_quality_bp_read1"] = 0
sample.stats_dfs.s2["reads_filtered_by_Ns"] = 0
sample.stats_dfs.s2["reads_filtered_by_minlen"] = 0
sample.stats_dfs.s2["reads_passed_filter"] = 0
## parse new values from cutadapt results output
lines = res1.strip().split("\n")
for line in lines:
if "Total reads processed:" in line:
value = int(line.split()[3].replace(",", ""))
sample.stats_dfs.s2["reads_raw"] = value
if "Reads with adapters:" in line:
value = int(line.split()[3].replace(",", ""))
sample.stats_dfs.s2["trim_adapter_bp_read1"] = value
if "Quality-trimmed" in line:
value = int(line.split()[1].replace(",", ""))
sample.stats_dfs.s2["trim_quality_bp_read1"] = value
if "Reads that were too short" in line:
value = int(line.split()[5].replace(",", ""))
sample.stats_dfs.s2["reads_filtered_by_minlen"] = value
if "Reads with too many N" in line:
value = int(line.split()[5].replace(",", ""))
sample.stats_dfs.s2["reads_filtered_by_Ns"] = value
if "Reads written (passing filters):" in line:
value = int(line.split()[4].replace(",", ""))
sample.stats_dfs.s2["reads_passed_filter"] = value
## save to stats summary
if sample.stats_dfs.s2.reads_passed_filter:
sample.stats.state = 2
sample.stats.reads_passed_filter = sample.stats_dfs.s2.reads_passed_filter
sample.files.edits = [
(OPJ(data.dirs.edits, sample.name+".trimmed_R1_.fastq.gz"), 0)]
## write the long form output to the log file.
LOGGER.info(res1)
else:
print("{}No reads passed filtering in Sample: {}".format(data._spacer, sample.name))
|
def parse_single_results(data, sample, res1):
""" parse results from cutadapt into sample data"""
## set default values
#sample.stats_dfs.s2["reads_raw"] = 0
sample.stats_dfs.s2["trim_adapter_bp_read1"] = 0
sample.stats_dfs.s2["trim_quality_bp_read1"] = 0
sample.stats_dfs.s2["reads_filtered_by_Ns"] = 0
sample.stats_dfs.s2["reads_filtered_by_minlen"] = 0
sample.stats_dfs.s2["reads_passed_filter"] = 0
## parse new values from cutadapt results output
lines = res1.strip().split("\n")
for line in lines:
if "Total reads processed:" in line:
value = int(line.split()[3].replace(",", ""))
sample.stats_dfs.s2["reads_raw"] = value
if "Reads with adapters:" in line:
value = int(line.split()[3].replace(",", ""))
sample.stats_dfs.s2["trim_adapter_bp_read1"] = value
if "Quality-trimmed" in line:
value = int(line.split()[1].replace(",", ""))
sample.stats_dfs.s2["trim_quality_bp_read1"] = value
if "Reads that were too short" in line:
value = int(line.split()[5].replace(",", ""))
sample.stats_dfs.s2["reads_filtered_by_minlen"] = value
if "Reads with too many N" in line:
value = int(line.split()[5].replace(",", ""))
sample.stats_dfs.s2["reads_filtered_by_Ns"] = value
if "Reads written (passing filters):" in line:
value = int(line.split()[4].replace(",", ""))
sample.stats_dfs.s2["reads_passed_filter"] = value
## save to stats summary
if sample.stats_dfs.s2.reads_passed_filter:
sample.stats.state = 2
sample.stats.reads_passed_filter = sample.stats_dfs.s2.reads_passed_filter
sample.files.edits = [
(OPJ(data.dirs.edits, sample.name+".trimmed_R1_.fastq.gz"), 0)]
## write the long form output to the log file.
LOGGER.info(res1)
else:
print("{}No reads passed filtering in Sample: {}".format(data._spacer, sample.name))
|
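The per-line string matching in parse_single_results can also be expressed as a small table of (line prefix, token index) pairs. The sketch below shows that pattern; the names are illustrative, and the exact prefixes and indices depend on the cutadapt version that produced the report, just as they do in the code above.

def parse_cutadapt_totals(report, fields):
    # Pull integer totals out of a cutadapt text report. 'fields' maps a
    # stat name to (line prefix, index of the number in the split line),
    # mirroring the hard-coded indices used in parse_single_results().
    stats = {key: 0 for key in fields}
    for line in report.strip().split("\n"):
        for key, (prefix, idx) in fields.items():
            if prefix in line:
                stats[key] = int(line.split()[idx].replace(",", ""))
    return stats

# Illustrative mapping only -- exact wording and indices vary by version:
example_fields = {
    "reads_raw": ("Total reads processed:", 3),
    "reads_passed_filter": ("Reads written (passing filters):", 4),
}
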
[
"parse",
"results",
"from",
"cutadapt",
"into",
"sample",
"data"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L49-L98
|
[
"def",
"parse_single_results",
"(",
"data",
",",
"sample",
",",
"res1",
")",
":",
"## set default values ",
"#sample.stats_dfs.s2[\"reads_raw\"] = 0",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_adapter_bp_read1\"",
"]",
"=",
"0",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_quality_bp_read1\"",
"]",
"=",
"0",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_filtered_by_Ns\"",
"]",
"=",
"0",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_filtered_by_minlen\"",
"]",
"=",
"0",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_passed_filter\"",
"]",
"=",
"0",
"## parse new values from cutadapt results output",
"lines",
"=",
"res1",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"\"Total reads processed:\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"3",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_raw\"",
"]",
"=",
"value",
"if",
"\"Reads with adapters:\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"3",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_adapter_bp_read1\"",
"]",
"=",
"value",
"if",
"\"Quality-trimmed\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"1",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_quality_bp_read1\"",
"]",
"=",
"value",
"if",
"\"Reads that were too short\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"5",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_filtered_by_minlen\"",
"]",
"=",
"value",
"if",
"\"Reads with too many N\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"5",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_filtered_by_Ns\"",
"]",
"=",
"value",
"if",
"\"Reads written (passing filters):\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"4",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_passed_filter\"",
"]",
"=",
"value",
"## save to stats summary",
"if",
"sample",
".",
"stats_dfs",
".",
"s2",
".",
"reads_passed_filter",
":",
"sample",
".",
"stats",
".",
"state",
"=",
"2",
"sample",
".",
"stats",
".",
"reads_passed_filter",
"=",
"sample",
".",
"stats_dfs",
".",
"s2",
".",
"reads_passed_filter",
"sample",
".",
"files",
".",
"edits",
"=",
"[",
"(",
"OPJ",
"(",
"data",
".",
"dirs",
".",
"edits",
",",
"sample",
".",
"name",
"+",
"\".trimmed_R1_.fastq.gz\"",
")",
",",
"0",
")",
"]",
"## write the long form output to the log file.",
"LOGGER",
".",
"info",
"(",
"res1",
")",
"else",
":",
"print",
"(",
"\"{}No reads passed filtering in Sample: {}\"",
".",
"format",
"(",
"data",
".",
"_spacer",
",",
"sample",
".",
"name",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
parse_pair_results
|
parse results from cutadapt for paired data
|
ipyrad/assemble/rawedit.py
|
def parse_pair_results(data, sample, res):
""" parse results from cutadapt for paired data"""
LOGGER.info("in parse pair mod results\n%s", res)
## set default values
sample.stats_dfs.s2["trim_adapter_bp_read1"] = 0
sample.stats_dfs.s2["trim_adapter_bp_read2"] = 0
sample.stats_dfs.s2["trim_quality_bp_read1"] = 0
sample.stats_dfs.s2["trim_quality_bp_read2"] = 0
sample.stats_dfs.s2["reads_filtered_by_Ns"] = 0
sample.stats_dfs.s2["reads_filtered_by_minlen"] = 0
sample.stats_dfs.s2["reads_passed_filter"] = 0
lines = res.strip().split("\n")
qprimed = 0
for line in lines:
## set primer to catch next line
if "Quality-trimmed" in line:
qprimed = 1
## grab read1 and read2 lines when qprimed
if "Read 1:" in line:
if qprimed:
value = int(line.split()[2].replace(",", ""))
sample.stats_dfs.s2["trim_quality_bp_read1"] = value
if "Read 2:" in line:
if qprimed:
value = int(line.split()[2].replace(",", ""))
sample.stats_dfs.s2["trim_quality_bp_read2"] = value
qprimed = 0
if "Read 1 with adapter:" in line:
value = int(line.split()[4].replace(",", ""))
sample.stats_dfs.s2["trim_adapter_bp_read1"] = value
if "Read 2 with adapter:" in line:
value = int(line.split()[4].replace(",", ""))
sample.stats_dfs.s2["trim_adapter_bp_read2"] = value
if "Total read pairs processed:" in line:
value = int(line.split()[4].replace(",", ""))
sample.stats_dfs.s2["reads_raw"] = value
if "Pairs that were too short" in line:
value = int(line.split()[5].replace(",", ""))
sample.stats_dfs.s2["reads_filtered_by_minlen"] = value
if "Pairs with too many N" in line:
value = int(line.split()[5].replace(",", ""))
sample.stats_dfs.s2["reads_filtered_by_Ns"] = value
if "Pairs written (passing filters):" in line:
value = int(line.split()[4].replace(",", ""))
sample.stats_dfs.s2["reads_passed_filter"] = value
## save to stats summary
if sample.stats_dfs.s2.reads_passed_filter:
sample.stats.state = 2
sample.stats.reads_passed_filter = sample.stats_dfs.s2.reads_passed_filter
sample.files.edits = [(
OPJ(data.dirs.edits, sample.name+".trimmed_R1_.fastq.gz"),
OPJ(data.dirs.edits, sample.name+".trimmed_R2_.fastq.gz")
)]
else:
print("No reads passed filtering in Sample: {}".format(sample.name))
|
def parse_pair_results(data, sample, res):
""" parse results from cutadapt for paired data"""
LOGGER.info("in parse pair mod results\n%s", res)
## set default values
sample.stats_dfs.s2["trim_adapter_bp_read1"] = 0
sample.stats_dfs.s2["trim_adapter_bp_read2"] = 0
sample.stats_dfs.s2["trim_quality_bp_read1"] = 0
sample.stats_dfs.s2["trim_quality_bp_read2"] = 0
sample.stats_dfs.s2["reads_filtered_by_Ns"] = 0
sample.stats_dfs.s2["reads_filtered_by_minlen"] = 0
sample.stats_dfs.s2["reads_passed_filter"] = 0
lines = res.strip().split("\n")
qprimed = 0
for line in lines:
## set primer to catch next line
if "Quality-trimmed" in line:
qprimed = 1
## grab read1 and read2 lines when qprimed
if "Read 1:" in line:
if qprimed:
value = int(line.split()[2].replace(",", ""))
sample.stats_dfs.s2["trim_quality_bp_read1"] = value
if "Read 2:" in line:
if qprimed:
value = int(line.split()[2].replace(",", ""))
sample.stats_dfs.s2["trim_quality_bp_read2"] = value
qprimed = 0
if "Read 1 with adapter:" in line:
value = int(line.split()[4].replace(",", ""))
sample.stats_dfs.s2["trim_adapter_bp_read1"] = value
if "Read 2 with adapter:" in line:
value = int(line.split()[4].replace(",", ""))
sample.stats_dfs.s2["trim_adapter_bp_read2"] = value
if "Total read pairs processed:" in line:
value = int(line.split()[4].replace(",", ""))
sample.stats_dfs.s2["reads_raw"] = value
if "Pairs that were too short" in line:
value = int(line.split()[5].replace(",", ""))
sample.stats_dfs.s2["reads_filtered_by_minlen"] = value
if "Pairs with too many N" in line:
value = int(line.split()[5].replace(",", ""))
sample.stats_dfs.s2["reads_filtered_by_Ns"] = value
if "Pairs written (passing filters):" in line:
value = int(line.split()[4].replace(",", ""))
sample.stats_dfs.s2["reads_passed_filter"] = value
## save to stats summary
if sample.stats_dfs.s2.reads_passed_filter:
sample.stats.state = 2
sample.stats.reads_passed_filter = sample.stats_dfs.s2.reads_passed_filter
sample.files.edits = [(
OPJ(data.dirs.edits, sample.name+".trimmed_R1_.fastq.gz"),
OPJ(data.dirs.edits, sample.name+".trimmed_R2_.fastq.gz")
)]
else:
print("No reads passed filtering in Sample: {}".format(sample.name))
|
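In the paired-end report parsed above, the quality-trimmed totals appear on "Read 1:" and "Read 2:" lines that follow a "Quality-trimmed" header, which is why parse_pair_results carries the qprimed flag. A stand-alone sketch of just that state machine (function name illustrative):

def quality_trimmed_bp(report):
    # Return (read1_bp, read2_bp) quality-trimmed totals from a paired
    # cutadapt report, reading only the lines after 'Quality-trimmed'.
    r1 = r2 = 0
    primed = False
    for line in report.strip().split("\n"):
        if "Quality-trimmed" in line:
            primed = True
        elif primed and "Read 1:" in line:
            r1 = int(line.split()[2].replace(",", ""))
        elif primed and "Read 2:" in line:
            r2 = int(line.split()[2].replace(",", ""))
            primed = False
    return r1, r2
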
[
"parse",
"results",
"from",
"cutadapt",
"for",
"paired",
"data"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L102-L167
|
[
"def",
"parse_pair_results",
"(",
"data",
",",
"sample",
",",
"res",
")",
":",
"LOGGER",
".",
"info",
"(",
"\"in parse pair mod results\\n%s\"",
",",
"res",
")",
"## set default values",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_adapter_bp_read1\"",
"]",
"=",
"0",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_adapter_bp_read2\"",
"]",
"=",
"0",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_quality_bp_read1\"",
"]",
"=",
"0",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_quality_bp_read2\"",
"]",
"=",
"0",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_filtered_by_Ns\"",
"]",
"=",
"0",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_filtered_by_minlen\"",
"]",
"=",
"0",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_passed_filter\"",
"]",
"=",
"0",
"lines",
"=",
"res",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"qprimed",
"=",
"0",
"for",
"line",
"in",
"lines",
":",
"## set primer to catch next line",
"if",
"\"Quality-trimmed\"",
"in",
"line",
":",
"qprimed",
"=",
"1",
"## grab read1 and read2 lines when qprimed",
"if",
"\"Read 1:\"",
"in",
"line",
":",
"if",
"qprimed",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"2",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_quality_bp_read1\"",
"]",
"=",
"value",
"if",
"\"Read 2:\"",
"in",
"line",
":",
"if",
"qprimed",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"2",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_quality_bp_read2\"",
"]",
"=",
"value",
"qprimed",
"=",
"0",
"if",
"\"Read 1 with adapter:\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"4",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_adapter_bp_read1\"",
"]",
"=",
"value",
"if",
"\"Read 2 with adapter:\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"4",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"trim_adapter_bp_read2\"",
"]",
"=",
"value",
"if",
"\"Total read pairs processed:\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"4",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_raw\"",
"]",
"=",
"value",
"if",
"\"Pairs that were too short\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"5",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_filtered_by_minlen\"",
"]",
"=",
"value",
"if",
"\"Pairs with too many N\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"5",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_filtered_by_Ns\"",
"]",
"=",
"value",
"if",
"\"Pairs written (passing filters):\"",
"in",
"line",
":",
"value",
"=",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"4",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
")",
"sample",
".",
"stats_dfs",
".",
"s2",
"[",
"\"reads_passed_filter\"",
"]",
"=",
"value",
"## save to stats summary",
"if",
"sample",
".",
"stats_dfs",
".",
"s2",
".",
"reads_passed_filter",
":",
"sample",
".",
"stats",
".",
"state",
"=",
"2",
"sample",
".",
"stats",
".",
"reads_passed_filter",
"=",
"sample",
".",
"stats_dfs",
".",
"s2",
".",
"reads_passed_filter",
"sample",
".",
"files",
".",
"edits",
"=",
"[",
"(",
"OPJ",
"(",
"data",
".",
"dirs",
".",
"edits",
",",
"sample",
".",
"name",
"+",
"\".trimmed_R1_.fastq.gz\"",
")",
",",
"OPJ",
"(",
"data",
".",
"dirs",
".",
"edits",
",",
"sample",
".",
"name",
"+",
"\".trimmed_R2_.fastq.gz\"",
")",
")",
"]",
"else",
":",
"print",
"(",
"\"No reads passed filtering in Sample: {}\"",
".",
"format",
"(",
"sample",
".",
"name",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
cutadaptit_single
|
Applies quality and adapter filters to reads using cutadapt. If the ipyrad
filter param is set to 0 then it only filters to hard trim edges and uses
mintrimlen. If filter=1, we add quality filters. If filter=2 we add
adapter filters.
|
ipyrad/assemble/rawedit.py
|
def cutadaptit_single(data, sample):
"""
Applies quality and adapter filters to reads using cutadapt. If the ipyrad
filter param is set to 0 then it only filters to hard trim edges and uses
mintrimlen. If filter=1, we add quality filters. If filter=2 we add
adapter filters.
"""
sname = sample.name
## if (GBS, ddRAD) we look for the second cut site + adapter. For single-end
## data we don't bother trying to remove the second barcode since it's not
## as critical as with PE data.
if data.paramsdict["datatype"] == "rad":
adapter = data._hackersonly["p3_adapter"]
else:
## if GBS then the barcode can also be on the other side.
if data.paramsdict["datatype"] == "gbs":
## make full adapter (-revcompcut-revcompbarcode-adapter)
## and add adapter without revcompbarcode
if data.barcodes:
adapter = \
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ fullcomp(data.barcodes[sample.name])[::-1] \
+ data._hackersonly["p3_adapter"]
                ## add incomplete adapter to extras (-revcompcut-adapter)
data._hackersonly["p3_adapters_extra"].append(
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ data._hackersonly["p3_adapter"])
else:
LOGGER.warning("No barcode information present, and is therefore not "+\
"being used for adapter trimming of SE gbs data.")
## else no search for barcodes on 3'
adapter = \
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ data._hackersonly["p3_adapter"]
else:
adapter = \
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ data._hackersonly["p3_adapter"]
## get length trim parameter from new or older version of ipyrad params
trim5r1 = trim3r1 = []
if data.paramsdict.get("trim_reads"):
trimlen = data.paramsdict.get("trim_reads")
## trim 5' end
if trimlen[0]:
trim5r1 = ["-u", str(trimlen[0])]
if trimlen[1] < 0:
trim3r1 = ["-u", str(trimlen[1])]
if trimlen[1] > 0:
trim3r1 = ["--length", str(trimlen[1])]
else:
trimlen = data.paramsdict.get("edit_cutsites")
trim5r1 = ["--cut", str(trimlen[0])]
## testing new 'trim_reads' setting
cmdf1 = ["cutadapt"]
if trim5r1:
cmdf1 += trim5r1
if trim3r1:
cmdf1 += trim3r1
cmdf1 += ["--minimum-length", str(data.paramsdict["filter_min_trim_len"]),
"--max-n", str(data.paramsdict["max_low_qual_bases"]),
"--trim-n",
"--output", OPJ(data.dirs.edits, sname+".trimmed_R1_.fastq.gz"),
sample.files.concat[0][0]]
if int(data.paramsdict["filter_adapters"]):
## NEW: only quality trim the 3' end for SE data.
cmdf1.insert(1, "20")
cmdf1.insert(1, "-q")
cmdf1.insert(1, str(data.paramsdict["phred_Qscore_offset"]))
cmdf1.insert(1, "--quality-base")
## if filter_adapters==3 then p3_adapters_extra will already have extra
## poly adapters added to its list.
if int(data.paramsdict["filter_adapters"]) > 1:
## first enter extra cuts (order of input is reversed)
for extracut in list(set(data._hackersonly["p3_adapters_extra"]))[::-1]:
cmdf1.insert(1, extracut)
cmdf1.insert(1, "-a")
## then put the main cut so it appears first in command
cmdf1.insert(1, adapter)
cmdf1.insert(1, "-a")
## do modifications to read1 and write to tmp file
LOGGER.info(cmdf1)
proc1 = sps.Popen(cmdf1, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True)
try:
res1 = proc1.communicate()[0]
except KeyboardInterrupt:
proc1.kill()
raise KeyboardInterrupt
## raise errors if found
if proc1.returncode:
raise IPyradWarningExit(" error in {}\n {}".format(" ".join(cmdf1), res1))
## return result string to be parsed outside of engine
return res1
|
def cutadaptit_single(data, sample):
"""
Applies quality and adapter filters to reads using cutadapt. If the ipyrad
filter param is set to 0 then it only filters to hard trim edges and uses
mintrimlen. If filter=1, we add quality filters. If filter=2 we add
adapter filters.
"""
sname = sample.name
## if (GBS, ddRAD) we look for the second cut site + adapter. For single-end
## data we don't bother trying to remove the second barcode since it's not
## as critical as with PE data.
if data.paramsdict["datatype"] == "rad":
adapter = data._hackersonly["p3_adapter"]
else:
## if GBS then the barcode can also be on the other side.
if data.paramsdict["datatype"] == "gbs":
## make full adapter (-revcompcut-revcompbarcode-adapter)
## and add adapter without revcompbarcode
if data.barcodes:
adapter = \
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ fullcomp(data.barcodes[sample.name])[::-1] \
+ data._hackersonly["p3_adapter"]
                ## add incomplete adapter to extras (-revcompcut-adapter)
data._hackersonly["p3_adapters_extra"].append(
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ data._hackersonly["p3_adapter"])
else:
LOGGER.warning("No barcode information present, and is therefore not "+\
"being used for adapter trimming of SE gbs data.")
## else no search for barcodes on 3'
adapter = \
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ data._hackersonly["p3_adapter"]
else:
adapter = \
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ data._hackersonly["p3_adapter"]
## get length trim parameter from new or older version of ipyrad params
trim5r1 = trim3r1 = []
if data.paramsdict.get("trim_reads"):
trimlen = data.paramsdict.get("trim_reads")
## trim 5' end
if trimlen[0]:
trim5r1 = ["-u", str(trimlen[0])]
if trimlen[1] < 0:
trim3r1 = ["-u", str(trimlen[1])]
if trimlen[1] > 0:
trim3r1 = ["--length", str(trimlen[1])]
else:
trimlen = data.paramsdict.get("edit_cutsites")
trim5r1 = ["--cut", str(trimlen[0])]
## testing new 'trim_reads' setting
cmdf1 = ["cutadapt"]
if trim5r1:
cmdf1 += trim5r1
if trim3r1:
cmdf1 += trim3r1
cmdf1 += ["--minimum-length", str(data.paramsdict["filter_min_trim_len"]),
"--max-n", str(data.paramsdict["max_low_qual_bases"]),
"--trim-n",
"--output", OPJ(data.dirs.edits, sname+".trimmed_R1_.fastq.gz"),
sample.files.concat[0][0]]
if int(data.paramsdict["filter_adapters"]):
## NEW: only quality trim the 3' end for SE data.
cmdf1.insert(1, "20")
cmdf1.insert(1, "-q")
cmdf1.insert(1, str(data.paramsdict["phred_Qscore_offset"]))
cmdf1.insert(1, "--quality-base")
## if filter_adapters==3 then p3_adapters_extra will already have extra
## poly adapters added to its list.
if int(data.paramsdict["filter_adapters"]) > 1:
## first enter extra cuts (order of input is reversed)
for extracut in list(set(data._hackersonly["p3_adapters_extra"]))[::-1]:
cmdf1.insert(1, extracut)
cmdf1.insert(1, "-a")
## then put the main cut so it appears first in command
cmdf1.insert(1, adapter)
cmdf1.insert(1, "-a")
## do modifications to read1 and write to tmp file
LOGGER.info(cmdf1)
proc1 = sps.Popen(cmdf1, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True)
try:
res1 = proc1.communicate()[0]
except KeyboardInterrupt:
proc1.kill()
raise KeyboardInterrupt
## raise errors if found
if proc1.returncode:
raise IPyradWarningExit(" error in {}\n {}".format(" ".join(cmdf1), res1))
## return result string to be parsed outside of engine
return res1
|
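For the GBS branch of cutadaptit_single above, the 3' adapter to trim is the reverse complement of the second cut site, then the reverse complement of the sample barcode, then the Illumina adapter. A toy sketch of that assembly follows; revcomp here is a simplified stand-in for ipyrad's fullcomp()[::-1], the sequences in the usage comment are made up, and full IUPAC ambiguity handling is omitted.

_COMP = str.maketrans("ACGTacgt", "TGCAtgca")

def revcomp(seq):
    # Reverse complement for plain A/C/G/T; IUPAC ambiguity codes pass
    # through unchanged (only W and S are their own complements).
    return seq.translate(_COMP)[::-1]

def gbs_adapter(cut2, barcode, p3_adapter):
    # -revcompcut-revcompbarcode-adapter, as described in the comments above.
    return revcomp(cut2) + revcomp(barcode) + p3_adapter

# e.g. gbs_adapter("CWGC", "TTAGGCA", "AGATCGGAAGAGC")
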
[
"Applies",
"quality",
"and",
"adapter",
"filters",
"to",
"reads",
"using",
"cutadapt",
".",
"If",
"the",
"ipyrad",
"filter",
"param",
"is",
"set",
"to",
"0",
"then",
"it",
"only",
"filters",
"to",
"hard",
"trim",
"edges",
"and",
"uses",
"mintrimlen",
".",
"If",
"filter",
"=",
"1",
"we",
"add",
"quality",
"filters",
".",
"If",
"filter",
"=",
"2",
"we",
"add",
"adapter",
"filters",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L171-L273
|
[
"def",
"cutadaptit_single",
"(",
"data",
",",
"sample",
")",
":",
"sname",
"=",
"sample",
".",
"name",
"## if (GBS, ddRAD) we look for the second cut site + adapter. For single-end",
"## data we don't bother trying to remove the second barcode since it's not",
"## as critical as with PE data.",
"if",
"data",
".",
"paramsdict",
"[",
"\"datatype\"",
"]",
"==",
"\"rad\"",
":",
"adapter",
"=",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapter\"",
"]",
"else",
":",
"## if GBS then the barcode can also be on the other side. ",
"if",
"data",
".",
"paramsdict",
"[",
"\"datatype\"",
"]",
"==",
"\"gbs\"",
":",
"## make full adapter (-revcompcut-revcompbarcode-adapter)",
"## and add adapter without revcompbarcode",
"if",
"data",
".",
"barcodes",
":",
"adapter",
"=",
"fullcomp",
"(",
"data",
".",
"paramsdict",
"[",
"\"restriction_overhang\"",
"]",
"[",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"+",
"fullcomp",
"(",
"data",
".",
"barcodes",
"[",
"sample",
".",
"name",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"+",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapter\"",
"]",
"## add incomplete adapter to extras (-recompcut-adapter)",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapters_extra\"",
"]",
".",
"append",
"(",
"fullcomp",
"(",
"data",
".",
"paramsdict",
"[",
"\"restriction_overhang\"",
"]",
"[",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"+",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapter\"",
"]",
")",
"else",
":",
"LOGGER",
".",
"warning",
"(",
"\"No barcode information present, and is therefore not \"",
"+",
"\"being used for adapter trimming of SE gbs data.\"",
")",
"## else no search for barcodes on 3'",
"adapter",
"=",
"fullcomp",
"(",
"data",
".",
"paramsdict",
"[",
"\"restriction_overhang\"",
"]",
"[",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"+",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapter\"",
"]",
"else",
":",
"adapter",
"=",
"fullcomp",
"(",
"data",
".",
"paramsdict",
"[",
"\"restriction_overhang\"",
"]",
"[",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"+",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapter\"",
"]",
"## get length trim parameter from new or older version of ipyrad params",
"trim5r1",
"=",
"trim3r1",
"=",
"[",
"]",
"if",
"data",
".",
"paramsdict",
".",
"get",
"(",
"\"trim_reads\"",
")",
":",
"trimlen",
"=",
"data",
".",
"paramsdict",
".",
"get",
"(",
"\"trim_reads\"",
")",
"## trim 5' end",
"if",
"trimlen",
"[",
"0",
"]",
":",
"trim5r1",
"=",
"[",
"\"-u\"",
",",
"str",
"(",
"trimlen",
"[",
"0",
"]",
")",
"]",
"if",
"trimlen",
"[",
"1",
"]",
"<",
"0",
":",
"trim3r1",
"=",
"[",
"\"-u\"",
",",
"str",
"(",
"trimlen",
"[",
"1",
"]",
")",
"]",
"if",
"trimlen",
"[",
"1",
"]",
">",
"0",
":",
"trim3r1",
"=",
"[",
"\"--length\"",
",",
"str",
"(",
"trimlen",
"[",
"1",
"]",
")",
"]",
"else",
":",
"trimlen",
"=",
"data",
".",
"paramsdict",
".",
"get",
"(",
"\"edit_cutsites\"",
")",
"trim5r1",
"=",
"[",
"\"--cut\"",
",",
"str",
"(",
"trimlen",
"[",
"0",
"]",
")",
"]",
"## testing new 'trim_reads' setting",
"cmdf1",
"=",
"[",
"\"cutadapt\"",
"]",
"if",
"trim5r1",
":",
"cmdf1",
"+=",
"trim5r1",
"if",
"trim3r1",
":",
"cmdf1",
"+=",
"trim3r1",
"cmdf1",
"+=",
"[",
"\"--minimum-length\"",
",",
"str",
"(",
"data",
".",
"paramsdict",
"[",
"\"filter_min_trim_len\"",
"]",
")",
",",
"\"--max-n\"",
",",
"str",
"(",
"data",
".",
"paramsdict",
"[",
"\"max_low_qual_bases\"",
"]",
")",
",",
"\"--trim-n\"",
",",
"\"--output\"",
",",
"OPJ",
"(",
"data",
".",
"dirs",
".",
"edits",
",",
"sname",
"+",
"\".trimmed_R1_.fastq.gz\"",
")",
",",
"sample",
".",
"files",
".",
"concat",
"[",
"0",
"]",
"[",
"0",
"]",
"]",
"if",
"int",
"(",
"data",
".",
"paramsdict",
"[",
"\"filter_adapters\"",
"]",
")",
":",
"## NEW: only quality trim the 3' end for SE data.",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"20\"",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"-q\"",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"str",
"(",
"data",
".",
"paramsdict",
"[",
"\"phred_Qscore_offset\"",
"]",
")",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"--quality-base\"",
")",
"## if filter_adapters==3 then p3_adapters_extra will already have extra",
"## poly adapters added to its list. ",
"if",
"int",
"(",
"data",
".",
"paramsdict",
"[",
"\"filter_adapters\"",
"]",
")",
">",
"1",
":",
"## first enter extra cuts (order of input is reversed)",
"for",
"extracut",
"in",
"list",
"(",
"set",
"(",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapters_extra\"",
"]",
")",
")",
"[",
":",
":",
"-",
"1",
"]",
":",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"extracut",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"-a\"",
")",
"## then put the main cut so it appears first in command",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"adapter",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"-a\"",
")",
"## do modifications to read1 and write to tmp file",
"LOGGER",
".",
"info",
"(",
"cmdf1",
")",
"proc1",
"=",
"sps",
".",
"Popen",
"(",
"cmdf1",
",",
"stderr",
"=",
"sps",
".",
"STDOUT",
",",
"stdout",
"=",
"sps",
".",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"try",
":",
"res1",
"=",
"proc1",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"except",
"KeyboardInterrupt",
":",
"proc1",
".",
"kill",
"(",
")",
"raise",
"KeyboardInterrupt",
"## raise errors if found",
"if",
"proc1",
".",
"returncode",
":",
"raise",
"IPyradWarningExit",
"(",
"\" error in {}\\n {}\"",
".",
"format",
"(",
"\" \"",
".",
"join",
"(",
"cmdf1",
")",
",",
"res1",
")",
")",
"## return result string to be parsed outside of engine",
"return",
"res1"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
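The trim logic in cutadaptit_single above maps the two read-1 values of `trim_reads` onto cutadapt flags. A minimal stand-alone sketch of that mapping (the function name `trim_args_r1` and the example tuples are invented for illustration and are not part of ipyrad):

def trim_args_r1(trimlen):
    """Map (5' trim, 3' trim/length) values onto cutadapt flags, as in the code above."""
    trim5r1, trim3r1 = [], []
    if trimlen[0]:
        trim5r1 = ["-u", str(trimlen[0])]        # remove N bases from the 5' end
    if trimlen[1] < 0:
        trim3r1 = ["-u", str(trimlen[1])]        # a negative value trims from the 3' end
    if trimlen[1] > 0:
        trim3r1 = ["--length", str(trimlen[1])]  # a positive value hard-truncates the read
    return trim5r1 + trim3r1

# trim_args_r1((5, -10)) -> ['-u', '5', '-u', '-10']
# trim_args_r1((0, 75))  -> ['--length', '75']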
valid
|
cutadaptit_pairs
|
Applies trim & filters to pairs, including adapter detection. If we have
barcode information then we use it to trim reversecut+bcode+adapter from
reverse read, if not then we have to apply a more general cut to make sure
we remove the barcode, this uses wildcards and so will have more false
positives that trim a little extra from the ends of reads. Should we add
a warning about this when filter_adapters=2 and no barcodes?
|
ipyrad/assemble/rawedit.py
|
def cutadaptit_pairs(data, sample):
"""
Applies trim & filters to pairs, including adapter detection. If we have
barcode information then we use it to trim reversecut+bcode+adapter from
reverse read, if not then we have to apply a more general cut to make sure
we remove the barcode, this uses wildcards and so will have more false
positives that trim a little extra from the ends of reads. Should we add
a warning about this when filter_adapters=2 and no barcodes?
"""
LOGGER.debug("Entering cutadaptit_pairs - {}".format(sample.name))
sname = sample.name
## applied to read pairs
#trim_r1 = str(data.paramsdict["edit_cutsites"][0])
#trim_r2 = str(data.paramsdict["edit_cutsites"][1])
finput_r1 = sample.files.concat[0][0]
finput_r2 = sample.files.concat[0][1]
## Get adapter sequences. This is very important. For the forward adapter
## we don't care all that much about getting the sequence just before the
## Illumina adapter, b/c it will either be random (in RAD), or the reverse
## cut site of cut1 or cut2 (gbs or ddrad). Either way, we can still trim it
## off later in step7 with trim overhang if we want. And it should be invar-
## iable unless the cut site has an ambiguous char. The reverse adapter is
## super important, however b/c it can contain the inline barcode and
## revcomp cut site. We def want to trim out the barcode, and ideally the
## cut site too to be safe. Problem is we don't always know the barcode if
## users demultiplexed their data elsewhere. So, if barcode is missing we
## do a very fuzzy match before the adapter and trim it out.
## this just got more complicated now that we allow merging technical
## replicates in step 1 since a single sample might have multiple barcodes
## associated with it and so we need to search for multiple adapter+barcode
## combinations.
## We will assume that if they are 'linking_barcodes()' here then there are
## no technical replicates in the barcodes file. If there ARE technical
## replicates, then they should run step1 so they are merged, in which case
## the sample specific barcodes will be saved to each Sample under its
## .barcode attribute as a list.
if not data.barcodes:
## try linking barcodes again in case user just added a barcodes path
## after receiving the warning. We assume no technical replicates here.
try:
data._link_barcodes()
except Exception as inst:
LOGGER.warning(" error adding barcodes info: %s", inst)
## barcodes are present meaning they were parsed to the samples in step 1.
if data.barcodes:
try:
adapter1 = fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ data._hackersonly["p3_adapter"]
if isinstance(sample.barcode, list):
bcode = fullcomp(sample.barcode[0])[::-1]
elif isinstance(data.barcodes[sample.name], list):
bcode = fullcomp(data.barcodes[sample.name][0][::-1])
else:
bcode = fullcomp(data.barcodes[sample.name])[::-1]
## add full adapter (-revcompcut-revcompbcode-adapter)
adapter2 = fullcomp(data.paramsdict["restriction_overhang"][0])[::-1] \
+ bcode \
+ data._hackersonly["p5_adapter"]
except KeyError as inst:
msg = """
Sample name does not exist in the barcode file. The name in the barcode file
for each sample must exactly equal the raw file name for the sample minus
`_R1`. So for example a sample called WatDo_PipPrep_R1_100.fq.gz must
be referenced in the barcode file as WatDo_PipPrep_100. The name in your
barcode file for this sample must match: {}
""".format(sample.name)
LOGGER.error(msg)
raise IPyradWarningExit(msg)
else:
print(NO_BARS_GBS_WARNING)
#adapter1 = fullcomp(data.paramsdict["restriction_overhang"][1])[::-1]+\
# data._hackersonly["p3_adapter"]
#adapter2 = "XXX"
adapter1 = data._hackersonly["p3_adapter"]
adapter2 = fullcomp(data._hackersonly["p5_adapter"])
## parse trim_reads
trim5r1 = trim5r2 = trim3r1 = trim3r2 = []
if data.paramsdict.get("trim_reads"):
trimlen = data.paramsdict.get("trim_reads")
## trim 5' end
if trimlen[0]:
trim5r1 = ["-u", str(trimlen[0])]
if trimlen[1] < 0:
trim3r1 = ["-u", str(trimlen[1])]
if trimlen[1] > 0:
trim3r1 = ["--length", str(trimlen[1])]
## legacy support for trimlen = 0,0 default
if len(trimlen) > 2:
if trimlen[2]:
trim5r2 = ["-U", str(trimlen[2])]
if len(trimlen) > 3:
if trimlen[3]:
if trimlen[3] < 0:
trim3r2 = ["-U", str(trimlen[3])]
if trimlen[3] > 0:
trim3r2 = ["--length", str(trimlen[3])]
else:
## legacy support
trimlen = data.paramsdict.get("edit_cutsites")
trim5r1 = ["-u", str(trimlen[0])]
trim5r2 = ["-U", str(trimlen[1])]
## testing new 'trim_reads' setting
cmdf1 = ["cutadapt"]
if trim5r1:
cmdf1 += trim5r1
if trim3r1:
cmdf1 += trim3r1
if trim5r2:
cmdf1 += trim5r2
if trim3r2:
cmdf1 += trim3r2
cmdf1 += ["--trim-n",
"--max-n", str(data.paramsdict["max_low_qual_bases"]),
"--minimum-length", str(data.paramsdict["filter_min_trim_len"]),
"-o", OPJ(data.dirs.edits, sname+".trimmed_R1_.fastq.gz"),
"-p", OPJ(data.dirs.edits, sname+".trimmed_R2_.fastq.gz"),
finput_r1,
finput_r2]
## additional args
if int(data.paramsdict["filter_adapters"]) < 2:
        ## add a dummy adapter to let cutadapt know we are not using legacy-mode
cmdf1.insert(1, "XXX")
cmdf1.insert(1, "-A")
if int(data.paramsdict["filter_adapters"]):
cmdf1.insert(1, "20,20")
cmdf1.insert(1, "-q")
cmdf1.insert(1, str(data.paramsdict["phred_Qscore_offset"]))
cmdf1.insert(1, "--quality-base")
if int(data.paramsdict["filter_adapters"]) > 1:
## if technical replicates then add other copies
if isinstance(sample.barcode, list):
for extrabar in sample.barcode[1:]:
data._hackersonly["p5_adapters_extra"] += \
fullcomp(data.paramsdict["restriction_overhang"][0])[::-1] + \
fullcomp(extrabar)[::-1] + \
data._hackersonly["p5_adapter"]
data._hackersonly["p5_adapters_extra"] += \
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] + \
data._hackersonly["p3_adapter"]
## first enter extra cuts
zcut1 = list(set(data._hackersonly["p3_adapters_extra"]))[::-1]
zcut2 = list(set(data._hackersonly["p5_adapters_extra"]))[::-1]
for ecut1, ecut2 in zip(zcut1, zcut2):
cmdf1.insert(1, ecut1)
cmdf1.insert(1, "-a")
cmdf1.insert(1, ecut2)
cmdf1.insert(1, "-A")
## then put the main cut first
cmdf1.insert(1, adapter1)
cmdf1.insert(1, '-a')
cmdf1.insert(1, adapter2)
cmdf1.insert(1, '-A')
## do modifications to read1 and write to tmp file
LOGGER.debug(" ".join(cmdf1))
#sys.exit()
try:
proc1 = sps.Popen(cmdf1, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True)
res1 = proc1.communicate()[0]
except KeyboardInterrupt:
proc1.kill()
LOGGER.info("this is where I want it to interrupt")
raise KeyboardInterrupt()
## raise errors if found
if proc1.returncode:
raise IPyradWarningExit(" error [returncode={}]: {}\n{}"\
.format(proc1.returncode, " ".join(cmdf1), res1))
LOGGER.debug("Exiting cutadaptit_pairs - {}".format(sname))
## return results string to be parsed outside of engine
return res1
|
def cutadaptit_pairs(data, sample):
"""
Applies trim & filters to pairs, including adapter detection. If we have
barcode information then we use it to trim reversecut+bcode+adapter from
reverse read, if not then we have to apply a more general cut to make sure
we remove the barcode, this uses wildcards and so will have more false
positives that trim a little extra from the ends of reads. Should we add
a warning about this when filter_adapters=2 and no barcodes?
"""
LOGGER.debug("Entering cutadaptit_pairs - {}".format(sample.name))
sname = sample.name
## applied to read pairs
#trim_r1 = str(data.paramsdict["edit_cutsites"][0])
#trim_r2 = str(data.paramsdict["edit_cutsites"][1])
finput_r1 = sample.files.concat[0][0]
finput_r2 = sample.files.concat[0][1]
## Get adapter sequences. This is very important. For the forward adapter
## we don't care all that much about getting the sequence just before the
## Illumina adapter, b/c it will either be random (in RAD), or the reverse
## cut site of cut1 or cut2 (gbs or ddrad). Either way, we can still trim it
## off later in step7 with trim overhang if we want. And it should be invar-
## iable unless the cut site has an ambiguous char. The reverse adapter is
## super important, however b/c it can contain the inline barcode and
## revcomp cut site. We def want to trim out the barcode, and ideally the
## cut site too to be safe. Problem is we don't always know the barcode if
## users demultiplexed their data elsewhere. So, if barcode is missing we
## do a very fuzzy match before the adapter and trim it out.
## this just got more complicated now that we allow merging technical
## replicates in step 1 since a single sample might have multiple barcodes
## associated with it and so we need to search for multiple adapter+barcode
## combinations.
## We will assume that if they are 'linking_barcodes()' here then there are
## no technical replicates in the barcodes file. If there ARE technical
## replicates, then they should run step1 so they are merged, in which case
## the sample specific barcodes will be saved to each Sample under its
## .barcode attribute as a list.
if not data.barcodes:
## try linking barcodes again in case user just added a barcodes path
## after receiving the warning. We assume no technical replicates here.
try:
data._link_barcodes()
except Exception as inst:
LOGGER.warning(" error adding barcodes info: %s", inst)
## barcodes are present meaning they were parsed to the samples in step 1.
if data.barcodes:
try:
adapter1 = fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ data._hackersonly["p3_adapter"]
if isinstance(sample.barcode, list):
bcode = fullcomp(sample.barcode[0])[::-1]
elif isinstance(data.barcodes[sample.name], list):
bcode = fullcomp(data.barcodes[sample.name][0][::-1])
else:
bcode = fullcomp(data.barcodes[sample.name])[::-1]
## add full adapter (-revcompcut-revcompbcode-adapter)
adapter2 = fullcomp(data.paramsdict["restriction_overhang"][0])[::-1] \
+ bcode \
+ data._hackersonly["p5_adapter"]
except KeyError as inst:
msg = """
Sample name does not exist in the barcode file. The name in the barcode file
for each sample must exactly equal the raw file name for the sample minus
`_R1`. So for example a sample called WatDo_PipPrep_R1_100.fq.gz must
be referenced in the barcode file as WatDo_PipPrep_100. The name in your
barcode file for this sample must match: {}
""".format(sample.name)
LOGGER.error(msg)
raise IPyradWarningExit(msg)
else:
print(NO_BARS_GBS_WARNING)
#adapter1 = fullcomp(data.paramsdict["restriction_overhang"][1])[::-1]+\
# data._hackersonly["p3_adapter"]
#adapter2 = "XXX"
adapter1 = data._hackersonly["p3_adapter"]
adapter2 = fullcomp(data._hackersonly["p5_adapter"])
## parse trim_reads
trim5r1 = trim5r2 = trim3r1 = trim3r2 = []
if data.paramsdict.get("trim_reads"):
trimlen = data.paramsdict.get("trim_reads")
## trim 5' end
if trimlen[0]:
trim5r1 = ["-u", str(trimlen[0])]
if trimlen[1] < 0:
trim3r1 = ["-u", str(trimlen[1])]
if trimlen[1] > 0:
trim3r1 = ["--length", str(trimlen[1])]
## legacy support for trimlen = 0,0 default
if len(trimlen) > 2:
if trimlen[2]:
trim5r2 = ["-U", str(trimlen[2])]
if len(trimlen) > 3:
if trimlen[3]:
if trimlen[3] < 0:
trim3r2 = ["-U", str(trimlen[3])]
if trimlen[3] > 0:
trim3r2 = ["--length", str(trimlen[3])]
else:
## legacy support
trimlen = data.paramsdict.get("edit_cutsites")
trim5r1 = ["-u", str(trimlen[0])]
trim5r2 = ["-U", str(trimlen[1])]
## testing new 'trim_reads' setting
cmdf1 = ["cutadapt"]
if trim5r1:
cmdf1 += trim5r1
if trim3r1:
cmdf1 += trim3r1
if trim5r2:
cmdf1 += trim5r2
if trim3r2:
cmdf1 += trim3r2
cmdf1 += ["--trim-n",
"--max-n", str(data.paramsdict["max_low_qual_bases"]),
"--minimum-length", str(data.paramsdict["filter_min_trim_len"]),
"-o", OPJ(data.dirs.edits, sname+".trimmed_R1_.fastq.gz"),
"-p", OPJ(data.dirs.edits, sname+".trimmed_R2_.fastq.gz"),
finput_r1,
finput_r2]
## additional args
if int(data.paramsdict["filter_adapters"]) < 2:
        ## add a dummy adapter to let cutadapt know we are not using legacy-mode
cmdf1.insert(1, "XXX")
cmdf1.insert(1, "-A")
if int(data.paramsdict["filter_adapters"]):
cmdf1.insert(1, "20,20")
cmdf1.insert(1, "-q")
cmdf1.insert(1, str(data.paramsdict["phred_Qscore_offset"]))
cmdf1.insert(1, "--quality-base")
if int(data.paramsdict["filter_adapters"]) > 1:
## if technical replicates then add other copies
if isinstance(sample.barcode, list):
for extrabar in sample.barcode[1:]:
data._hackersonly["p5_adapters_extra"] += \
fullcomp(data.paramsdict["restriction_overhang"][0])[::-1] + \
fullcomp(extrabar)[::-1] + \
data._hackersonly["p5_adapter"]
data._hackersonly["p5_adapters_extra"] += \
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] + \
data._hackersonly["p3_adapter"]
## first enter extra cuts
zcut1 = list(set(data._hackersonly["p3_adapters_extra"]))[::-1]
zcut2 = list(set(data._hackersonly["p5_adapters_extra"]))[::-1]
for ecut1, ecut2 in zip(zcut1, zcut2):
cmdf1.insert(1, ecut1)
cmdf1.insert(1, "-a")
cmdf1.insert(1, ecut2)
cmdf1.insert(1, "-A")
## then put the main cut first
cmdf1.insert(1, adapter1)
cmdf1.insert(1, '-a')
cmdf1.insert(1, adapter2)
cmdf1.insert(1, '-A')
## do modifications to read1 and write to tmp file
LOGGER.debug(" ".join(cmdf1))
#sys.exit()
try:
proc1 = sps.Popen(cmdf1, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True)
res1 = proc1.communicate()[0]
except KeyboardInterrupt:
proc1.kill()
LOGGER.info("this is where I want it to interrupt")
raise KeyboardInterrupt()
## raise errors if found
if proc1.returncode:
raise IPyradWarningExit(" error [returncode={}]: {}\n{}"\
.format(proc1.returncode, " ".join(cmdf1), res1))
LOGGER.debug("Exiting cutadaptit_pairs - {}".format(sname))
## return results string to be parsed outside of engine
return res1
|
[
"Applies",
"trim",
"&",
"filters",
"to",
"pairs",
"including",
"adapter",
"detection",
".",
"If",
"we",
"have",
"barcode",
"information",
"then",
"we",
"use",
"it",
"to",
"trim",
"reversecut",
"+",
"bcode",
"+",
"adapter",
"from",
"reverse",
"read",
"if",
"not",
"then",
"we",
"have",
"to",
"apply",
"a",
"more",
"general",
"cut",
"to",
"make",
"sure",
"we",
"remove",
"the",
"barcode",
"this",
"uses",
"wildcards",
"and",
"so",
"will",
"have",
"more",
"false",
"positives",
"that",
"trim",
"a",
"little",
"extra",
"from",
"the",
"ends",
"of",
"reads",
".",
"Should",
"we",
"add",
"a",
"warning",
"about",
"this",
"when",
"filter_adapters",
"=",
"2",
"and",
"no",
"barcodes?"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L278-L466
|
[
"def",
"cutadaptit_pairs",
"(",
"data",
",",
"sample",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"Entering cutadaptit_pairs - {}\"",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
"sname",
"=",
"sample",
".",
"name",
"## applied to read pairs",
"#trim_r1 = str(data.paramsdict[\"edit_cutsites\"][0])",
"#trim_r2 = str(data.paramsdict[\"edit_cutsites\"][1])",
"finput_r1",
"=",
"sample",
".",
"files",
".",
"concat",
"[",
"0",
"]",
"[",
"0",
"]",
"finput_r2",
"=",
"sample",
".",
"files",
".",
"concat",
"[",
"0",
"]",
"[",
"1",
"]",
"## Get adapter sequences. This is very important. For the forward adapter",
"## we don't care all that much about getting the sequence just before the ",
"## Illumina adapter, b/c it will either be random (in RAD), or the reverse",
"## cut site of cut1 or cut2 (gbs or ddrad). Either way, we can still trim it ",
"## off later in step7 with trim overhang if we want. And it should be invar-",
"## iable unless the cut site has an ambiguous char. The reverse adapter is ",
"## super important, however b/c it can contain the inline barcode and ",
"## revcomp cut site. We def want to trim out the barcode, and ideally the ",
"## cut site too to be safe. Problem is we don't always know the barcode if ",
"## users demultiplexed their data elsewhere. So, if barcode is missing we ",
"## do a very fuzzy match before the adapter and trim it out. ",
"## this just got more complicated now that we allow merging technical",
"## replicates in step 1 since a single sample might have multiple barcodes",
"## associated with it and so we need to search for multiple adapter+barcode",
"## combinations.",
"## We will assume that if they are 'linking_barcodes()' here then there are",
"## no technical replicates in the barcodes file. If there ARE technical",
"## replicates, then they should run step1 so they are merged, in which case",
"## the sample specific barcodes will be saved to each Sample under its",
"## .barcode attribute as a list. ",
"if",
"not",
"data",
".",
"barcodes",
":",
"## try linking barcodes again in case user just added a barcodes path",
"## after receiving the warning. We assume no technical replicates here.",
"try",
":",
"data",
".",
"_link_barcodes",
"(",
")",
"except",
"Exception",
"as",
"inst",
":",
"LOGGER",
".",
"warning",
"(",
"\" error adding barcodes info: %s\"",
",",
"inst",
")",
"## barcodes are present meaning they were parsed to the samples in step 1.",
"if",
"data",
".",
"barcodes",
":",
"try",
":",
"adapter1",
"=",
"fullcomp",
"(",
"data",
".",
"paramsdict",
"[",
"\"restriction_overhang\"",
"]",
"[",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"+",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapter\"",
"]",
"if",
"isinstance",
"(",
"sample",
".",
"barcode",
",",
"list",
")",
":",
"bcode",
"=",
"fullcomp",
"(",
"sample",
".",
"barcode",
"[",
"0",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"elif",
"isinstance",
"(",
"data",
".",
"barcodes",
"[",
"sample",
".",
"name",
"]",
",",
"list",
")",
":",
"bcode",
"=",
"fullcomp",
"(",
"data",
".",
"barcodes",
"[",
"sample",
".",
"name",
"]",
"[",
"0",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
"else",
":",
"bcode",
"=",
"fullcomp",
"(",
"data",
".",
"barcodes",
"[",
"sample",
".",
"name",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"## add full adapter (-revcompcut-revcompbcode-adapter)",
"adapter2",
"=",
"fullcomp",
"(",
"data",
".",
"paramsdict",
"[",
"\"restriction_overhang\"",
"]",
"[",
"0",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"+",
"bcode",
"+",
"data",
".",
"_hackersonly",
"[",
"\"p5_adapter\"",
"]",
"except",
"KeyError",
"as",
"inst",
":",
"msg",
"=",
"\"\"\"\n Sample name does not exist in the barcode file. The name in the barcode file\n for each sample must exactly equal the raw file name for the sample minus\n `_R1`. So for example a sample called WatDo_PipPrep_R1_100.fq.gz must\n be referenced in the barcode file as WatDo_PipPrep_100. The name in your\n barcode file for this sample must match: {}\n \"\"\"",
".",
"format",
"(",
"sample",
".",
"name",
")",
"LOGGER",
".",
"error",
"(",
"msg",
")",
"raise",
"IPyradWarningExit",
"(",
"msg",
")",
"else",
":",
"print",
"(",
"NO_BARS_GBS_WARNING",
")",
"#adapter1 = fullcomp(data.paramsdict[\"restriction_overhang\"][1])[::-1]+\\",
"# data._hackersonly[\"p3_adapter\"]",
"#adapter2 = \"XXX\"",
"adapter1",
"=",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapter\"",
"]",
"adapter2",
"=",
"fullcomp",
"(",
"data",
".",
"_hackersonly",
"[",
"\"p5_adapter\"",
"]",
")",
"## parse trim_reads",
"trim5r1",
"=",
"trim5r2",
"=",
"trim3r1",
"=",
"trim3r2",
"=",
"[",
"]",
"if",
"data",
".",
"paramsdict",
".",
"get",
"(",
"\"trim_reads\"",
")",
":",
"trimlen",
"=",
"data",
".",
"paramsdict",
".",
"get",
"(",
"\"trim_reads\"",
")",
"## trim 5' end",
"if",
"trimlen",
"[",
"0",
"]",
":",
"trim5r1",
"=",
"[",
"\"-u\"",
",",
"str",
"(",
"trimlen",
"[",
"0",
"]",
")",
"]",
"if",
"trimlen",
"[",
"1",
"]",
"<",
"0",
":",
"trim3r1",
"=",
"[",
"\"-u\"",
",",
"str",
"(",
"trimlen",
"[",
"1",
"]",
")",
"]",
"if",
"trimlen",
"[",
"1",
"]",
">",
"0",
":",
"trim3r1",
"=",
"[",
"\"--length\"",
",",
"str",
"(",
"trimlen",
"[",
"1",
"]",
")",
"]",
"## legacy support for trimlen = 0,0 default",
"if",
"len",
"(",
"trimlen",
")",
">",
"2",
":",
"if",
"trimlen",
"[",
"2",
"]",
":",
"trim5r2",
"=",
"[",
"\"-U\"",
",",
"str",
"(",
"trimlen",
"[",
"2",
"]",
")",
"]",
"if",
"len",
"(",
"trimlen",
")",
">",
"3",
":",
"if",
"trimlen",
"[",
"3",
"]",
":",
"if",
"trimlen",
"[",
"3",
"]",
"<",
"0",
":",
"trim3r2",
"=",
"[",
"\"-U\"",
",",
"str",
"(",
"trimlen",
"[",
"3",
"]",
")",
"]",
"if",
"trimlen",
"[",
"3",
"]",
">",
"0",
":",
"trim3r2",
"=",
"[",
"\"--length\"",
",",
"str",
"(",
"trimlen",
"[",
"3",
"]",
")",
"]",
"else",
":",
"## legacy support",
"trimlen",
"=",
"data",
".",
"paramsdict",
".",
"get",
"(",
"\"edit_cutsites\"",
")",
"trim5r1",
"=",
"[",
"\"-u\"",
",",
"str",
"(",
"trimlen",
"[",
"0",
"]",
")",
"]",
"trim5r2",
"=",
"[",
"\"-U\"",
",",
"str",
"(",
"trimlen",
"[",
"1",
"]",
")",
"]",
"## testing new 'trim_reads' setting",
"cmdf1",
"=",
"[",
"\"cutadapt\"",
"]",
"if",
"trim5r1",
":",
"cmdf1",
"+=",
"trim5r1",
"if",
"trim3r1",
":",
"cmdf1",
"+=",
"trim3r1",
"if",
"trim5r2",
":",
"cmdf1",
"+=",
"trim5r2",
"if",
"trim3r2",
":",
"cmdf1",
"+=",
"trim3r2",
"cmdf1",
"+=",
"[",
"\"--trim-n\"",
",",
"\"--max-n\"",
",",
"str",
"(",
"data",
".",
"paramsdict",
"[",
"\"max_low_qual_bases\"",
"]",
")",
",",
"\"--minimum-length\"",
",",
"str",
"(",
"data",
".",
"paramsdict",
"[",
"\"filter_min_trim_len\"",
"]",
")",
",",
"\"-o\"",
",",
"OPJ",
"(",
"data",
".",
"dirs",
".",
"edits",
",",
"sname",
"+",
"\".trimmed_R1_.fastq.gz\"",
")",
",",
"\"-p\"",
",",
"OPJ",
"(",
"data",
".",
"dirs",
".",
"edits",
",",
"sname",
"+",
"\".trimmed_R2_.fastq.gz\"",
")",
",",
"finput_r1",
",",
"finput_r2",
"]",
"## additional args",
"if",
"int",
"(",
"data",
".",
"paramsdict",
"[",
"\"filter_adapters\"",
"]",
")",
"<",
"2",
":",
"## add a dummy adapter to let cutadapt know whe are not using legacy-mode",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"XXX\"",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"-A\"",
")",
"if",
"int",
"(",
"data",
".",
"paramsdict",
"[",
"\"filter_adapters\"",
"]",
")",
":",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"20,20\"",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"-q\"",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"str",
"(",
"data",
".",
"paramsdict",
"[",
"\"phred_Qscore_offset\"",
"]",
")",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"--quality-base\"",
")",
"if",
"int",
"(",
"data",
".",
"paramsdict",
"[",
"\"filter_adapters\"",
"]",
")",
">",
"1",
":",
"## if technical replicates then add other copies",
"if",
"isinstance",
"(",
"sample",
".",
"barcode",
",",
"list",
")",
":",
"for",
"extrabar",
"in",
"sample",
".",
"barcode",
"[",
"1",
":",
"]",
":",
"data",
".",
"_hackersonly",
"[",
"\"p5_adapters_extra\"",
"]",
"+=",
"fullcomp",
"(",
"data",
".",
"paramsdict",
"[",
"\"restriction_overhang\"",
"]",
"[",
"0",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"+",
"fullcomp",
"(",
"extrabar",
")",
"[",
":",
":",
"-",
"1",
"]",
"+",
"data",
".",
"_hackersonly",
"[",
"\"p5_adapter\"",
"]",
"data",
".",
"_hackersonly",
"[",
"\"p5_adapters_extra\"",
"]",
"+=",
"fullcomp",
"(",
"data",
".",
"paramsdict",
"[",
"\"restriction_overhang\"",
"]",
"[",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"+",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapter\"",
"]",
"## first enter extra cuts",
"zcut1",
"=",
"list",
"(",
"set",
"(",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapters_extra\"",
"]",
")",
")",
"[",
":",
":",
"-",
"1",
"]",
"zcut2",
"=",
"list",
"(",
"set",
"(",
"data",
".",
"_hackersonly",
"[",
"\"p5_adapters_extra\"",
"]",
")",
")",
"[",
":",
":",
"-",
"1",
"]",
"for",
"ecut1",
",",
"ecut2",
"in",
"zip",
"(",
"zcut1",
",",
"zcut2",
")",
":",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"ecut1",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"-a\"",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"ecut2",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"\"-A\"",
")",
"## then put the main cut first",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"adapter1",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"'-a'",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"adapter2",
")",
"cmdf1",
".",
"insert",
"(",
"1",
",",
"'-A'",
")",
"## do modifications to read1 and write to tmp file",
"LOGGER",
".",
"debug",
"(",
"\" \"",
".",
"join",
"(",
"cmdf1",
")",
")",
"#sys.exit()",
"try",
":",
"proc1",
"=",
"sps",
".",
"Popen",
"(",
"cmdf1",
",",
"stderr",
"=",
"sps",
".",
"STDOUT",
",",
"stdout",
"=",
"sps",
".",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"res1",
"=",
"proc1",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"except",
"KeyboardInterrupt",
":",
"proc1",
".",
"kill",
"(",
")",
"LOGGER",
".",
"info",
"(",
"\"this is where I want it to interrupt\"",
")",
"raise",
"KeyboardInterrupt",
"(",
")",
"## raise errors if found",
"if",
"proc1",
".",
"returncode",
":",
"raise",
"IPyradWarningExit",
"(",
"\" error [returncode={}]: {}\\n{}\"",
".",
"format",
"(",
"proc1",
".",
"returncode",
",",
"\" \"",
".",
"join",
"(",
"cmdf1",
")",
",",
"res1",
")",
")",
"LOGGER",
".",
"debug",
"(",
"\"Exiting cutadaptit_pairs - {}\"",
".",
"format",
"(",
"sname",
")",
")",
"## return results string to be parsed outside of engine",
"return",
"res1"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
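The reverse adapter built in cutadaptit_pairs (reverse-complemented cut site + reverse-complemented inline barcode + p5 adapter) can be illustrated with toy values. The `fullcomp` helper below is a simplified stand-in for ipyrad's own, which also handles ambiguity codes, and the cut site, barcode, and adapter strings are made up:

def fullcomp(seq):
    # toy complement for plain ACGT strings only
    comp = {"A": "T", "C": "G", "G": "C", "T": "A"}
    return "".join(comp[b] for b in seq)

cut1 = "TGCAG"                 # hypothetical restriction_overhang[0]
bcode = "ACGTAA"               # hypothetical inline barcode
p5_adapter = "AGATCGGAAGAGC"   # hypothetical p5 adapter sequence

# same construction as adapter2 above: fullcomp(cut)[::-1] + fullcomp(bcode)[::-1] + adapter
adapter2 = fullcomp(cut1)[::-1] + fullcomp(bcode)[::-1] + p5_adapter
print(adapter2)   # CTGCATTACGTAGATCGGAAGAGC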
valid
|
run2
|
Filter for samples that are already finished with this step, allow others
to run, pass them to parallel client function to filter with cutadapt.
|
ipyrad/assemble/rawedit.py
|
def run2(data, samples, force, ipyclient):
"""
Filter for samples that are already finished with this step, allow others
to run, pass them to parallel client function to filter with cutadapt.
"""
## create output directories
data.dirs.edits = os.path.join(os.path.realpath(
data.paramsdict["project_dir"]),
data.name+"_edits")
if not os.path.exists(data.dirs.edits):
os.makedirs(data.dirs.edits)
## get samples
subsamples = choose_samples(samples, force)
## only allow extra adapters in filters==3,
## and add poly repeats if not in list of adapters
if int(data.paramsdict["filter_adapters"]) == 3:
if not data._hackersonly["p3_adapters_extra"]:
for poly in ["A"*8, "T"*8, "C"*8, "G"*8]:
data._hackersonly["p3_adapters_extra"].append(poly)
if not data._hackersonly["p5_adapters_extra"]:
for poly in ["A"*8, "T"*8, "C"*8, "G"*8]:
data._hackersonly["p5_adapters_extra"].append(poly)
else:
data._hackersonly["p5_adapters_extra"] = []
data._hackersonly["p3_adapters_extra"] = []
## concat is not parallelized (since it's disk limited, generally)
subsamples = concat_reads(data, subsamples, ipyclient)
## cutadapt is parallelized by ncores/2 because cutadapt spawns threads
lbview = ipyclient.load_balanced_view(targets=ipyclient.ids[::2])
run_cutadapt(data, subsamples, lbview)
## cleanup is ...
assembly_cleanup(data)
|
def run2(data, samples, force, ipyclient):
"""
Filter for samples that are already finished with this step, allow others
to run, pass them to parallel client function to filter with cutadapt.
"""
## create output directories
data.dirs.edits = os.path.join(os.path.realpath(
data.paramsdict["project_dir"]),
data.name+"_edits")
if not os.path.exists(data.dirs.edits):
os.makedirs(data.dirs.edits)
## get samples
subsamples = choose_samples(samples, force)
## only allow extra adapters in filters==3,
## and add poly repeats if not in list of adapters
if int(data.paramsdict["filter_adapters"]) == 3:
if not data._hackersonly["p3_adapters_extra"]:
for poly in ["A"*8, "T"*8, "C"*8, "G"*8]:
data._hackersonly["p3_adapters_extra"].append(poly)
if not data._hackersonly["p5_adapters_extra"]:
for poly in ["A"*8, "T"*8, "C"*8, "G"*8]:
data._hackersonly["p5_adapters_extra"].append(poly)
else:
data._hackersonly["p5_adapters_extra"] = []
data._hackersonly["p3_adapters_extra"] = []
## concat is not parallelized (since it's disk limited, generally)
subsamples = concat_reads(data, subsamples, ipyclient)
## cutadapt is parallelized by ncores/2 because cutadapt spawns threads
lbview = ipyclient.load_balanced_view(targets=ipyclient.ids[::2])
run_cutadapt(data, subsamples, lbview)
## cleanup is ...
assembly_cleanup(data)
|
[
"Filter",
"for",
"samples",
"that",
"are",
"already",
"finished",
"with",
"this",
"step",
"allow",
"others",
"to",
"run",
"pass",
"them",
"to",
"parallel",
"client",
"function",
"to",
"filter",
"with",
"cutadapt",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L470-L507
|
[
"def",
"run2",
"(",
"data",
",",
"samples",
",",
"force",
",",
"ipyclient",
")",
":",
"## create output directories ",
"data",
".",
"dirs",
".",
"edits",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"data",
".",
"paramsdict",
"[",
"\"project_dir\"",
"]",
")",
",",
"data",
".",
"name",
"+",
"\"_edits\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"data",
".",
"dirs",
".",
"edits",
")",
":",
"os",
".",
"makedirs",
"(",
"data",
".",
"dirs",
".",
"edits",
")",
"## get samples",
"subsamples",
"=",
"choose_samples",
"(",
"samples",
",",
"force",
")",
"## only allow extra adapters in filters==3, ",
"## and add poly repeats if not in list of adapters",
"if",
"int",
"(",
"data",
".",
"paramsdict",
"[",
"\"filter_adapters\"",
"]",
")",
"==",
"3",
":",
"if",
"not",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapters_extra\"",
"]",
":",
"for",
"poly",
"in",
"[",
"\"A\"",
"*",
"8",
",",
"\"T\"",
"*",
"8",
",",
"\"C\"",
"*",
"8",
",",
"\"G\"",
"*",
"8",
"]",
":",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapters_extra\"",
"]",
".",
"append",
"(",
"poly",
")",
"if",
"not",
"data",
".",
"_hackersonly",
"[",
"\"p5_adapters_extra\"",
"]",
":",
"for",
"poly",
"in",
"[",
"\"A\"",
"*",
"8",
",",
"\"T\"",
"*",
"8",
",",
"\"C\"",
"*",
"8",
",",
"\"G\"",
"*",
"8",
"]",
":",
"data",
".",
"_hackersonly",
"[",
"\"p5_adapters_extra\"",
"]",
".",
"append",
"(",
"poly",
")",
"else",
":",
"data",
".",
"_hackersonly",
"[",
"\"p5_adapters_extra\"",
"]",
"=",
"[",
"]",
"data",
".",
"_hackersonly",
"[",
"\"p3_adapters_extra\"",
"]",
"=",
"[",
"]",
"## concat is not parallelized (since it's disk limited, generally)",
"subsamples",
"=",
"concat_reads",
"(",
"data",
",",
"subsamples",
",",
"ipyclient",
")",
"## cutadapt is parallelized by ncores/2 because cutadapt spawns threads",
"lbview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
"targets",
"=",
"ipyclient",
".",
"ids",
"[",
":",
":",
"2",
"]",
")",
"run_cutadapt",
"(",
"data",
",",
"subsamples",
",",
"lbview",
")",
"## cleanup is ...",
"assembly_cleanup",
"(",
"data",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
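The filter_adapters == 3 branch in run2 seeds the extra-adapter lists with 8-base homopolymers when they are empty, and clears them otherwise. A small self-contained sketch of that step, with a plain dict standing in for data._hackersonly (the function name is invented for the example):

def seed_poly_adapters(hackers, filter_adapters):
    # add poly-A/T/C/G 8-mers only at filter level 3, mirroring run2 above
    if int(filter_adapters) == 3:
        for key in ("p3_adapters_extra", "p5_adapters_extra"):
            if not hackers[key]:
                hackers[key].extend(base * 8 for base in "ATCG")
    else:
        hackers["p3_adapters_extra"] = []
        hackers["p5_adapters_extra"] = []
    return hackers

# seed_poly_adapters({"p3_adapters_extra": [], "p5_adapters_extra": []}, 3)
# -> both lists become ['AAAAAAAA', 'TTTTTTTT', 'CCCCCCCC', 'GGGGGGGG']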
valid
|
concat_reads
|
 concatenate if multiple input files for a single sample
|
ipyrad/assemble/rawedit.py
|
def concat_reads(data, subsamples, ipyclient):
""" concatenate if multiple input files for a single samples """
## concatenate reads if they come from merged assemblies.
if any([len(i.files.fastqs) > 1 for i in subsamples]):
## run on single engine for now
start = time.time()
printstr = " concatenating inputs | {} | s2 |"
finished = 0
catjobs = {}
for sample in subsamples:
if len(sample.files.fastqs) > 1:
catjobs[sample.name] = ipyclient[0].apply(\
concat_multiple_inputs, *(data, sample))
else:
sample.files.concat = sample.files.fastqs
## wait for all to finish
while 1:
finished = sum([i.ready() for i in catjobs.values()])
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(catjobs), finished, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if finished == len(catjobs):
print("")
break
## collect results, which are concat file handles.
for async in catjobs:
if catjobs[async].successful():
data.samples[async].files.concat = catjobs[async].result()
else:
error = catjobs[async].result()#exception()
LOGGER.error("error in step2 concat %s", error)
raise IPyradWarningExit("error in step2 concat: {}".format(error))
else:
for sample in subsamples:
## just copy fastqs handles to concat attribute
sample.files.concat = sample.files.fastqs
return subsamples
|
def concat_reads(data, subsamples, ipyclient):
""" concatenate if multiple input files for a single samples """
## concatenate reads if they come from merged assemblies.
if any([len(i.files.fastqs) > 1 for i in subsamples]):
## run on single engine for now
start = time.time()
printstr = " concatenating inputs | {} | s2 |"
finished = 0
catjobs = {}
for sample in subsamples:
if len(sample.files.fastqs) > 1:
catjobs[sample.name] = ipyclient[0].apply(\
concat_multiple_inputs, *(data, sample))
else:
sample.files.concat = sample.files.fastqs
## wait for all to finish
while 1:
finished = sum([i.ready() for i in catjobs.values()])
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(catjobs), finished, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if finished == len(catjobs):
print("")
break
## collect results, which are concat file handles.
for async in catjobs:
if catjobs[async].successful():
data.samples[async].files.concat = catjobs[async].result()
else:
error = catjobs[async].result()#exception()
LOGGER.error("error in step2 concat %s", error)
raise IPyradWarningExit("error in step2 concat: {}".format(error))
else:
for sample in subsamples:
## just copy fastqs handles to concat attribute
sample.files.concat = sample.files.fastqs
return subsamples
|
[
"concatenate",
"if",
"multiple",
"input",
"files",
"for",
"a",
"single",
"samples"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L525-L565
|
[
"def",
"concat_reads",
"(",
"data",
",",
"subsamples",
",",
"ipyclient",
")",
":",
"## concatenate reads if they come from merged assemblies.",
"if",
"any",
"(",
"[",
"len",
"(",
"i",
".",
"files",
".",
"fastqs",
")",
">",
"1",
"for",
"i",
"in",
"subsamples",
"]",
")",
":",
"## run on single engine for now",
"start",
"=",
"time",
".",
"time",
"(",
")",
"printstr",
"=",
"\" concatenating inputs | {} | s2 |\"",
"finished",
"=",
"0",
"catjobs",
"=",
"{",
"}",
"for",
"sample",
"in",
"subsamples",
":",
"if",
"len",
"(",
"sample",
".",
"files",
".",
"fastqs",
")",
">",
"1",
":",
"catjobs",
"[",
"sample",
".",
"name",
"]",
"=",
"ipyclient",
"[",
"0",
"]",
".",
"apply",
"(",
"concat_multiple_inputs",
",",
"*",
"(",
"data",
",",
"sample",
")",
")",
"else",
":",
"sample",
".",
"files",
".",
"concat",
"=",
"sample",
".",
"files",
".",
"fastqs",
"## wait for all to finish",
"while",
"1",
":",
"finished",
"=",
"sum",
"(",
"[",
"i",
".",
"ready",
"(",
")",
"for",
"i",
"in",
"catjobs",
".",
"values",
"(",
")",
"]",
")",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"len",
"(",
"catjobs",
")",
",",
"finished",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"finished",
"==",
"len",
"(",
"catjobs",
")",
":",
"print",
"(",
"\"\"",
")",
"break",
"## collect results, which are concat file handles.",
"for",
"async",
"in",
"catjobs",
":",
"if",
"catjobs",
"[",
"async",
"]",
".",
"successful",
"(",
")",
":",
"data",
".",
"samples",
"[",
"async",
"]",
".",
"files",
".",
"concat",
"=",
"catjobs",
"[",
"async",
"]",
".",
"result",
"(",
")",
"else",
":",
"error",
"=",
"catjobs",
"[",
"async",
"]",
".",
"result",
"(",
")",
"#exception()",
"LOGGER",
".",
"error",
"(",
"\"error in step2 concat %s\"",
",",
"error",
")",
"raise",
"IPyradWarningExit",
"(",
"\"error in step2 concat: {}\"",
".",
"format",
"(",
"error",
")",
")",
"else",
":",
"for",
"sample",
"in",
"subsamples",
":",
"## just copy fastqs handles to concat attribute",
"sample",
".",
"files",
".",
"concat",
"=",
"sample",
".",
"files",
".",
"fastqs",
"return",
"subsamples"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
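concat_reads tracks its submitted jobs by polling ready() on each stored result until every one reports finished. That pattern can be mocked without a running ipyparallel cluster; the FakeJob class below is invented purely for the demonstration and only imitates the ready() method used above:

import time

class FakeJob(object):
    # stand-in for an asynchronous result object with a ready() method
    def __init__(self, finish_at):
        self.finish_at = finish_at
    def ready(self):
        return time.time() >= self.finish_at

jobs = {name: FakeJob(time.time() + i) for i, name in enumerate(["s1", "s2", "s3"])}
while True:
    finished = sum(job.ready() for job in jobs.values())
    # a progress bar update would go here, as in concat_reads above
    time.sleep(0.1)
    if finished == len(jobs):
        break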
valid
|
run_cutadapt
|
sends fastq files to cutadapt
|
ipyrad/assemble/rawedit.py
|
def run_cutadapt(data, subsamples, lbview):
"""
sends fastq files to cutadapt
"""
## choose cutadapt function based on datatype
start = time.time()
printstr = " processing reads | {} | s2 |"
finished = 0
rawedits = {}
## sort subsamples so that the biggest files get submitted first
subsamples.sort(key=lambda x: x.stats.reads_raw, reverse=True)
LOGGER.info([i.stats.reads_raw for i in subsamples])
## send samples to cutadapt filtering
if "pair" in data.paramsdict["datatype"]:
for sample in subsamples:
rawedits[sample.name] = lbview.apply(cutadaptit_pairs, *(data, sample))
else:
for sample in subsamples:
rawedits[sample.name] = lbview.apply(cutadaptit_single, *(data, sample))
## wait for all to finish
while 1:
finished = sum([i.ready() for i in rawedits.values()])
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(rawedits), finished, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if finished == len(rawedits):
print("")
break
## collect results, report failures, and store stats. async = sample.name
for async in rawedits:
if rawedits[async].successful():
res = rawedits[async].result()
## if single cleanup is easy
if "pair" not in data.paramsdict["datatype"]:
parse_single_results(data, data.samples[async], res)
else:
parse_pair_results(data, data.samples[async], res)
else:
print(" found an error in step2; see ipyrad_log.txt")
LOGGER.error("error in run_cutadapt(): %s", rawedits[async].exception())
|
def run_cutadapt(data, subsamples, lbview):
"""
sends fastq files to cutadapt
"""
## choose cutadapt function based on datatype
start = time.time()
printstr = " processing reads | {} | s2 |"
finished = 0
rawedits = {}
## sort subsamples so that the biggest files get submitted first
subsamples.sort(key=lambda x: x.stats.reads_raw, reverse=True)
LOGGER.info([i.stats.reads_raw for i in subsamples])
## send samples to cutadapt filtering
if "pair" in data.paramsdict["datatype"]:
for sample in subsamples:
rawedits[sample.name] = lbview.apply(cutadaptit_pairs, *(data, sample))
else:
for sample in subsamples:
rawedits[sample.name] = lbview.apply(cutadaptit_single, *(data, sample))
## wait for all to finish
while 1:
finished = sum([i.ready() for i in rawedits.values()])
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(rawedits), finished, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if finished == len(rawedits):
print("")
break
## collect results, report failures, and store stats. async = sample.name
for async in rawedits:
if rawedits[async].successful():
res = rawedits[async].result()
## if single cleanup is easy
if "pair" not in data.paramsdict["datatype"]:
parse_single_results(data, data.samples[async], res)
else:
parse_pair_results(data, data.samples[async], res)
else:
print(" found an error in step2; see ipyrad_log.txt")
LOGGER.error("error in run_cutadapt(): %s", rawedits[async].exception())
|
[
"sends",
"fastq",
"files",
"to",
"cutadapt"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L569-L613
|
[
"def",
"run_cutadapt",
"(",
"data",
",",
"subsamples",
",",
"lbview",
")",
":",
"## choose cutadapt function based on datatype",
"start",
"=",
"time",
".",
"time",
"(",
")",
"printstr",
"=",
"\" processing reads | {} | s2 |\"",
"finished",
"=",
"0",
"rawedits",
"=",
"{",
"}",
"## sort subsamples so that the biggest files get submitted first",
"subsamples",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"stats",
".",
"reads_raw",
",",
"reverse",
"=",
"True",
")",
"LOGGER",
".",
"info",
"(",
"[",
"i",
".",
"stats",
".",
"reads_raw",
"for",
"i",
"in",
"subsamples",
"]",
")",
"## send samples to cutadapt filtering",
"if",
"\"pair\"",
"in",
"data",
".",
"paramsdict",
"[",
"\"datatype\"",
"]",
":",
"for",
"sample",
"in",
"subsamples",
":",
"rawedits",
"[",
"sample",
".",
"name",
"]",
"=",
"lbview",
".",
"apply",
"(",
"cutadaptit_pairs",
",",
"*",
"(",
"data",
",",
"sample",
")",
")",
"else",
":",
"for",
"sample",
"in",
"subsamples",
":",
"rawedits",
"[",
"sample",
".",
"name",
"]",
"=",
"lbview",
".",
"apply",
"(",
"cutadaptit_single",
",",
"*",
"(",
"data",
",",
"sample",
")",
")",
"## wait for all to finish",
"while",
"1",
":",
"finished",
"=",
"sum",
"(",
"[",
"i",
".",
"ready",
"(",
")",
"for",
"i",
"in",
"rawedits",
".",
"values",
"(",
")",
"]",
")",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"progressbar",
"(",
"len",
"(",
"rawedits",
")",
",",
"finished",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"data",
".",
"_spacer",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"finished",
"==",
"len",
"(",
"rawedits",
")",
":",
"print",
"(",
"\"\"",
")",
"break",
"## collect results, report failures, and store stats. async = sample.name",
"for",
"async",
"in",
"rawedits",
":",
"if",
"rawedits",
"[",
"async",
"]",
".",
"successful",
"(",
")",
":",
"res",
"=",
"rawedits",
"[",
"async",
"]",
".",
"result",
"(",
")",
"## if single cleanup is easy",
"if",
"\"pair\"",
"not",
"in",
"data",
".",
"paramsdict",
"[",
"\"datatype\"",
"]",
":",
"parse_single_results",
"(",
"data",
",",
"data",
".",
"samples",
"[",
"async",
"]",
",",
"res",
")",
"else",
":",
"parse_pair_results",
"(",
"data",
",",
"data",
".",
"samples",
"[",
"async",
"]",
",",
"res",
")",
"else",
":",
"print",
"(",
"\" found an error in step2; see ipyrad_log.txt\"",
")",
"LOGGER",
".",
"error",
"(",
"\"error in run_cutadapt(): %s\"",
",",
"rawedits",
"[",
"async",
"]",
".",
"exception",
"(",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
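run_cutadapt sorts samples by raw read count so the largest inputs are dispatched first, which keeps the load-balanced view busy at the end of the queue. The same sort on a made-up list of (name, reads_raw) pairs:

samples = [("A", 120000), ("B", 950000), ("C", 430000)]   # hypothetical read counts
samples.sort(key=lambda s: s[1], reverse=True)
print([name for name, _ in samples])   # ['B', 'C', 'A']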
valid
|
choose_samples
|
filter out samples that are already done with this step, unless force
|
ipyrad/assemble/rawedit.py
|
def choose_samples(samples, force):
""" filter out samples that are already done with this step, unless force"""
## hold samples that pass
subsamples = []
## filter the samples again
if not force:
for sample in samples:
if sample.stats.state >= 2:
print("""\
Skipping Sample {}; Already filtered. Use force argument to overwrite.\
""".format(sample.name))
elif not sample.stats.reads_raw:
print("""\
Skipping Sample {}; No reads found in file {}\
""".format(sample.name, sample.files.fastqs))
else:
subsamples.append(sample)
else:
for sample in samples:
if not sample.stats.reads_raw:
print("""\
Skipping Sample {}; No reads found in file {}\
""".format(sample.name, sample.files.fastqs))
else:
subsamples.append(sample)
return subsamples
|
def choose_samples(samples, force):
""" filter out samples that are already done with this step, unless force"""
## hold samples that pass
subsamples = []
## filter the samples again
if not force:
for sample in samples:
if sample.stats.state >= 2:
print("""\
Skipping Sample {}; Already filtered. Use force argument to overwrite.\
""".format(sample.name))
elif not sample.stats.reads_raw:
print("""\
Skipping Sample {}; No reads found in file {}\
""".format(sample.name, sample.files.fastqs))
else:
subsamples.append(sample)
else:
for sample in samples:
if not sample.stats.reads_raw:
print("""\
Skipping Sample {}; No reads found in file {}\
""".format(sample.name, sample.files.fastqs))
else:
subsamples.append(sample)
return subsamples
|
[
"filter",
"out",
"samples",
"that",
"are",
"already",
"done",
"with",
"this",
"step",
"unless",
"force"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L617-L644
|
[
"def",
"choose_samples",
"(",
"samples",
",",
"force",
")",
":",
"## hold samples that pass",
"subsamples",
"=",
"[",
"]",
"## filter the samples again",
"if",
"not",
"force",
":",
"for",
"sample",
"in",
"samples",
":",
"if",
"sample",
".",
"stats",
".",
"state",
">=",
"2",
":",
"print",
"(",
"\"\"\"\\\n Skipping Sample {}; Already filtered. Use force argument to overwrite.\\\n \"\"\"",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
"elif",
"not",
"sample",
".",
"stats",
".",
"reads_raw",
":",
"print",
"(",
"\"\"\"\\\n Skipping Sample {}; No reads found in file {}\\\n \"\"\"",
".",
"format",
"(",
"sample",
".",
"name",
",",
"sample",
".",
"files",
".",
"fastqs",
")",
")",
"else",
":",
"subsamples",
".",
"append",
"(",
"sample",
")",
"else",
":",
"for",
"sample",
"in",
"samples",
":",
"if",
"not",
"sample",
".",
"stats",
".",
"reads_raw",
":",
"print",
"(",
"\"\"\"\\\n Skipping Sample {}; No reads found in file {}\\\n \"\"\"",
".",
"format",
"(",
"sample",
".",
"name",
",",
"sample",
".",
"files",
".",
"fastqs",
")",
")",
"else",
":",
"subsamples",
".",
"append",
"(",
"sample",
")",
"return",
"subsamples"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
concat_multiple_inputs
|
If multiple fastq files were appended into the list of fastqs for samples
then we merge them here before proceeding.
|
ipyrad/assemble/rawedit.py
|
def concat_multiple_inputs(data, sample):
"""
If multiple fastq files were appended into the list of fastqs for samples
then we merge them here before proceeding.
"""
## if more than one tuple in fastq list
if len(sample.files.fastqs) > 1:
## create a cat command to append them all (doesn't matter if they
## are gzipped, cat still works). Grab index 0 of tuples for R1s.
cmd1 = ["cat"] + [i[0] for i in sample.files.fastqs]
isgzip = ".gz"
if not sample.files.fastqs[0][0].endswith(".gz"):
isgzip = ""
## write to new concat handle
conc1 = os.path.join(data.dirs.edits, sample.name+"_R1_concat.fq{}".format(isgzip))
with open(conc1, 'w') as cout1:
proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=cout1, close_fds=True)
res1 = proc1.communicate()[0]
if proc1.returncode:
raise IPyradWarningExit("error in: {}, {}".format(cmd1, res1))
## Only set conc2 if R2 actually exists
conc2 = 0
if "pair" in data.paramsdict["datatype"]:
cmd2 = ["cat"] + [i[1] for i in sample.files.fastqs]
conc2 = os.path.join(data.dirs.edits, sample.name+"_R2_concat.fq{}".format(isgzip))
with open(conc2, 'w') as cout2:
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=cout2, close_fds=True)
res2 = proc2.communicate()[0]
if proc2.returncode:
raise IPyradWarningExit("Error concatenating fastq files. Make sure all "\
+ "these files exist: {}\nError message: {}".format(cmd2, proc2.returncode))
## store new file handles
sample.files.concat = [(conc1, conc2)]
return sample.files.concat
|
def concat_multiple_inputs(data, sample):
"""
If multiple fastq files were appended into the list of fastqs for samples
then we merge them here before proceeding.
"""
## if more than one tuple in fastq list
if len(sample.files.fastqs) > 1:
## create a cat command to append them all (doesn't matter if they
## are gzipped, cat still works). Grab index 0 of tuples for R1s.
cmd1 = ["cat"] + [i[0] for i in sample.files.fastqs]
isgzip = ".gz"
if not sample.files.fastqs[0][0].endswith(".gz"):
isgzip = ""
## write to new concat handle
conc1 = os.path.join(data.dirs.edits, sample.name+"_R1_concat.fq{}".format(isgzip))
with open(conc1, 'w') as cout1:
proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=cout1, close_fds=True)
res1 = proc1.communicate()[0]
if proc1.returncode:
raise IPyradWarningExit("error in: {}, {}".format(cmd1, res1))
## Only set conc2 if R2 actually exists
conc2 = 0
if "pair" in data.paramsdict["datatype"]:
cmd2 = ["cat"] + [i[1] for i in sample.files.fastqs]
conc2 = os.path.join(data.dirs.edits, sample.name+"_R2_concat.fq{}".format(isgzip))
with open(conc2, 'w') as cout2:
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=cout2, close_fds=True)
res2 = proc2.communicate()[0]
if proc2.returncode:
raise IPyradWarningExit("Error concatenating fastq files. Make sure all "\
+ "these files exist: {}\nError message: {}".format(cmd2, proc2.returncode))
## store new file handles
sample.files.concat = [(conc1, conc2)]
return sample.files.concat
|
[
"If",
"multiple",
"fastq",
"files",
"were",
"appended",
"into",
"the",
"list",
"of",
"fastqs",
"for",
"samples",
"then",
"we",
"merge",
"them",
"here",
"before",
"proceeding",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L648-L686
|
[
"def",
"concat_multiple_inputs",
"(",
"data",
",",
"sample",
")",
":",
"## if more than one tuple in fastq list",
"if",
"len",
"(",
"sample",
".",
"files",
".",
"fastqs",
")",
">",
"1",
":",
"## create a cat command to append them all (doesn't matter if they ",
"## are gzipped, cat still works). Grab index 0 of tuples for R1s.",
"cmd1",
"=",
"[",
"\"cat\"",
"]",
"+",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"sample",
".",
"files",
".",
"fastqs",
"]",
"isgzip",
"=",
"\".gz\"",
"if",
"not",
"sample",
".",
"files",
".",
"fastqs",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"endswith",
"(",
"\".gz\"",
")",
":",
"isgzip",
"=",
"\"\"",
"## write to new concat handle",
"conc1",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"edits",
",",
"sample",
".",
"name",
"+",
"\"_R1_concat.fq{}\"",
".",
"format",
"(",
"isgzip",
")",
")",
"with",
"open",
"(",
"conc1",
",",
"'w'",
")",
"as",
"cout1",
":",
"proc1",
"=",
"sps",
".",
"Popen",
"(",
"cmd1",
",",
"stderr",
"=",
"sps",
".",
"STDOUT",
",",
"stdout",
"=",
"cout1",
",",
"close_fds",
"=",
"True",
")",
"res1",
"=",
"proc1",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"if",
"proc1",
".",
"returncode",
":",
"raise",
"IPyradWarningExit",
"(",
"\"error in: {}, {}\"",
".",
"format",
"(",
"cmd1",
",",
"res1",
")",
")",
"## Only set conc2 if R2 actually exists",
"conc2",
"=",
"0",
"if",
"\"pair\"",
"in",
"data",
".",
"paramsdict",
"[",
"\"datatype\"",
"]",
":",
"cmd2",
"=",
"[",
"\"cat\"",
"]",
"+",
"[",
"i",
"[",
"1",
"]",
"for",
"i",
"in",
"sample",
".",
"files",
".",
"fastqs",
"]",
"conc2",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"edits",
",",
"sample",
".",
"name",
"+",
"\"_R2_concat.fq{}\"",
".",
"format",
"(",
"isgzip",
")",
")",
"with",
"open",
"(",
"conc2",
",",
"'w'",
")",
"as",
"cout2",
":",
"proc2",
"=",
"sps",
".",
"Popen",
"(",
"cmd2",
",",
"stderr",
"=",
"sps",
".",
"STDOUT",
",",
"stdout",
"=",
"cout2",
",",
"close_fds",
"=",
"True",
")",
"res2",
"=",
"proc2",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"if",
"proc2",
".",
"returncode",
":",
"raise",
"IPyradWarningExit",
"(",
"\"Error concatenating fastq files. Make sure all \"",
"+",
"\"these files exist: {}\\nError message: {}\"",
".",
"format",
"(",
"cmd2",
",",
"proc2",
".",
"returncode",
")",
")",
"## store new file handles",
"sample",
".",
"files",
".",
"concat",
"=",
"[",
"(",
"conc1",
",",
"conc2",
")",
"]",
"return",
"sample",
".",
"files",
".",
"concat"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
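concat_multiple_inputs appends all input files for a sample with cat, writing straight into the new concat handle; gzipped members can be appended this way because concatenated gzip streams remain valid. A hedged sketch of that step with placeholder file names (the helper name cat_files is invented here):

import subprocess as sps

def cat_files(inputs, output):
    # append input files into one output via `cat`, mirroring the R1/R2 concat above
    with open(output, "w") as out:
        proc = sps.Popen(["cat"] + list(inputs), stderr=sps.STDOUT, stdout=out)
        proc.communicate()
    if proc.returncode:
        raise RuntimeError("cat returned {} for {}".format(proc.returncode, inputs))
    return output

# cat_files(["sampleA_lane1_R1.fq.gz", "sampleA_lane2_R1.fq.gz"], "sampleA_R1_concat.fq.gz")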
valid
|
make
|
Convert vcf from step6 to .loci format to facilitate downstream format conversion
|
ipyrad/file_conversion/vcf2loci.py
|
def make( data, samples ):
""" Convert vcf from step6 to .loci format to facilitate downstream format conversion """
invcffile = os.path.join( data.dirs.consens, data.name+".vcf" )
outlocifile = os.path.join( data.dirs.outfiles, data.name+".loci" )
importvcf( invcffile, outlocifile )
|
def make( data, samples ):
""" Convert vcf from step6 to .loci format to facilitate downstream format conversion """
invcffile = os.path.join( data.dirs.consens, data.name+".vcf" )
outlocifile = os.path.join( data.dirs.outfiles, data.name+".loci" )
importvcf( invcffile, outlocifile )
|
[
"Convert",
"vcf",
"from",
"step6",
"to",
".",
"loci",
"format",
"to",
"facilitate",
"downstream",
"format",
"conversion"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/file_conversion/vcf2loci.py#L8-L13
|
[
"def",
"make",
"(",
"data",
",",
"samples",
")",
":",
"invcffile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"consens",
",",
"data",
".",
"name",
"+",
"\".vcf\"",
")",
"outlocifile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"outfiles",
",",
"data",
".",
"name",
"+",
"\".loci\"",
")",
"importvcf",
"(",
"invcffile",
",",
"outlocifile",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
importvcf
|
Function for importing a vcf file into loci format. Arguments
are the input vcffile and the loci file to write out.
|
ipyrad/file_conversion/vcf2loci.py
|
def importvcf( vcffile, locifile ):
    """ Function for importing a vcf file into loci format. Arguments
    are the input vcffile and the loci file to write out. """
    try:
        ## Get names of all individuals in the vcf
        with open( vcffile, 'r' ) as invcf:
            for line in invcf:
                if line.split()[0] == "#CHROM":
                    ## The sample names in the vcf are everything after the
                    ## "FORMAT" column, so find that index, then slice everything after it.
                    names_col = line.split().index( "FORMAT" ) + 1
                    names = line.split()[ names_col: ]
                    LOGGER.debug( "Got names - %s", names )
                    break
        ## Get the column to start reading at
    except Exception as inst:
        LOGGER.error( "Error reading names from vcf file %s - %s", vcffile, inst )
        raise
|
def importvcf( vcffile, locifile ):
    """ Function for importing a vcf file into loci format. Arguments
    are the input vcffile and the loci file to write out. """
    try:
        ## Get names of all individuals in the vcf
        with open( vcffile, 'r' ) as invcf:
            for line in invcf:
                if line.split()[0] == "#CHROM":
                    ## The sample names in the vcf are everything after the
                    ## "FORMAT" column, so find that index, then slice everything after it.
                    names_col = line.split().index( "FORMAT" ) + 1
                    names = line.split()[ names_col: ]
                    LOGGER.debug( "Got names - %s", names )
                    break
        ## Get the column to start reading at
    except Exception as inst:
        LOGGER.error( "Error reading names from vcf file %s - %s", vcffile, inst )
        raise
|
[
"Function",
"for",
"importing",
"a",
"vcf",
"file",
"into",
"loci",
"format",
".",
"Arguments",
"are",
"the",
"input",
"vcffile",
"and",
"the",
"loci",
"file",
"to",
"write",
"out",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/file_conversion/vcf2loci.py#L16-L35
|
[
"def",
"importvcf",
"(",
"vcffile",
",",
"locifile",
")",
":",
"try",
":",
"## Get names of all individuals in the vcf",
"with",
"open",
"(",
"invcffile",
",",
"'r'",
")",
"as",
"invcf",
":",
"for",
"line",
"in",
"invcf",
":",
"if",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
"==",
"\"#CHROM\"",
":",
"## This is maybe a little clever. The names in the vcf are everything after",
"## the \"FORMAT\" column, so find that index, then slice everything after it.",
"names_col",
"=",
"line",
".",
"split",
"(",
")",
".",
"index",
"(",
"\"FORMAT\"",
")",
"+",
"1",
"names",
"=",
"line",
".",
"split",
"(",
")",
"[",
"names_col",
":",
"]",
"LOGGER",
".",
"debug",
"(",
"\"Got names - %s\"",
",",
"names",
")",
"break",
"print",
"(",
"\"wat\"",
")",
"## Get the column to start reading at",
"except",
"Exception",
":",
"print",
"(",
"\"wat\"",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
get_targets
|
A function to find 2 engines per hostname on the ipyclient.
We'll assume that the CPUs are hyperthreaded, which is why
we grab two. If they are not then no foul. Two multi-threaded
jobs will be run on each of the 2 engines per host.
|
ipyrad/analysis/tetrad.py
|
def get_targets(ipyclient):
"""
A function to find 2 engines per hostname on the ipyclient.
We'll assume that the CPUs are hyperthreaded, which is why
we grab two. If they are not then no foul. Two multi-threaded
jobs will be run on each of the 2 engines per host.
"""
## fill hosts with async[gethostname]
hosts = []
for eid in ipyclient.ids:
engine = ipyclient[eid]
if not engine.outstanding:
hosts.append(engine.apply(socket.gethostname))
## capture results of asyncs
hosts = [i.get() for i in hosts]
hostset = set(hosts)
hostzip = zip(hosts, ipyclient.ids)
hostdict = {host: [i[1] for i in hostzip if i[0] == host] for host in hostset}
targets = list(itertools.chain(*[hostdict[i][:2] for i in hostdict]))
## return first two engines from each host
return targets
|
def get_targets(ipyclient):
"""
A function to find 2 engines per hostname on the ipyclient.
We'll assume that the CPUs are hyperthreaded, which is why
we grab two. If they are not then no foul. Two multi-threaded
jobs will be run on each of the 2 engines per host.
"""
## fill hosts with async[gethostname]
hosts = []
for eid in ipyclient.ids:
engine = ipyclient[eid]
if not engine.outstanding:
hosts.append(engine.apply(socket.gethostname))
## capture results of asyncs
hosts = [i.get() for i in hosts]
hostset = set(hosts)
hostzip = zip(hosts, ipyclient.ids)
hostdict = {host: [i[1] for i in hostzip if i[0] == host] for host in hostset}
targets = list(itertools.chain(*[hostdict[i][:2] for i in hostdict]))
## return first two engines from each host
return targets
|
[
"A",
"function",
"to",
"find",
"2",
"engines",
"per",
"hostname",
"on",
"the",
"ipyclient",
".",
"We",
"ll",
"assume",
"that",
"the",
"CPUs",
"are",
"hyperthreaded",
"which",
"is",
"why",
"we",
"grab",
"two",
".",
"If",
"they",
"are",
"not",
"then",
"no",
"foul",
".",
"Two",
"multi",
"-",
"threaded",
"jobs",
"will",
"be",
"run",
"on",
"each",
"of",
"the",
"2",
"engines",
"per",
"host",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1178-L1200
|
[
"def",
"get_targets",
"(",
"ipyclient",
")",
":",
"## fill hosts with async[gethostname] ",
"hosts",
"=",
"[",
"]",
"for",
"eid",
"in",
"ipyclient",
".",
"ids",
":",
"engine",
"=",
"ipyclient",
"[",
"eid",
"]",
"if",
"not",
"engine",
".",
"outstanding",
":",
"hosts",
".",
"append",
"(",
"engine",
".",
"apply",
"(",
"socket",
".",
"gethostname",
")",
")",
"## capture results of asyncs",
"hosts",
"=",
"[",
"i",
".",
"get",
"(",
")",
"for",
"i",
"in",
"hosts",
"]",
"hostset",
"=",
"set",
"(",
"hosts",
")",
"hostzip",
"=",
"zip",
"(",
"hosts",
",",
"ipyclient",
".",
"ids",
")",
"hostdict",
"=",
"{",
"host",
":",
"[",
"i",
"[",
"1",
"]",
"for",
"i",
"in",
"hostzip",
"if",
"i",
"[",
"0",
"]",
"==",
"host",
"]",
"for",
"host",
"in",
"hostset",
"}",
"targets",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"[",
"hostdict",
"[",
"i",
"]",
"[",
":",
"2",
"]",
"for",
"i",
"in",
"hostdict",
"]",
")",
")",
"## return first two engines from each host",
"return",
"targets"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
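A minimal standalone sketch of the host-grouping idea inside get_targets(), using hypothetical hostnames and engine ids instead of a live ipyclient. hostzip is materialized as a list so the same logic also behaves on Python 3, where zip() is a one-shot iterator.

import itertools

## hypothetical gethostname() results and their matching engine ids
hosts = ["node1", "node1", "node1", "node2", "node2"]
ids = [0, 1, 2, 3, 4]

hostzip = list(zip(hosts, ids))
hostdict = {host: [i[1] for i in hostzip if i[0] == host] for host in set(hosts)}
targets = list(itertools.chain(*[hostdict[h][:2] for h in hostdict]))
## targets holds at most two engine ids per host, e.g. [0, 1, 3, 4]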
valid
|
compute_tree_stats
|
compute stats for stats file and NHX tree features
|
ipyrad/analysis/tetrad.py
|
def compute_tree_stats(self, ipyclient):
"""
compute stats for stats file and NHX tree features
"""
## get name indices
names = self.samples
## get majority rule consensus tree of weighted Q bootstrap trees
if self.params.nboots:
## Tree object
fulltre = ete3.Tree(self.trees.tree, format=0)
fulltre.unroot()
## only grab as many boots as the last option said was max
with open(self.trees.boots, 'r') as inboots:
bb = [ete3.Tree(i.strip(), format=0) for i in inboots.readlines()]
wboots = [fulltre] + bb[-self.params.nboots:]
## infer consensus tree and write to file
wctre, wcounts = consensus_tree(wboots, names=names)
self.trees.cons = os.path.join(self.dirs, self.name + ".cons")
with open(self.trees.cons, 'w') as ocons:
ocons.write(wctre.write(format=0))
else:
wctre = ete3.Tree(self.trees.tree, format=0)
wctre.unroot()
## build stats file and write trees
self.trees.nhx = os.path.join(self.dirs, self.name + ".nhx")
with open(self.files.stats, 'w') as ostats:
## print Tetrad info
#ostats.write(STATS_STRING.format(**self.stats))
## print bootstrap splits
if self.params.nboots:
ostats.write("## splits observed in {} trees\n".format(len(wboots)))
for i, j in enumerate(self.samples):
ostats.write("{:<3} {}\n".format(i, j))
ostats.write("\n")
for split, freq in wcounts:
if split.count('1') > 1:
ostats.write("{} {:.2f}\n".format(split, round(freq, 2)))
ostats.write("\n")
## parallelized this function because it can be slogging
lbview = ipyclient.load_balanced_view()
## store results in dicts
qtots = {}
qsamp = {}
tots = sum(1 for i in wctre.iter_leaves())
totn = set(wctre.get_leaf_names())
## iterate over node traversal.
for node in wctre.traverse():
## this is slow, needs to look at every sampled quartet
        ## so we send it to be processed on an engine
qtots[node] = lbview.apply(_get_total, *(tots, node))
qsamp[node] = lbview.apply(_get_sampled, *(self, totn, node))
## wait for jobs to finish
ipyclient.wait()
## put results into tree
for node in wctre.traverse():
## this is fast, just calcs n_choose_k
total = qtots[node].result()
sampled = qsamp[node].result()
## store the results to the tree
node.add_feature("quartets_total", total)
node.add_feature("quartets_sampled", sampled)
features = ["quartets_total", "quartets_sampled"]
## return as NHX format with extra info
with open(self.trees.nhx, 'w') as outtre:
outtre.write(wctre.write(format=0, features=features))
|
def compute_tree_stats(self, ipyclient):
"""
compute stats for stats file and NHX tree features
"""
## get name indices
names = self.samples
## get majority rule consensus tree of weighted Q bootstrap trees
if self.params.nboots:
## Tree object
fulltre = ete3.Tree(self.trees.tree, format=0)
fulltre.unroot()
## only grab as many boots as the last option said was max
with open(self.trees.boots, 'r') as inboots:
bb = [ete3.Tree(i.strip(), format=0) for i in inboots.readlines()]
wboots = [fulltre] + bb[-self.params.nboots:]
## infer consensus tree and write to file
wctre, wcounts = consensus_tree(wboots, names=names)
self.trees.cons = os.path.join(self.dirs, self.name + ".cons")
with open(self.trees.cons, 'w') as ocons:
ocons.write(wctre.write(format=0))
else:
wctre = ete3.Tree(self.trees.tree, format=0)
wctre.unroot()
## build stats file and write trees
self.trees.nhx = os.path.join(self.dirs, self.name + ".nhx")
with open(self.files.stats, 'w') as ostats:
## print Tetrad info
#ostats.write(STATS_STRING.format(**self.stats))
## print bootstrap splits
if self.params.nboots:
ostats.write("## splits observed in {} trees\n".format(len(wboots)))
for i, j in enumerate(self.samples):
ostats.write("{:<3} {}\n".format(i, j))
ostats.write("\n")
for split, freq in wcounts:
if split.count('1') > 1:
ostats.write("{} {:.2f}\n".format(split, round(freq, 2)))
ostats.write("\n")
## parallelized this function because it can be slogging
lbview = ipyclient.load_balanced_view()
## store results in dicts
qtots = {}
qsamp = {}
tots = sum(1 for i in wctre.iter_leaves())
totn = set(wctre.get_leaf_names())
## iterate over node traversal.
for node in wctre.traverse():
## this is slow, needs to look at every sampled quartet
        ## so we send it to be processed on an engine
qtots[node] = lbview.apply(_get_total, *(tots, node))
qsamp[node] = lbview.apply(_get_sampled, *(self, totn, node))
## wait for jobs to finish
ipyclient.wait()
## put results into tree
for node in wctre.traverse():
## this is fast, just calcs n_choose_k
total = qtots[node].result()
sampled = qsamp[node].result()
## store the results to the tree
node.add_feature("quartets_total", total)
node.add_feature("quartets_sampled", sampled)
features = ["quartets_total", "quartets_sampled"]
## return as NHX format with extra info
with open(self.trees.nhx, 'w') as outtre:
outtre.write(wctre.write(format=0, features=features))
|
[
"compute",
"stats",
"for",
"stats",
"file",
"and",
"NHX",
"tree",
"features"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1207-L1285
|
[
"def",
"compute_tree_stats",
"(",
"self",
",",
"ipyclient",
")",
":",
"## get name indices",
"names",
"=",
"self",
".",
"samples",
"## get majority rule consensus tree of weighted Q bootstrap trees",
"if",
"self",
".",
"params",
".",
"nboots",
":",
"## Tree object",
"fulltre",
"=",
"ete3",
".",
"Tree",
"(",
"self",
".",
"trees",
".",
"tree",
",",
"format",
"=",
"0",
")",
"fulltre",
".",
"unroot",
"(",
")",
"## only grab as many boots as the last option said was max",
"with",
"open",
"(",
"self",
".",
"trees",
".",
"boots",
",",
"'r'",
")",
"as",
"inboots",
":",
"bb",
"=",
"[",
"ete3",
".",
"Tree",
"(",
"i",
".",
"strip",
"(",
")",
",",
"format",
"=",
"0",
")",
"for",
"i",
"in",
"inboots",
".",
"readlines",
"(",
")",
"]",
"wboots",
"=",
"[",
"fulltre",
"]",
"+",
"bb",
"[",
"-",
"self",
".",
"params",
".",
"nboots",
":",
"]",
"## infer consensus tree and write to file",
"wctre",
",",
"wcounts",
"=",
"consensus_tree",
"(",
"wboots",
",",
"names",
"=",
"names",
")",
"self",
".",
"trees",
".",
"cons",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
",",
"self",
".",
"name",
"+",
"\".cons\"",
")",
"with",
"open",
"(",
"self",
".",
"trees",
".",
"cons",
",",
"'w'",
")",
"as",
"ocons",
":",
"ocons",
".",
"write",
"(",
"wctre",
".",
"write",
"(",
"format",
"=",
"0",
")",
")",
"else",
":",
"wctre",
"=",
"ete3",
".",
"Tree",
"(",
"self",
".",
"trees",
".",
"tree",
",",
"format",
"=",
"0",
")",
"wctre",
".",
"unroot",
"(",
")",
"## build stats file and write trees",
"self",
".",
"trees",
".",
"nhx",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
",",
"self",
".",
"name",
"+",
"\".nhx\"",
")",
"with",
"open",
"(",
"self",
".",
"files",
".",
"stats",
",",
"'w'",
")",
"as",
"ostats",
":",
"## print Tetrad info",
"#ostats.write(STATS_STRING.format(**self.stats))",
"## print bootstrap splits",
"if",
"self",
".",
"params",
".",
"nboots",
":",
"ostats",
".",
"write",
"(",
"\"## splits observed in {} trees\\n\"",
".",
"format",
"(",
"len",
"(",
"wboots",
")",
")",
")",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"self",
".",
"samples",
")",
":",
"ostats",
".",
"write",
"(",
"\"{:<3} {}\\n\"",
".",
"format",
"(",
"i",
",",
"j",
")",
")",
"ostats",
".",
"write",
"(",
"\"\\n\"",
")",
"for",
"split",
",",
"freq",
"in",
"wcounts",
":",
"if",
"split",
".",
"count",
"(",
"'1'",
")",
">",
"1",
":",
"ostats",
".",
"write",
"(",
"\"{} {:.2f}\\n\"",
".",
"format",
"(",
"split",
",",
"round",
"(",
"freq",
",",
"2",
")",
")",
")",
"ostats",
".",
"write",
"(",
"\"\\n\"",
")",
"## parallelized this function because it can be slogging",
"lbview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
")",
"## store results in dicts",
"qtots",
"=",
"{",
"}",
"qsamp",
"=",
"{",
"}",
"tots",
"=",
"sum",
"(",
"1",
"for",
"i",
"in",
"wctre",
".",
"iter_leaves",
"(",
")",
")",
"totn",
"=",
"set",
"(",
"wctre",
".",
"get_leaf_names",
"(",
")",
")",
"## iterate over node traversal. ",
"for",
"node",
"in",
"wctre",
".",
"traverse",
"(",
")",
":",
"## this is slow, needs to look at every sampled quartet",
"## so we send it be processed on an engine",
"qtots",
"[",
"node",
"]",
"=",
"lbview",
".",
"apply",
"(",
"_get_total",
",",
"*",
"(",
"tots",
",",
"node",
")",
")",
"qsamp",
"[",
"node",
"]",
"=",
"lbview",
".",
"apply",
"(",
"_get_sampled",
",",
"*",
"(",
"self",
",",
"totn",
",",
"node",
")",
")",
"## wait for jobs to finish",
"ipyclient",
".",
"wait",
"(",
")",
"## put results into tree",
"for",
"node",
"in",
"wctre",
".",
"traverse",
"(",
")",
":",
"## this is fast, just calcs n_choose_k",
"total",
"=",
"qtots",
"[",
"node",
"]",
".",
"result",
"(",
")",
"sampled",
"=",
"qsamp",
"[",
"node",
"]",
".",
"result",
"(",
")",
"## store the results to the tree ",
"node",
".",
"add_feature",
"(",
"\"quartets_total\"",
",",
"total",
")",
"node",
".",
"add_feature",
"(",
"\"quartets_sampled\"",
",",
"sampled",
")",
"features",
"=",
"[",
"\"quartets_total\"",
",",
"\"quartets_sampled\"",
"]",
"## return as NHX format with extra info",
"with",
"open",
"(",
"self",
".",
"trees",
".",
"nhx",
",",
"'w'",
")",
"as",
"outtre",
":",
"outtre",
".",
"write",
"(",
"wctre",
".",
"write",
"(",
"format",
"=",
"0",
",",
"features",
"=",
"features",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
random_combination
|
Random selection from itertools.combinations(iterable, r).
Use this if not sampling all possible quartets.
|
ipyrad/analysis/tetrad.py
|
def random_combination(iterable, nquartets):
"""
Random selection from itertools.combinations(iterable, r).
Use this if not sampling all possible quartets.
"""
pool = tuple(iterable)
size = len(pool)
indices = random.sample(xrange(size), nquartets)
return tuple(pool[i] for i in indices)
|
def random_combination(iterable, nquartets):
"""
Random selection from itertools.combinations(iterable, r).
Use this if not sampling all possible quartets.
"""
pool = tuple(iterable)
size = len(pool)
indices = random.sample(xrange(size), nquartets)
return tuple(pool[i] for i in indices)
|
[
"Random",
"selection",
"from",
"itertools",
".",
"combinations",
"(",
"iterable",
"r",
")",
".",
"Use",
"this",
"if",
"not",
"sampling",
"all",
"possible",
"quartets",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1297-L1305
|
[
"def",
"random_combination",
"(",
"iterable",
",",
"nquartets",
")",
":",
"pool",
"=",
"tuple",
"(",
"iterable",
")",
"size",
"=",
"len",
"(",
"pool",
")",
"indices",
"=",
"random",
".",
"sample",
"(",
"xrange",
"(",
"size",
")",
",",
"nquartets",
")",
"return",
"tuple",
"(",
"pool",
"[",
"i",
"]",
"for",
"i",
"in",
"indices",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
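A short usage-style sketch of the sampling random_combination() performs, written out with range/itertools so it runs as-is on Python 3 (the function above uses Python 2's xrange):

import itertools
import random

pool = tuple(itertools.combinations(range(6), 4))   # all 15 possible quartets of 6 samples
indices = random.sample(range(len(pool)), 3)        # pick 3 quartet indices without replacement
three_quartets = tuple(pool[i] for i in indices)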
valid
|
random_product
|
random sampler for equal_splits func
|
ipyrad/analysis/tetrad.py
|
def random_product(iter1, iter2):
""" random sampler for equal_splits func"""
pool1 = tuple(iter1)
pool2 = tuple(iter2)
ind1 = random.sample(pool1, 2)
ind2 = random.sample(pool2, 2)
return tuple(ind1+ind2)
|
def random_product(iter1, iter2):
""" random sampler for equal_splits func"""
pool1 = tuple(iter1)
pool2 = tuple(iter2)
ind1 = random.sample(pool1, 2)
ind2 = random.sample(pool2, 2)
return tuple(ind1+ind2)
|
[
"random",
"sampler",
"for",
"equal_splits",
"func"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1309-L1315
|
[
"def",
"random_product",
"(",
"iter1",
",",
"iter2",
")",
":",
"pool1",
"=",
"tuple",
"(",
"iter1",
")",
"pool2",
"=",
"tuple",
"(",
"iter2",
")",
"ind1",
"=",
"random",
".",
"sample",
"(",
"pool1",
",",
"2",
")",
"ind2",
"=",
"random",
".",
"sample",
"(",
"pool2",
",",
"2",
")",
"return",
"tuple",
"(",
"ind1",
"+",
"ind2",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
n_choose_k
|
get the number of quartets as n-choose-k. This is used
in equal splits to decide whether a split should be exhaustively sampled
or randomly sampled. Edges near tips can be exhaustive while highly nested
edges probably have too many quartets
|
ipyrad/analysis/tetrad.py
|
def n_choose_k(n, k):
""" get the number of quartets as n-choose-k. This is used
in equal splits to decide whether a split should be exhaustively sampled
or randomly sampled. Edges near tips can be exhaustive while highly nested
edges probably have too many quartets
"""
return int(reduce(MUL, (Fraction(n-i, i+1) for i in range(k)), 1))
|
def n_choose_k(n, k):
""" get the number of quartets as n-choose-k. This is used
in equal splits to decide whether a split should be exhaustively sampled
or randomly sampled. Edges near tips can be exhaustive while highly nested
edges probably have too many quartets
"""
return int(reduce(MUL, (Fraction(n-i, i+1) for i in range(k)), 1))
|
[
"get",
"the",
"number",
"of",
"quartets",
"as",
"n",
"-",
"choose",
"-",
"k",
".",
"This",
"is",
"used",
"in",
"equal",
"splits",
"to",
"decide",
"whether",
"a",
"split",
"should",
"be",
"exhaustively",
"sampled",
"or",
"randomly",
"sampled",
".",
"Edges",
"near",
"tips",
"can",
"be",
"exhaustive",
"while",
"highly",
"nested",
"edges",
"probably",
"have",
"too",
"many",
"quartets"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1319-L1325
|
[
"def",
"n_choose_k",
"(",
"n",
",",
"k",
")",
":",
"return",
"int",
"(",
"reduce",
"(",
"MUL",
",",
"(",
"Fraction",
"(",
"n",
"-",
"i",
",",
"i",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"k",
")",
")",
",",
"1",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
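A worked check of the binomial formula behind n_choose_k(); MUL and reduce in the record above are assumed to be operator.mul and the functools reduce, so the sketch spells those imports out explicitly.

from fractions import Fraction
from functools import reduce
import operator

def n_choose_k(n, k):
    ## same formula as above, with the assumed MUL/reduce imports made explicit
    return int(reduce(operator.mul, (Fraction(n - i, i + 1) for i in range(k)), 1))

assert n_choose_k(4, 2) == 6      # quartets within a single 4-taxon split
assert n_choose_k(13, 4) == 715   # all quartets for 13 samples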
valid
|
count_snps
|
get dstats from the count array and return as a float tuple
|
ipyrad/analysis/tetrad.py
|
def count_snps(mat):
"""
get dstats from the count array and return as a float tuple
"""
## get [aabb, baba, abba, aaab]
snps = np.zeros(4, dtype=np.uint32)
## get concordant (aabb) pis sites
snps[0] = np.uint32(\
mat[0, 5] + mat[0, 10] + mat[0, 15] + \
mat[5, 0] + mat[5, 10] + mat[5, 15] + \
mat[10, 0] + mat[10, 5] + mat[10, 15] + \
mat[15, 0] + mat[15, 5] + mat[15, 10])
## get discordant (baba) sites
for i in range(16):
if i % 5:
snps[1] += mat[i, i]
## get discordant (abba) sites
snps[2] = mat[1, 4] + mat[2, 8] + mat[3, 12] +\
mat[4, 1] + mat[6, 9] + mat[7, 13] +\
mat[8, 2] + mat[9, 6] + mat[11, 14] +\
mat[12, 3] + mat[13, 7] + mat[14, 11]
## get autapomorphy sites
snps[3] = (mat.sum() - np.diag(mat).sum()) - snps[2]
return snps
|
def count_snps(mat):
"""
get dstats from the count array and return as a float tuple
"""
## get [aabb, baba, abba, aaab]
snps = np.zeros(4, dtype=np.uint32)
## get concordant (aabb) pis sites
snps[0] = np.uint32(\
mat[0, 5] + mat[0, 10] + mat[0, 15] + \
mat[5, 0] + mat[5, 10] + mat[5, 15] + \
mat[10, 0] + mat[10, 5] + mat[10, 15] + \
mat[15, 0] + mat[15, 5] + mat[15, 10])
## get discordant (baba) sites
for i in range(16):
if i % 5:
snps[1] += mat[i, i]
## get discordant (abba) sites
snps[2] = mat[1, 4] + mat[2, 8] + mat[3, 12] +\
mat[4, 1] + mat[6, 9] + mat[7, 13] +\
mat[8, 2] + mat[9, 6] + mat[11, 14] +\
mat[12, 3] + mat[13, 7] + mat[14, 11]
## get autapomorphy sites
snps[3] = (mat.sum() - np.diag(mat).sum()) - snps[2]
return snps
|
[
"get",
"dstats",
"from",
"the",
"count",
"array",
"and",
"return",
"as",
"a",
"float",
"tuple"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1354-L1383
|
[
"def",
"count_snps",
"(",
"mat",
")",
":",
"## get [aabb, baba, abba, aaab] ",
"snps",
"=",
"np",
".",
"zeros",
"(",
"4",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"## get concordant (aabb) pis sites",
"snps",
"[",
"0",
"]",
"=",
"np",
".",
"uint32",
"(",
"mat",
"[",
"0",
",",
"5",
"]",
"+",
"mat",
"[",
"0",
",",
"10",
"]",
"+",
"mat",
"[",
"0",
",",
"15",
"]",
"+",
"mat",
"[",
"5",
",",
"0",
"]",
"+",
"mat",
"[",
"5",
",",
"10",
"]",
"+",
"mat",
"[",
"5",
",",
"15",
"]",
"+",
"mat",
"[",
"10",
",",
"0",
"]",
"+",
"mat",
"[",
"10",
",",
"5",
"]",
"+",
"mat",
"[",
"10",
",",
"15",
"]",
"+",
"mat",
"[",
"15",
",",
"0",
"]",
"+",
"mat",
"[",
"15",
",",
"5",
"]",
"+",
"mat",
"[",
"15",
",",
"10",
"]",
")",
"## get discordant (baba) sites",
"for",
"i",
"in",
"range",
"(",
"16",
")",
":",
"if",
"i",
"%",
"5",
":",
"snps",
"[",
"1",
"]",
"+=",
"mat",
"[",
"i",
",",
"i",
"]",
"## get discordant (abba) sites",
"snps",
"[",
"2",
"]",
"=",
"mat",
"[",
"1",
",",
"4",
"]",
"+",
"mat",
"[",
"2",
",",
"8",
"]",
"+",
"mat",
"[",
"3",
",",
"12",
"]",
"+",
"mat",
"[",
"4",
",",
"1",
"]",
"+",
"mat",
"[",
"6",
",",
"9",
"]",
"+",
"mat",
"[",
"7",
",",
"13",
"]",
"+",
"mat",
"[",
"8",
",",
"2",
"]",
"+",
"mat",
"[",
"9",
",",
"6",
"]",
"+",
"mat",
"[",
"11",
",",
"14",
"]",
"+",
"mat",
"[",
"12",
",",
"3",
"]",
"+",
"mat",
"[",
"13",
",",
"7",
"]",
"+",
"mat",
"[",
"14",
",",
"11",
"]",
"## get autapomorphy sites",
"snps",
"[",
"3",
"]",
"=",
"(",
"mat",
".",
"sum",
"(",
")",
"-",
"np",
".",
"diag",
"(",
"mat",
")",
".",
"sum",
"(",
")",
")",
"-",
"snps",
"[",
"2",
"]",
"return",
"snps"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
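A tiny hand-built example of the 16x16 site-pattern matrix that count_snps() reads, assuming bases are coded 0-3 so a four-sample pattern (s1, s2, s3, s4) is tallied at row 4*s1+s2, column 4*s3+s4:

import numpy as np

mat = np.zeros((16, 16), dtype=np.uint32)
mat[0, 5] += 3      # pattern (0,0,1,1): three concordant aabb sites
mat[1, 1] += 2      # pattern (0,1,0,1): two discordant baba sites (diagonal, but not at 0/5/10/15)
mat[1, 4] += 1      # pattern (0,1,1,0): one discordant abba site

snps = count_snps(mat)      # count_snps as defined in the record above
## snps[0] == 3 (aabb), snps[1] == 2 (baba), snps[2] == 1 (abba)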
valid
|
subsample_snps_map
|
removes ncolumns from snparray prior to matrix calculation, and
subsamples 'linked' snps (those from the same RAD locus) such that
for these four samples only 1 SNP per locus is kept. This information
comes from the 'map' array (map file).
|
ipyrad/analysis/tetrad.py
|
def subsample_snps_map(seqchunk, nmask, maparr):
"""
removes ncolumns from snparray prior to matrix calculation, and
subsamples 'linked' snps (those from the same RAD locus) such that
for these four samples only 1 SNP per locus is kept. This information
comes from the 'map' array (map file).
"""
## mask columns that contain Ns
rmask = np.zeros(seqchunk.shape[1], dtype=np.bool_)
## apply mask to the mapfile
last_loc = -1
for idx in xrange(maparr.shape[0]):
if maparr[idx] != last_loc:
if not nmask[idx]:
rmask[idx] = True
last_loc = maparr[idx]
## apply mask
#newarr = seqchunk[:, rmask]
## return smaller Nmasked array
return rmask
|
def subsample_snps_map(seqchunk, nmask, maparr):
"""
removes ncolumns from snparray prior to matrix calculation, and
subsamples 'linked' snps (those from the same RAD locus) such that
for these four samples only 1 SNP per locus is kept. This information
comes from the 'map' array (map file).
"""
## mask columns that contain Ns
rmask = np.zeros(seqchunk.shape[1], dtype=np.bool_)
## apply mask to the mapfile
last_loc = -1
for idx in xrange(maparr.shape[0]):
if maparr[idx] != last_loc:
if not nmask[idx]:
rmask[idx] = True
last_loc = maparr[idx]
## apply mask
#newarr = seqchunk[:, rmask]
## return smaller Nmasked array
return rmask
|
[
"removes",
"ncolumns",
"from",
"snparray",
"prior",
"to",
"matrix",
"calculation",
"and",
"subsamples",
"linked",
"snps",
"(",
"those",
"from",
"the",
"same",
"RAD",
"locus",
")",
"such",
"that",
"for",
"these",
"four",
"samples",
"only",
"1",
"SNP",
"per",
"locus",
"is",
"kept",
".",
"This",
"information",
"comes",
"from",
"the",
"map",
"array",
"(",
"map",
"file",
")",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1412-L1434
|
[
"def",
"subsample_snps_map",
"(",
"seqchunk",
",",
"nmask",
",",
"maparr",
")",
":",
"## mask columns that contain Ns",
"rmask",
"=",
"np",
".",
"zeros",
"(",
"seqchunk",
".",
"shape",
"[",
"1",
"]",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"## apply mask to the mapfile",
"last_loc",
"=",
"-",
"1",
"for",
"idx",
"in",
"xrange",
"(",
"maparr",
".",
"shape",
"[",
"0",
"]",
")",
":",
"if",
"maparr",
"[",
"idx",
"]",
"!=",
"last_loc",
":",
"if",
"not",
"nmask",
"[",
"idx",
"]",
":",
"rmask",
"[",
"idx",
"]",
"=",
"True",
"last_loc",
"=",
"maparr",
"[",
"idx",
"]",
"## apply mask",
"#newarr = seqchunk[:, rmask]",
"## return smaller Nmasked array",
"return",
"rmask"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
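A small worked example of the one-SNP-per-locus mask, with a hypothetical map of 6 SNP columns from 3 RAD loci; xrange is aliased so the Python 2 function above also runs under Python 3.

import numpy as np
try:
    xrange
except NameError:      # Python 3
    xrange = range

seqchunk = np.zeros((4, 6), dtype=np.uint8)              # contents unused by the masking logic
nmask = np.array([0, 1, 0, 0, 0, 0], dtype=np.bool_)     # second column is N-masked
maparr = np.array([1, 1, 1, 2, 2, 3], dtype=np.int64)    # locus id of each SNP column

rmask = subsample_snps_map(seqchunk, nmask, maparr)      # function from the record above
## rmask -> [True, False, False, True, False, True]: one kept SNP per locus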
valid
|
chunk_to_matrices
|
numba compiled code to get matrix fast.
arr is a 4 x N seq matrix converted to np.int8
I convert the numbers for ATGC into their respective index for the MAT
matrix, and leave all others as high numbers, i.e., -==45, N==78.
|
ipyrad/analysis/tetrad.py
|
def chunk_to_matrices(narr, mapcol, nmask):
"""
numba compiled code to get matrix fast.
arr is a 4 x N seq matrix converted to np.int8
I convert the numbers for ATGC into their respective index for the MAT
matrix, and leave all others as high numbers, i.e., -==45, N==78.
"""
## get seq alignment and create an empty array for filling
mats = np.zeros((3, 16, 16), dtype=np.uint32)
## replace ints with small ints that index their place in the
## 16x16. This no longer checks for big ints to exclude, so resolve=True
## is now the default, TODO.
last_loc = -1
for idx in xrange(mapcol.shape[0]):
if not nmask[idx]:
if not mapcol[idx] == last_loc:
i = narr[:, idx]
mats[0, (4*i[0])+i[1], (4*i[2])+i[3]] += 1
last_loc = mapcol[idx]
## fill the alternates
x = np.uint8(0)
for y in np.array([0, 4, 8, 12], dtype=np.uint8):
for z in np.array([0, 4, 8, 12], dtype=np.uint8):
mats[1, y:y+np.uint8(4), z:z+np.uint8(4)] = mats[0, x].reshape(4, 4)
mats[2, y:y+np.uint8(4), z:z+np.uint8(4)] = mats[0, x].reshape(4, 4).T
x += np.uint8(1)
return mats
|
def chunk_to_matrices(narr, mapcol, nmask):
"""
numba compiled code to get matrix fast.
arr is a 4 x N seq matrix converted to np.int8
I convert the numbers for ATGC into their respective index for the MAT
matrix, and leave all others as high numbers, i.e., -==45, N==78.
"""
## get seq alignment and create an empty array for filling
mats = np.zeros((3, 16, 16), dtype=np.uint32)
## replace ints with small ints that index their place in the
## 16x16. This no longer checks for big ints to exclude, so resolve=True
## is now the default, TODO.
last_loc = -1
for idx in xrange(mapcol.shape[0]):
if not nmask[idx]:
if not mapcol[idx] == last_loc:
i = narr[:, idx]
mats[0, (4*i[0])+i[1], (4*i[2])+i[3]] += 1
last_loc = mapcol[idx]
## fill the alternates
x = np.uint8(0)
for y in np.array([0, 4, 8, 12], dtype=np.uint8):
for z in np.array([0, 4, 8, 12], dtype=np.uint8):
mats[1, y:y+np.uint8(4), z:z+np.uint8(4)] = mats[0, x].reshape(4, 4)
mats[2, y:y+np.uint8(4), z:z+np.uint8(4)] = mats[0, x].reshape(4, 4).T
x += np.uint8(1)
return mats
|
[
"numba",
"compiled",
"code",
"to",
"get",
"matrix",
"fast",
".",
"arr",
"is",
"a",
"4",
"x",
"N",
"seq",
"matrix",
"converted",
"to",
"np",
".",
"int8",
"I",
"convert",
"the",
"numbers",
"for",
"ATGC",
"into",
"their",
"respective",
"index",
"for",
"the",
"MAT",
"matrix",
"and",
"leave",
"all",
"others",
"as",
"high",
"numbers",
"i",
".",
"e",
".",
"-",
"==",
"45",
"N",
"==",
"78",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1472-L1502
|
[
"def",
"chunk_to_matrices",
"(",
"narr",
",",
"mapcol",
",",
"nmask",
")",
":",
"## get seq alignment and create an empty array for filling",
"mats",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"16",
",",
"16",
")",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"## replace ints with small ints that index their place in the ",
"## 16x16. This no longer checks for big ints to exclude, so resolve=True",
"## is now the default, TODO. ",
"last_loc",
"=",
"-",
"1",
"for",
"idx",
"in",
"xrange",
"(",
"mapcol",
".",
"shape",
"[",
"0",
"]",
")",
":",
"if",
"not",
"nmask",
"[",
"idx",
"]",
":",
"if",
"not",
"mapcol",
"[",
"idx",
"]",
"==",
"last_loc",
":",
"i",
"=",
"narr",
"[",
":",
",",
"idx",
"]",
"mats",
"[",
"0",
",",
"(",
"4",
"*",
"i",
"[",
"0",
"]",
")",
"+",
"i",
"[",
"1",
"]",
",",
"(",
"4",
"*",
"i",
"[",
"2",
"]",
")",
"+",
"i",
"[",
"3",
"]",
"]",
"+=",
"1",
"last_loc",
"=",
"mapcol",
"[",
"idx",
"]",
"## fill the alternates",
"x",
"=",
"np",
".",
"uint8",
"(",
"0",
")",
"for",
"y",
"in",
"np",
".",
"array",
"(",
"[",
"0",
",",
"4",
",",
"8",
",",
"12",
"]",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
":",
"for",
"z",
"in",
"np",
".",
"array",
"(",
"[",
"0",
",",
"4",
",",
"8",
",",
"12",
"]",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
":",
"mats",
"[",
"1",
",",
"y",
":",
"y",
"+",
"np",
".",
"uint8",
"(",
"4",
")",
",",
"z",
":",
"z",
"+",
"np",
".",
"uint8",
"(",
"4",
")",
"]",
"=",
"mats",
"[",
"0",
",",
"x",
"]",
".",
"reshape",
"(",
"4",
",",
"4",
")",
"mats",
"[",
"2",
",",
"y",
":",
"y",
"+",
"np",
".",
"uint8",
"(",
"4",
")",
",",
"z",
":",
"z",
"+",
"np",
".",
"uint8",
"(",
"4",
")",
"]",
"=",
"mats",
"[",
"0",
",",
"x",
"]",
".",
"reshape",
"(",
"4",
",",
"4",
")",
".",
"T",
"x",
"+=",
"np",
".",
"uint8",
"(",
"1",
")",
"return",
"mats"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
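The indexing trick inside chunk_to_matrices() in isolation: with bases coded 0-3, a four-sample column (i0, i1, i2, i3) is tallied at cell (4*i0 + i1, 4*i2 + i3) of the first 16x16 matrix, which is exactly the cell that count_snps() later reads.

i = (0, 0, 1, 1)                        # an aabb column for the four samples
row, col = 4 * i[0] + i[1], 4 * i[2] + i[3]
assert (row, col) == (0, 5)             # the mat[0, 5] cell summed by count_snps()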
valid
|
calculate
|
groups together several numba compiled funcs
|
ipyrad/analysis/tetrad.py
|
def calculate(seqnon, mapcol, nmask, tests):
""" groups together several numba compiled funcs """
## create empty matrices
#LOGGER.info("tests[0] %s", tests[0])
#LOGGER.info('seqnon[[tests[0]]] %s', seqnon[[tests[0]]])
mats = chunk_to_matrices(seqnon, mapcol, nmask)
## empty svdscores for each arrangement of seqchunk
svds = np.zeros((3, 16), dtype=np.float64)
qscores = np.zeros(3, dtype=np.float64)
ranks = np.zeros(3, dtype=np.float64)
for test in range(3):
## get svd scores
svds[test] = np.linalg.svd(mats[test].astype(np.float64))[1]
ranks[test] = np.linalg.matrix_rank(mats[test].astype(np.float64))
## get minrank, or 11
minrank = int(min(11, ranks.min()))
for test in range(3):
qscores[test] = np.sqrt(np.sum(svds[test, minrank:]**2))
## sort to find the best qorder
best = np.where(qscores == qscores.min())[0]
#best = qscores[qscores == qscores.min()][0]
bidx = tests[best][0]
qsnps = count_snps(mats[best][0])
return bidx, qsnps
|
def calculate(seqnon, mapcol, nmask, tests):
""" groups together several numba compiled funcs """
## create empty matrices
#LOGGER.info("tests[0] %s", tests[0])
#LOGGER.info('seqnon[[tests[0]]] %s', seqnon[[tests[0]]])
mats = chunk_to_matrices(seqnon, mapcol, nmask)
## empty svdscores for each arrangement of seqchunk
svds = np.zeros((3, 16), dtype=np.float64)
qscores = np.zeros(3, dtype=np.float64)
ranks = np.zeros(3, dtype=np.float64)
for test in range(3):
## get svd scores
svds[test] = np.linalg.svd(mats[test].astype(np.float64))[1]
ranks[test] = np.linalg.matrix_rank(mats[test].astype(np.float64))
## get minrank, or 11
minrank = int(min(11, ranks.min()))
for test in range(3):
qscores[test] = np.sqrt(np.sum(svds[test, minrank:]**2))
## sort to find the best qorder
best = np.where(qscores == qscores.min())[0]
#best = qscores[qscores == qscores.min()][0]
bidx = tests[best][0]
qsnps = count_snps(mats[best][0])
return bidx, qsnps
|
[
"groups",
"together",
"several",
"numba",
"compiled",
"funcs"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1507-L1536
|
[
"def",
"calculate",
"(",
"seqnon",
",",
"mapcol",
",",
"nmask",
",",
"tests",
")",
":",
"## create empty matrices",
"#LOGGER.info(\"tests[0] %s\", tests[0])",
"#LOGGER.info('seqnon[[tests[0]]] %s', seqnon[[tests[0]]])",
"mats",
"=",
"chunk_to_matrices",
"(",
"seqnon",
",",
"mapcol",
",",
"nmask",
")",
"## empty svdscores for each arrangement of seqchunk",
"svds",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"16",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"qscores",
"=",
"np",
".",
"zeros",
"(",
"3",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"ranks",
"=",
"np",
".",
"zeros",
"(",
"3",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"for",
"test",
"in",
"range",
"(",
"3",
")",
":",
"## get svd scores",
"svds",
"[",
"test",
"]",
"=",
"np",
".",
"linalg",
".",
"svd",
"(",
"mats",
"[",
"test",
"]",
".",
"astype",
"(",
"np",
".",
"float64",
")",
")",
"[",
"1",
"]",
"ranks",
"[",
"test",
"]",
"=",
"np",
".",
"linalg",
".",
"matrix_rank",
"(",
"mats",
"[",
"test",
"]",
".",
"astype",
"(",
"np",
".",
"float64",
")",
")",
"## get minrank, or 11",
"minrank",
"=",
"int",
"(",
"min",
"(",
"11",
",",
"ranks",
".",
"min",
"(",
")",
")",
")",
"for",
"test",
"in",
"range",
"(",
"3",
")",
":",
"qscores",
"[",
"test",
"]",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"svds",
"[",
"test",
",",
"minrank",
":",
"]",
"**",
"2",
")",
")",
"## sort to find the best qorder",
"best",
"=",
"np",
".",
"where",
"(",
"qscores",
"==",
"qscores",
".",
"min",
"(",
")",
")",
"[",
"0",
"]",
"#best = qscores[qscores == qscores.min()][0]",
"bidx",
"=",
"tests",
"[",
"best",
"]",
"[",
"0",
"]",
"qsnps",
"=",
"count_snps",
"(",
"mats",
"[",
"best",
"]",
"[",
"0",
"]",
")",
"return",
"bidx",
",",
"qsnps"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
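The per-arrangement scoring step inside calculate(), shown on its own for a single 16x16 matrix: the score is the root of the summed squared singular values past the estimated rank (capped at 11), which is near zero when the matrix is consistent with the tested split.

import numpy as np

mat = np.random.randint(0, 20, size=(16, 16)).astype(np.float64)

singular_vals = np.linalg.svd(mat)[1]                    # sorted largest-first
minrank = int(min(11, np.linalg.matrix_rank(mat)))
qscore = np.sqrt(np.sum(singular_vals[minrank:] ** 2))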
valid
|
nworker
|
The workhorse function. Not numba.
|
ipyrad/analysis/tetrad.py
|
def nworker(data, smpchunk, tests):
""" The workhorse function. Not numba. """
## tell engines to limit threads
#numba.config.NUMBA_DEFAULT_NUM_THREADS = 1
## open the seqarray view, the modified array is in bootsarr
with h5py.File(data.database.input, 'r') as io5:
seqview = io5["bootsarr"][:]
maparr = io5["bootsmap"][:]
## create an N-mask array of all seq cols (this isn't really too slow)
nall_mask = seqview[:] == 78
    ## tried numba compiling everything below here, but was not faster
## than making nmask w/ axis arg in numpy
## get the input arrays ready
rquartets = np.zeros((smpchunk.shape[0], 4), dtype=np.uint16)
rweights = None
#rweights = np.ones(smpchunk.shape[0], dtype=np.float64)
rdstats = np.zeros((smpchunk.shape[0], 4), dtype=np.uint32)
#times = []
## fill arrays with results using numba funcs
for idx in xrange(smpchunk.shape[0]):
## get seqchunk for 4 samples (4, ncols)
sidx = smpchunk[idx]
seqchunk = seqview[sidx]
## get N-containing columns in 4-array, and invariant sites.
nmask = np.any(nall_mask[sidx], axis=0)
nmask += np.all(seqchunk == seqchunk[0], axis=0) ## <- do we need this?
## get matrices if there are any shared SNPs
## returns best-tree index, qscores, and qstats
#bidx, qscores, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)
bidx, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)
## get weights from the three scores sorted.
## Only save to file if the quartet has information
rdstats[idx] = qstats
rquartets[idx] = smpchunk[idx][bidx]
return rquartets, rweights, rdstats
|
def nworker(data, smpchunk, tests):
""" The workhorse function. Not numba. """
## tell engines to limit threads
#numba.config.NUMBA_DEFAULT_NUM_THREADS = 1
## open the seqarray view, the modified array is in bootsarr
with h5py.File(data.database.input, 'r') as io5:
seqview = io5["bootsarr"][:]
maparr = io5["bootsmap"][:]
## create an N-mask array of all seq cols (this isn't really too slow)
nall_mask = seqview[:] == 78
    ## tried numba compiling everything below here, but was not faster
## than making nmask w/ axis arg in numpy
## get the input arrays ready
rquartets = np.zeros((smpchunk.shape[0], 4), dtype=np.uint16)
rweights = None
#rweights = np.ones(smpchunk.shape[0], dtype=np.float64)
rdstats = np.zeros((smpchunk.shape[0], 4), dtype=np.uint32)
#times = []
## fill arrays with results using numba funcs
for idx in xrange(smpchunk.shape[0]):
## get seqchunk for 4 samples (4, ncols)
sidx = smpchunk[idx]
seqchunk = seqview[sidx]
## get N-containing columns in 4-array, and invariant sites.
nmask = np.any(nall_mask[sidx], axis=0)
nmask += np.all(seqchunk == seqchunk[0], axis=0) ## <- do we need this?
## get matrices if there are any shared SNPs
## returns best-tree index, qscores, and qstats
#bidx, qscores, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)
bidx, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)
## get weights from the three scores sorted.
## Only save to file if the quartet has information
rdstats[idx] = qstats
rquartets[idx] = smpchunk[idx][bidx]
return rquartets, rweights, rdstats
|
[
"The",
"workhorse",
"function",
".",
"Not",
"numba",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1542-L1586
|
[
"def",
"nworker",
"(",
"data",
",",
"smpchunk",
",",
"tests",
")",
":",
"## tell engines to limit threads",
"#numba.config.NUMBA_DEFAULT_NUM_THREADS = 1",
"## open the seqarray view, the modified array is in bootsarr",
"with",
"h5py",
".",
"File",
"(",
"data",
".",
"database",
".",
"input",
",",
"'r'",
")",
"as",
"io5",
":",
"seqview",
"=",
"io5",
"[",
"\"bootsarr\"",
"]",
"[",
":",
"]",
"maparr",
"=",
"io5",
"[",
"\"bootsmap\"",
"]",
"[",
":",
"]",
"## create an N-mask array of all seq cols (this isn't really too slow)",
"nall_mask",
"=",
"seqview",
"[",
":",
"]",
"==",
"78",
"## tried numba compiling everythign below here, but was not faster",
"## than making nmask w/ axis arg in numpy",
"## get the input arrays ready",
"rquartets",
"=",
"np",
".",
"zeros",
"(",
"(",
"smpchunk",
".",
"shape",
"[",
"0",
"]",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"uint16",
")",
"rweights",
"=",
"None",
"#rweights = np.ones(smpchunk.shape[0], dtype=np.float64)",
"rdstats",
"=",
"np",
".",
"zeros",
"(",
"(",
"smpchunk",
".",
"shape",
"[",
"0",
"]",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"#times = []",
"## fill arrays with results using numba funcs",
"for",
"idx",
"in",
"xrange",
"(",
"smpchunk",
".",
"shape",
"[",
"0",
"]",
")",
":",
"## get seqchunk for 4 samples (4, ncols) ",
"sidx",
"=",
"smpchunk",
"[",
"idx",
"]",
"seqchunk",
"=",
"seqview",
"[",
"sidx",
"]",
"## get N-containing columns in 4-array, and invariant sites.",
"nmask",
"=",
"np",
".",
"any",
"(",
"nall_mask",
"[",
"sidx",
"]",
",",
"axis",
"=",
"0",
")",
"nmask",
"+=",
"np",
".",
"all",
"(",
"seqchunk",
"==",
"seqchunk",
"[",
"0",
"]",
",",
"axis",
"=",
"0",
")",
"## <- do we need this?",
"## get matrices if there are any shared SNPs",
"## returns best-tree index, qscores, and qstats",
"#bidx, qscores, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)",
"bidx",
",",
"qstats",
"=",
"calculate",
"(",
"seqchunk",
",",
"maparr",
"[",
":",
",",
"0",
"]",
",",
"nmask",
",",
"tests",
")",
"## get weights from the three scores sorted. ",
"## Only save to file if the quartet has information",
"rdstats",
"[",
"idx",
"]",
"=",
"qstats",
"rquartets",
"[",
"idx",
"]",
"=",
"smpchunk",
"[",
"idx",
"]",
"[",
"bidx",
"]",
"return",
"rquartets",
",",
"rweights",
",",
"rdstats"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
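The column-masking logic inside nworker() on a tiny stand-alone example: a column is dropped if any of the four samples has an N (ASCII 78) or if all four samples are identical.

import numpy as np

## ASCII-coded bases for 4 samples x 4 columns; 78 is "N"
seqchunk = np.array([[65, 67, 78, 71],
                     [65, 84, 71, 71],
                     [65, 67, 71, 71],
                     [65, 67, 71, 67]], dtype=np.uint8)

nmask = np.any(seqchunk == 78, axis=0)              # any sample has an N
nmask += np.all(seqchunk == seqchunk[0], axis=0)    # or the column is invariant
## nmask -> [True, False, True, False]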
valid
|
shuffle_cols
|
used in bootstrap resampling without a map file
|
ipyrad/analysis/tetrad.py
|
def shuffle_cols(seqarr, newarr, cols):
""" used in bootstrap resampling without a map file """
for idx in xrange(cols.shape[0]):
newarr[:, idx] = seqarr[:, cols[idx]]
return newarr
|
def shuffle_cols(seqarr, newarr, cols):
""" used in bootstrap resampling without a map file """
for idx in xrange(cols.shape[0]):
newarr[:, idx] = seqarr[:, cols[idx]]
return newarr
|
[
"used",
"in",
"bootstrap",
"resampling",
"without",
"a",
"map",
"file"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1638-L1642
|
[
"def",
"shuffle_cols",
"(",
"seqarr",
",",
"newarr",
",",
"cols",
")",
":",
"for",
"idx",
"in",
"xrange",
"(",
"cols",
".",
"shape",
"[",
"0",
"]",
")",
":",
"newarr",
"[",
":",
",",
"idx",
"]",
"=",
"seqarr",
"[",
":",
",",
"cols",
"[",
"idx",
"]",
"]",
"return",
"newarr"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
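shuffle_cols() is written as an explicit loop, presumably so it can be numba-compiled like its neighbors; outside numba the same column reorder is a single fancy-indexing expression, as this check shows.

import numpy as np

seqarr = np.arange(12).reshape(3, 4)
cols = np.array([2, 0, 3, 1])

newarr = np.zeros_like(seqarr)
for idx in range(cols.shape[0]):
    newarr[:, idx] = seqarr[:, cols[idx]]

assert np.array_equal(newarr, seqarr[:, cols])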
valid
|
resolve_ambigs
|
returns a seq array with 'RSKYWM' randomly replaced with resolved bases
|
ipyrad/analysis/tetrad.py
|
def resolve_ambigs(tmpseq):
""" returns a seq array with 'RSKYWM' randomly replaced with resolved bases"""
## iterate over the bases 'RSKWYM': [82, 83, 75, 87, 89, 77]
for ambig in np.uint8([82, 83, 75, 87, 89, 77]):
        ## get all sites with this ambig
idx, idy = np.where(tmpseq == ambig)
## get the two resolutions of the ambig
res1, res2 = AMBIGS[ambig.view("S1")]
## randomly sample half those sites
halfmask = np.random.choice([True, False], idx.shape[0])
## replace ambig bases with their resolutions
for i in xrange(halfmask.shape[0]):
if halfmask[i]:
tmpseq[idx[i], idy[i]] = np.array(res1).view(np.uint8)
else:
tmpseq[idx[i], idy[i]] = np.array(res2).view(np.uint8)
return tmpseq
|
def resolve_ambigs(tmpseq):
""" returns a seq array with 'RSKYWM' randomly replaced with resolved bases"""
## iterate over the bases 'RSKWYM': [82, 83, 75, 87, 89, 77]
for ambig in np.uint8([82, 83, 75, 87, 89, 77]):
        ## get all sites with this ambig
idx, idy = np.where(tmpseq == ambig)
## get the two resolutions of the ambig
res1, res2 = AMBIGS[ambig.view("S1")]
## randomly sample half those sites
halfmask = np.random.choice([True, False], idx.shape[0])
## replace ambig bases with their resolutions
for i in xrange(halfmask.shape[0]):
if halfmask[i]:
tmpseq[idx[i], idy[i]] = np.array(res1).view(np.uint8)
else:
tmpseq[idx[i], idy[i]] = np.array(res2).view(np.uint8)
return tmpseq
|
[
"returns",
"a",
"seq",
"array",
"with",
"RSKYWM",
"randomly",
"replaced",
"with",
"resolved",
"bases"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1647-L1663
|
[
"def",
"resolve_ambigs",
"(",
"tmpseq",
")",
":",
"## iterate over the bases 'RSKWYM': [82, 83, 75, 87, 89, 77]",
"for",
"ambig",
"in",
"np",
".",
"uint8",
"(",
"[",
"82",
",",
"83",
",",
"75",
",",
"87",
",",
"89",
",",
"77",
"]",
")",
":",
"## get all site in this ambig",
"idx",
",",
"idy",
"=",
"np",
".",
"where",
"(",
"tmpseq",
"==",
"ambig",
")",
"## get the two resolutions of the ambig",
"res1",
",",
"res2",
"=",
"AMBIGS",
"[",
"ambig",
".",
"view",
"(",
"\"S1\"",
")",
"]",
"## randomly sample half those sites",
"halfmask",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"[",
"True",
",",
"False",
"]",
",",
"idx",
".",
"shape",
"[",
"0",
"]",
")",
"## replace ambig bases with their resolutions",
"for",
"i",
"in",
"xrange",
"(",
"halfmask",
".",
"shape",
"[",
"0",
"]",
")",
":",
"if",
"halfmask",
"[",
"i",
"]",
":",
"tmpseq",
"[",
"idx",
"[",
"i",
"]",
",",
"idy",
"[",
"i",
"]",
"]",
"=",
"np",
".",
"array",
"(",
"res1",
")",
".",
"view",
"(",
"np",
".",
"uint8",
")",
"else",
":",
"tmpseq",
"[",
"idx",
"[",
"i",
"]",
",",
"idy",
"[",
"i",
"]",
"]",
"=",
"np",
".",
"array",
"(",
"res2",
")",
".",
"view",
"(",
"np",
".",
"uint8",
")",
"return",
"tmpseq"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
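The replacement mechanics of resolve_ambigs() on a tiny array, using a stand-in resolution table: the real AMBIGS lookup lives elsewhere in ipyrad, and the G/A resolution for R below is the standard IUPAC pairing, stated here as an assumption.

import numpy as np

RES = {82: (71, 65)}                                 # "R" (82) resolves to "G" (71) or "A" (65)

tmpseq = np.frombuffer(b"ARRTACGT", dtype=np.uint8).reshape(2, 4).copy()
idx, idy = np.where(tmpseq == 82)                    # positions of the ambiguous "R" bases
halfmask = np.random.choice([True, False], idx.shape[0])
for i in range(halfmask.shape[0]):
    tmpseq[idx[i], idy[i]] = RES[82][0] if halfmask[i] else RES[82][1]
## the two R cells now hold a random mix of G and A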
valid
|
get_spans
|
get span distance for each locus in original seqarray
|
ipyrad/analysis/tetrad.py
|
def get_spans(maparr, spans):
""" get span distance for each locus in original seqarray """
## start at 0, finds change at 1-index of map file
bidx = 1
spans = np.zeros((maparr[-1, 0], 2), np.uint64)
## read through marr and record when locus id changes
for idx in xrange(1, maparr.shape[0]):
cur = maparr[idx, 0]
if cur != bidx:
idy = idx + 1
spans[cur-2, 1] = idx
spans[cur-1, 0] = idx
bidx = cur
spans[-1, 1] = maparr[-1, -1]
return spans
|
def get_spans(maparr, spans):
""" get span distance for each locus in original seqarray """
## start at 0, finds change at 1-index of map file
bidx = 1
spans = np.zeros((maparr[-1, 0], 2), np.uint64)
## read through marr and record when locus id changes
for idx in xrange(1, maparr.shape[0]):
cur = maparr[idx, 0]
if cur != bidx:
idy = idx + 1
spans[cur-2, 1] = idx
spans[cur-1, 0] = idx
bidx = cur
spans[-1, 1] = maparr[-1, -1]
return spans
|
[
"get",
"span",
"distance",
"for",
"each",
"locus",
"in",
"original",
"seqarray"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1668-L1682
|
[
"def",
"get_spans",
"(",
"maparr",
",",
"spans",
")",
":",
"## start at 0, finds change at 1-index of map file",
"bidx",
"=",
"1",
"spans",
"=",
"np",
".",
"zeros",
"(",
"(",
"maparr",
"[",
"-",
"1",
",",
"0",
"]",
",",
"2",
")",
",",
"np",
".",
"uint64",
")",
"## read through marr and record when locus id changes",
"for",
"idx",
"in",
"xrange",
"(",
"1",
",",
"maparr",
".",
"shape",
"[",
"0",
"]",
")",
":",
"cur",
"=",
"maparr",
"[",
"idx",
",",
"0",
"]",
"if",
"cur",
"!=",
"bidx",
":",
"idy",
"=",
"idx",
"+",
"1",
"spans",
"[",
"cur",
"-",
"2",
",",
"1",
"]",
"=",
"idx",
"spans",
"[",
"cur",
"-",
"1",
",",
"0",
"]",
"=",
"idx",
"bidx",
"=",
"cur",
"spans",
"[",
"-",
"1",
",",
"1",
"]",
"=",
"maparr",
"[",
"-",
"1",
",",
"-",
"1",
"]",
"return",
"spans"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
get_shape
|
get shape of new bootstrap resampled locus array
|
ipyrad/analysis/tetrad.py
|
def get_shape(spans, loci):
""" get shape of new bootstrap resampled locus array """
width = 0
for idx in xrange(loci.shape[0]):
width += spans[loci[idx], 1] - spans[loci[idx], 0]
return width
|
def get_shape(spans, loci):
""" get shape of new bootstrap resampled locus array """
width = 0
for idx in xrange(loci.shape[0]):
width += spans[loci[idx], 1] - spans[loci[idx], 0]
return width
|
[
"get",
"shape",
"of",
"new",
"bootstrap",
"resampled",
"locus",
"array"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1687-L1692
|
[
"def",
"get_shape",
"(",
"spans",
",",
"loci",
")",
":",
"width",
"=",
"0",
"for",
"idx",
"in",
"xrange",
"(",
"loci",
".",
"shape",
"[",
"0",
"]",
")",
":",
"width",
"+=",
"spans",
"[",
"loci",
"[",
"idx",
"]",
",",
"1",
"]",
"-",
"spans",
"[",
"loci",
"[",
"idx",
"]",
",",
"0",
"]",
"return",
"width"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
fill_boot
|
fills the new bootstrap resampled array
|
ipyrad/analysis/tetrad.py
|
def fill_boot(seqarr, newboot, newmap, spans, loci):
""" fills the new bootstrap resampled array """
## column index
cidx = 0
## resample each locus
for i in xrange(loci.shape[0]):
## grab a random locus's columns
x1 = spans[loci[i]][0]
x2 = spans[loci[i]][1]
cols = seqarr[:, x1:x2]
## randomize columns within colsq
cord = np.random.choice(cols.shape[1], cols.shape[1], replace=False)
rcols = cols[:, cord]
## fill bootarr with n columns from seqarr
## the required length was already measured
newboot[:, cidx:cidx+cols.shape[1]] = rcols
## fill bootmap with new map info
newmap[cidx: cidx+cols.shape[1], 0] = i+1
## advance column index
cidx += cols.shape[1]
## return the concatenated cols
return newboot, newmap
|
def fill_boot(seqarr, newboot, newmap, spans, loci):
""" fills the new bootstrap resampled array """
## column index
cidx = 0
## resample each locus
for i in xrange(loci.shape[0]):
## grab a random locus's columns
x1 = spans[loci[i]][0]
x2 = spans[loci[i]][1]
cols = seqarr[:, x1:x2]
## randomize columns within colsq
cord = np.random.choice(cols.shape[1], cols.shape[1], replace=False)
rcols = cols[:, cord]
## fill bootarr with n columns from seqarr
## the required length was already measured
newboot[:, cidx:cidx+cols.shape[1]] = rcols
## fill bootmap with new map info
newmap[cidx: cidx+cols.shape[1], 0] = i+1
## advance column index
cidx += cols.shape[1]
## return the concatenated cols
return newboot, newmap
|
[
"fills",
"the",
"new",
"bootstrap",
"resampled",
"array"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1697-L1725
|
[
"def",
"fill_boot",
"(",
"seqarr",
",",
"newboot",
",",
"newmap",
",",
"spans",
",",
"loci",
")",
":",
"## column index",
"cidx",
"=",
"0",
"## resample each locus",
"for",
"i",
"in",
"xrange",
"(",
"loci",
".",
"shape",
"[",
"0",
"]",
")",
":",
"## grab a random locus's columns",
"x1",
"=",
"spans",
"[",
"loci",
"[",
"i",
"]",
"]",
"[",
"0",
"]",
"x2",
"=",
"spans",
"[",
"loci",
"[",
"i",
"]",
"]",
"[",
"1",
"]",
"cols",
"=",
"seqarr",
"[",
":",
",",
"x1",
":",
"x2",
"]",
"## randomize columns within colsq",
"cord",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"cols",
".",
"shape",
"[",
"1",
"]",
",",
"cols",
".",
"shape",
"[",
"1",
"]",
",",
"replace",
"=",
"False",
")",
"rcols",
"=",
"cols",
"[",
":",
",",
"cord",
"]",
"## fill bootarr with n columns from seqarr",
"## the required length was already measured",
"newboot",
"[",
":",
",",
"cidx",
":",
"cidx",
"+",
"cols",
".",
"shape",
"[",
"1",
"]",
"]",
"=",
"rcols",
"## fill bootmap with new map info",
"newmap",
"[",
"cidx",
":",
"cidx",
"+",
"cols",
".",
"shape",
"[",
"1",
"]",
",",
"0",
"]",
"=",
"i",
"+",
"1",
"## advance column index",
"cidx",
"+=",
"cols",
".",
"shape",
"[",
"1",
"]",
"## return the concatenated cols",
"return",
"newboot",
",",
"newmap"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
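A hedged driver sketch around get_shape() and fill_boot() from the records above: three loci of two columns each, resampled with replacement into a new bootstrap array (xrange aliased so the Python 2 loops also run under Python 3).

import numpy as np
try:
    xrange
except NameError:      # Python 3
    xrange = range

seqarr = np.arange(24, dtype=np.uint8).reshape(4, 6)           # 4 samples x 6 columns
spans = np.array([[0, 2], [2, 4], [4, 6]], dtype=np.uint64)    # column span of each locus
loci = np.random.choice(3, 3, replace=True)                    # bootstrap-resampled locus ids

width = get_shape(spans, loci)
newboot = np.zeros((seqarr.shape[0], width), dtype=seqarr.dtype)
newmap = np.zeros((width, 2), dtype=np.uint32)
newboot, newmap = fill_boot(seqarr, newboot, newmap, spans, loci)
## newmap[:, 0] labels the resampled columns by their new locus ids 1..3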
valid
|
_byteify
|
converts unicode to utf-8 when reading in json files
|
ipyrad/analysis/tetrad.py
|
def _byteify(data, ignore_dicts=False):
"""
converts unicode to utf-8 when reading in json files
"""
if isinstance(data, unicode):
return data.encode("utf-8")
if isinstance(data, list):
return [_byteify(item, ignore_dicts=True) for item in data]
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
for key, value in data.iteritems()
}
return data
|
def _byteify(data, ignore_dicts=False):
"""
converts unicode to utf-8 when reading in json files
"""
if isinstance(data, unicode):
return data.encode("utf-8")
if isinstance(data, list):
return [_byteify(item, ignore_dicts=True) for item in data]
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
for key, value in data.iteritems()
}
return data
|
[
"converts",
"unicode",
"to",
"utf",
"-",
"8",
"when",
"reading",
"in",
"json",
"files"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1729-L1744
|
[
"def",
"_byteify",
"(",
"data",
",",
"ignore_dicts",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"unicode",
")",
":",
"return",
"data",
".",
"encode",
"(",
"\"utf-8\"",
")",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"return",
"[",
"_byteify",
"(",
"item",
",",
"ignore_dicts",
"=",
"True",
")",
"for",
"item",
"in",
"data",
"]",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
"and",
"not",
"ignore_dicts",
":",
"return",
"{",
"_byteify",
"(",
"key",
",",
"ignore_dicts",
"=",
"True",
")",
":",
"_byteify",
"(",
"value",
",",
"ignore_dicts",
"=",
"True",
")",
"for",
"key",
",",
"value",
"in",
"data",
".",
"iteritems",
"(",
")",
"}",
"return",
"data"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
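_byteify() is the standard Python 2 recipe for turning json.loads output into plain str; its usual pairing is an object_hook plus one final pass with ignore_dicts=True. The repo's own JSON loader is not shown in this record, so the wrapper name below is illustrative rather than ipyrad's exact call.

import json

def json_load_byteified(file_handle):
    ## decode keys/values as they are parsed, then one more pass for the top-level object
    return _byteify(
        json.load(file_handle, object_hook=_byteify),
        ignore_dicts=True)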
valid
|
consensus_tree
|
An extended majority rule consensus function for ete3.
Modelled on the similar function from scikit-bio tree module. If
cutoff=0.5 then it is a normal majority rule consensus, while if
cutoff=0.0 then subsequent non-conflicting clades are added to the tree.
|
ipyrad/analysis/tetrad.py
|
def consensus_tree(trees, names=None, cutoff=0.0):
"""
An extended majority rule consensus function for ete3.
Modelled on the similar function from scikit-bio tree module. If
cutoff=0.5 then it is a normal majority rule consensus, while if
cutoff=0.0 then subsequent non-conflicting clades are added to the tree.
"""
    ## find which clades occurred with freq > cutoff
namedict, clade_counts = _find_clades(trees, names=names)
## filter out the < cutoff clades
fclade_counts = _filter_clades(clade_counts, cutoff)
## build tree
consens_tree, _ = _build_trees(fclade_counts, namedict)
## make sure no singleton nodes were left behind
return consens_tree, clade_counts
|
def consensus_tree(trees, names=None, cutoff=0.0):
"""
An extended majority rule consensus function for ete3.
Modelled on the similar function from scikit-bio tree module. If
cutoff=0.5 then it is a normal majority rule consensus, while if
cutoff=0.0 then subsequent non-conflicting clades are added to the tree.
"""
    ## find which clades occurred with freq > cutoff
namedict, clade_counts = _find_clades(trees, names=names)
## filter out the < cutoff clades
fclade_counts = _filter_clades(clade_counts, cutoff)
## build tree
consens_tree, _ = _build_trees(fclade_counts, namedict)
## make sure no singleton nodes were left behind
return consens_tree, clade_counts
|
[
"An",
"extended",
"majority",
"rule",
"consensus",
"function",
"for",
"ete3",
".",
"Modelled",
"on",
"the",
"similar",
"function",
"from",
"scikit",
"-",
"bio",
"tree",
"module",
".",
"If",
"cutoff",
"=",
"0",
".",
"5",
"then",
"it",
"is",
"a",
"normal",
"majority",
"rule",
"consensus",
"while",
"if",
"cutoff",
"=",
"0",
".",
"0",
"then",
"subsequent",
"non",
"-",
"conflicting",
"clades",
"are",
"added",
"to",
"the",
"tree",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1755-L1772
|
[
"def",
"consensus_tree",
"(",
"trees",
",",
"names",
"=",
"None",
",",
"cutoff",
"=",
"0.0",
")",
":",
"## find which clades occured with freq > cutoff",
"namedict",
",",
"clade_counts",
"=",
"_find_clades",
"(",
"trees",
",",
"names",
"=",
"names",
")",
"## filter out the < cutoff clades",
"fclade_counts",
"=",
"_filter_clades",
"(",
"clade_counts",
",",
"cutoff",
")",
"## build tree",
"consens_tree",
",",
"_",
"=",
"_build_trees",
"(",
"fclade_counts",
",",
"namedict",
")",
"## make sure no singleton nodes were left behind",
"return",
"consens_tree",
",",
"clade_counts"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
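A usage sketch mirroring how compute_tree_stats() above calls consensus_tree(): read bootstrap trees with ete3 (the .boots path below is hypothetical) and build the extended majority-rule consensus. Tip names are taken from the first tree here, whereas compute_tree_stats() passes self.samples.

import ete3

with open("analysis-tetrad/test.boots", 'r') as inboots:
    boots = [ete3.Tree(line.strip(), format=0) for line in inboots]

cons_tree, clade_counts = consensus_tree(boots, names=boots[0].get_leaf_names())
print(cons_tree.write(format=0))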
valid
|
_filter_clades
|
A subfunc of consensus_tree(). Removes clades that occur
with freq < cutoff.
|
ipyrad/analysis/tetrad.py
|
def _filter_clades(clade_counts, cutoff):
"""
A subfunc of consensus_tree(). Removes clades that occur
with freq < cutoff.
"""
## store clades that pass filter
passed = []
clades = np.array([list(i[0]) for i in clade_counts], dtype=np.int8)
counts = np.array([i[1] for i in clade_counts], dtype=np.float64)
for idx in xrange(clades.shape[0]):
conflict = False
if counts[idx] < cutoff:
continue
if np.sum(clades[idx]) > 1:
# check the current clade against all the accepted clades to see if
# it conflicts. A conflict is defined as:
# 1. the clades are not disjoint
# 2. neither clade is a subset of the other
# OR:
# 1. it is inverse of clade (affects only <fake> root state)
# because at root node it mirror images {0011 : 95}, {1100 : 5}.
for aidx in passed:
#intersect = clade.intersection(accepted_clade)
summed = clades[idx] + clades[aidx]
intersect = np.max(summed) > 1
subset_test0 = np.all(clades[idx] - clades[aidx] >= 0)
subset_test1 = np.all(clades[aidx] - clades[idx] >= 0)
invert_test = np.bool_(clades[aidx]) != np.bool_(clades[idx])
if np.all(invert_test):
counts[aidx] += counts[idx]
conflict = True
if intersect:
if (not subset_test0) and (not subset_test1):
conflict = True
if conflict == False:
passed.append(idx)
## rebuild the dict
rclades = []#j for i, j in enumerate(clade_counts) if i in passed]
## set the counts to include mirrors
for idx in passed:
rclades.append((clades[idx], counts[idx]))
return rclades
|
def _filter_clades(clade_counts, cutoff):
"""
A subfunc of consensus_tree(). Removes clades that occur
with freq < cutoff.
"""
## store clades that pass filter
passed = []
clades = np.array([list(i[0]) for i in clade_counts], dtype=np.int8)
counts = np.array([i[1] for i in clade_counts], dtype=np.float64)
for idx in xrange(clades.shape[0]):
conflict = False
if counts[idx] < cutoff:
continue
if np.sum(clades[idx]) > 1:
# check the current clade against all the accepted clades to see if
# it conflicts. A conflict is defined as:
# 1. the clades are not disjoint
# 2. neither clade is a subset of the other
# OR:
# 1. it is inverse of clade (affects only <fake> root state)
# because at root node it mirror images {0011 : 95}, {1100 : 5}.
for aidx in passed:
#intersect = clade.intersection(accepted_clade)
summed = clades[idx] + clades[aidx]
intersect = np.max(summed) > 1
subset_test0 = np.all(clades[idx] - clades[aidx] >= 0)
subset_test1 = np.all(clades[aidx] - clades[idx] >= 0)
invert_test = np.bool_(clades[aidx]) != np.bool_(clades[idx])
if np.all(invert_test):
counts[aidx] += counts[idx]
conflict = True
if intersect:
if (not subset_test0) and (not subset_test1):
conflict = True
if conflict == False:
passed.append(idx)
## rebuild the dict
rclades = []#j for i, j in enumerate(clade_counts) if i in passed]
## set the counts to include mirrors
for idx in passed:
rclades.append((clades[idx], counts[idx]))
return rclades
|
[
"A",
"subfunc",
"of",
"consensus_tree",
"()",
".",
"Removes",
"clades",
"that",
"occur",
"with",
"freq",
"<",
"cutoff",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1776-L1824
|
[
"def",
"_filter_clades",
"(",
"clade_counts",
",",
"cutoff",
")",
":",
"## store clades that pass filter",
"passed",
"=",
"[",
"]",
"clades",
"=",
"np",
".",
"array",
"(",
"[",
"list",
"(",
"i",
"[",
"0",
"]",
")",
"for",
"i",
"in",
"clade_counts",
"]",
",",
"dtype",
"=",
"np",
".",
"int8",
")",
"counts",
"=",
"np",
".",
"array",
"(",
"[",
"i",
"[",
"1",
"]",
"for",
"i",
"in",
"clade_counts",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"for",
"idx",
"in",
"xrange",
"(",
"clades",
".",
"shape",
"[",
"0",
"]",
")",
":",
"conflict",
"=",
"False",
"if",
"counts",
"[",
"idx",
"]",
"<",
"cutoff",
":",
"continue",
"if",
"np",
".",
"sum",
"(",
"clades",
"[",
"idx",
"]",
")",
">",
"1",
":",
"# check the current clade against all the accepted clades to see if",
"# it conflicts. A conflict is defined as:",
"# 1. the clades are not disjoint",
"# 2. neither clade is a subset of the other",
"# OR:",
"# 1. it is inverse of clade (affects only <fake> root state)",
"# because at root node it mirror images {0011 : 95}, {1100 : 5}.",
"for",
"aidx",
"in",
"passed",
":",
"#intersect = clade.intersection(accepted_clade)",
"summed",
"=",
"clades",
"[",
"idx",
"]",
"+",
"clades",
"[",
"aidx",
"]",
"intersect",
"=",
"np",
".",
"max",
"(",
"summed",
")",
">",
"1",
"subset_test0",
"=",
"np",
".",
"all",
"(",
"clades",
"[",
"idx",
"]",
"-",
"clades",
"[",
"aidx",
"]",
">=",
"0",
")",
"subset_test1",
"=",
"np",
".",
"all",
"(",
"clades",
"[",
"aidx",
"]",
"-",
"clades",
"[",
"idx",
"]",
">=",
"0",
")",
"invert_test",
"=",
"np",
".",
"bool_",
"(",
"clades",
"[",
"aidx",
"]",
")",
"!=",
"np",
".",
"bool_",
"(",
"clades",
"[",
"idx",
"]",
")",
"if",
"np",
".",
"all",
"(",
"invert_test",
")",
":",
"counts",
"[",
"aidx",
"]",
"+=",
"counts",
"[",
"idx",
"]",
"conflict",
"=",
"True",
"if",
"intersect",
":",
"if",
"(",
"not",
"subset_test0",
")",
"and",
"(",
"not",
"subset_test1",
")",
":",
"conflict",
"=",
"True",
"if",
"conflict",
"==",
"False",
":",
"passed",
".",
"append",
"(",
"idx",
")",
"## rebuild the dict",
"rclades",
"=",
"[",
"]",
"#j for i, j in enumerate(clade_counts) if i in passed]",
"## set the counts to include mirrors",
"for",
"idx",
"in",
"passed",
":",
"rclades",
".",
"append",
"(",
"(",
"clades",
"[",
"idx",
"]",
",",
"counts",
"[",
"idx",
"]",
")",
")",
"return",
"rclades"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
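The _filter_clades() record above tests each candidate clade (a 0/1 mask over tips) against already accepted clades, keeping it only if the two are disjoint or nested. Below is a self-contained numpy sketch of that disjoint-or-subset test on made-up masks; it trims the root-mirroring bookkeeping and is an illustration, not the library function.

import numpy as np

def filter_clades(clade_counts, cutoff):
    # clades are 0/1 masks over tips; keep frequent, mutually compatible ones
    clades = np.array([c for c, _ in clade_counts], dtype=np.int8)
    counts = np.array([n for _, n in clade_counts], dtype=np.float64)
    passed = []
    for idx in range(clades.shape[0]):
        if counts[idx] < cutoff:
            continue
        conflict = False
        for aidx in passed:
            overlap = np.max(clades[idx] + clades[aidx]) > 1   # not disjoint
            sub0 = np.all(clades[idx] - clades[aidx] >= 0)     # idx contains aidx
            sub1 = np.all(clades[aidx] - clades[idx] >= 0)     # aidx contains idx
            if overlap and not (sub0 or sub1):
                conflict = True
        if not conflict:
            passed.append(idx)
    return [(clades[i], counts[i]) for i in passed]

# four tips: the third clade overlaps the first without nesting, so it is dropped
example = [([1, 1, 0, 0], 0.9), ([0, 0, 1, 1], 0.8), ([0, 1, 1, 0], 0.2)]
print(filter_clades(example, cutoff=0.1))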
valid
|
Tetrad.refresh
|
Remove all existing results files and reinit the h5 arrays
so that the tetrad object is just like fresh from a CLI start.
|
ipyrad/analysis/tetrad.py
|
def refresh(self):
"""
Remove all existing results files and reinit the h5 arrays
so that the tetrad object is just like fresh from a CLI start.
"""
## clear any existing results files
oldfiles = [self.files.qdump] + \
self.database.__dict__.values() + \
self.trees.__dict__.values()
for oldfile in oldfiles:
if oldfile:
if os.path.exists(oldfile):
os.remove(oldfile)
## store old ipcluster info
oldcluster = copy.deepcopy(self._ipcluster)
## reinit the tetrad object data.
self.__init__(
name=self.name,
data=self.files.data,
mapfile=self.files.mapfile,
workdir=self.dirs,
method=self.params.method,
guidetreefile=self.files.guidetreefile,
resolve=self._resolve,
nboots=self.params.nboots,
nquartets=self.params.nquartets,
initarr=True,
quiet=True,
cli=self.kwargs.get("cli")
)
## retain the same ipcluster info
self._ipcluster = oldcluster
|
def refresh(self):
"""
Remove all existing results files and reinit the h5 arrays
so that the tetrad object is just like fresh from a CLI start.
"""
## clear any existing results files
oldfiles = [self.files.qdump] + \
self.database.__dict__.values() + \
self.trees.__dict__.values()
for oldfile in oldfiles:
if oldfile:
if os.path.exists(oldfile):
os.remove(oldfile)
## store old ipcluster info
oldcluster = copy.deepcopy(self._ipcluster)
## reinit the tetrad object data.
self.__init__(
name=self.name,
data=self.files.data,
mapfile=self.files.mapfile,
workdir=self.dirs,
method=self.params.method,
guidetreefile=self.files.guidetreefile,
resolve=self._resolve,
nboots=self.params.nboots,
nquartets=self.params.nquartets,
initarr=True,
quiet=True,
cli=self.kwargs.get("cli")
)
## retain the same ipcluster info
self._ipcluster = oldcluster
|
[
"Remove",
"all",
"existing",
"results",
"files",
"and",
"reinit",
"the",
"h5",
"arrays",
"so",
"that",
"the",
"tetrad",
"object",
"is",
"just",
"like",
"fresh",
"from",
"a",
"CLI",
"start",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L239-L274
|
[
"def",
"refresh",
"(",
"self",
")",
":",
"## clear any existing results files",
"oldfiles",
"=",
"[",
"self",
".",
"files",
".",
"qdump",
"]",
"+",
"self",
".",
"database",
".",
"__dict__",
".",
"values",
"(",
")",
"+",
"self",
".",
"trees",
".",
"__dict__",
".",
"values",
"(",
")",
"for",
"oldfile",
"in",
"oldfiles",
":",
"if",
"oldfile",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"oldfile",
")",
":",
"os",
".",
"remove",
"(",
"oldfile",
")",
"## store old ipcluster info",
"oldcluster",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_ipcluster",
")",
"## reinit the tetrad object data.",
"self",
".",
"__init__",
"(",
"name",
"=",
"self",
".",
"name",
",",
"data",
"=",
"self",
".",
"files",
".",
"data",
",",
"mapfile",
"=",
"self",
".",
"files",
".",
"mapfile",
",",
"workdir",
"=",
"self",
".",
"dirs",
",",
"method",
"=",
"self",
".",
"params",
".",
"method",
",",
"guidetreefile",
"=",
"self",
".",
"files",
".",
"guidetreefile",
",",
"resolve",
"=",
"self",
".",
"_resolve",
",",
"nboots",
"=",
"self",
".",
"params",
".",
"nboots",
",",
"nquartets",
"=",
"self",
".",
"params",
".",
"nquartets",
",",
"initarr",
"=",
"True",
",",
"quiet",
"=",
"True",
",",
"cli",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"\"cli\"",
")",
")",
"## retain the same ipcluster info",
"self",
".",
"_ipcluster",
"=",
"oldcluster"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
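The refresh() record above deletes old result files, re-runs __init__, and restores the saved ipcluster settings. The sketch below shows that delete-reinit-restore pattern on a small hypothetical class; the class and attribute names are invented for illustration and are not part of ipyrad.

import copy
import os
import tempfile

class ToyJob(object):
    def __init__(self, name, workdir):
        self.name = name
        self.workdir = workdir
        self.outfiles = [os.path.join(workdir, name + ext) for ext in (".tree", ".boots")]
        self.cluster_info = {"cores": 4}          # stand-in for the retained cluster settings

    def refresh(self):
        # remove any existing result files
        for path in self.outfiles:
            if path and os.path.exists(path):
                os.remove(path)
        # re-initialize everything, then restore the saved runtime settings
        saved = copy.deepcopy(self.cluster_info)
        self.__init__(self.name, self.workdir)
        self.cluster_info = saved

job = ToyJob("test", tempfile.mkdtemp())
open(job.outfiles[0], "w").close()                # pretend an old result exists
job.refresh()
print([os.path.exists(p) for p in job.outfiles])  # [False, False]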
valid
|
Tetrad._parse_names
|
parse sample names from the sequence file
|
ipyrad/analysis/tetrad.py
|
def _parse_names(self):
""" parse sample names from the sequence file"""
self.samples = []
with iter(open(self.files.data, 'r')) as infile:
infile.next().strip().split()
while 1:
try:
self.samples.append(infile.next().split()[0])
except StopIteration:
break
|
def _parse_names(self):
""" parse sample names from the sequence file"""
self.samples = []
with iter(open(self.files.data, 'r')) as infile:
infile.next().strip().split()
while 1:
try:
self.samples.append(infile.next().split()[0])
except StopIteration:
break
|
[
"parse",
"sample",
"names",
"from",
"the",
"sequence",
"file"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L278-L287
|
[
"def",
"_parse_names",
"(",
"self",
")",
":",
"self",
".",
"samples",
"=",
"[",
"]",
"with",
"iter",
"(",
"open",
"(",
"self",
".",
"files",
".",
"data",
",",
"'r'",
")",
")",
"as",
"infile",
":",
"infile",
".",
"next",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"while",
"1",
":",
"try",
":",
"self",
".",
"samples",
".",
"append",
"(",
"infile",
".",
"next",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"except",
"StopIteration",
":",
"break"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
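The _parse_names() record above reads the first column of a phylip-style file, skipping the header; it relies on Python 2 file iteration (infile.next()). Below is a Python 3 sketch of the same parse using a context manager and a temporary toy file; the file contents are made up.

import os
import tempfile

def parse_names(path):
    # return the first whitespace-delimited token of every line after the header
    samples = []
    with open(path, "r") as infile:
        infile.readline()                 # skip the "ntax nbp" header line
        for line in infile:
            if line.strip():
                samples.append(line.split()[0])
    return samples

# toy phylip-like input
tmp = os.path.join(tempfile.mkdtemp(), "toy.snps.phy")
with open(tmp, "w") as out:
    out.write("3 8\nsampleA ACGTACGT\nsampleB ACGTANGT\nsampleC ACCTACGT\n")
print(parse_names(tmp))                   # ['sampleA', 'sampleB', 'sampleC']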
valid
|
Tetrad._init_seqarray
|
Fills the seqarr with the full data set, and creates a bootsarr copy
with the following modifications:
1) converts "-" into "N"s, since they are similarly treated as missing.
2) randomly resolve ambiguities (RSKWYM)
3) convert to uint8 for smaller memory load and faster computation
|
ipyrad/analysis/tetrad.py
|
def _init_seqarray(self, quiet=False):
"""
Fills the seqarr with the full data set, and creates a bootsarr copy
with the following modifications:
1) converts "-" into "N"s, since they are similarly treated as missing.
2) randomly resolve ambiguities (RSKWYM)
3) convert to uint8 for smaller memory load and faster computation
"""
## read in the data (seqfile)
try:
spath = open(self.files.data, 'r')
except IOError:
raise IPyradWarningExit(NO_SNP_FILE.format(self.files.data))
line = spath.readline().strip().split()
ntax = int(line[0])
nbp = int(line[1])
## make a tmp seq array
if not quiet:
print("loading seq array [{} taxa x {} bp]".format(ntax, nbp))
tmpseq = np.zeros((ntax, nbp), dtype=np.uint8)
## create array storage for real seq and the tmp bootstrap seqarray
with h5py.File(self.database.input, 'w') as io5:
io5.create_dataset("seqarr", (ntax, nbp), dtype=np.uint8)
io5.create_dataset("bootsarr", (ntax, nbp), dtype=np.uint8)
io5.create_dataset("bootsmap", (nbp, 2), dtype=np.uint32)
## if there is a map file, load it into the bootsmap
if self.files.mapfile:
with open(self.files.mapfile, 'r') as inmap:
## parse the map file from txt and save as dataset
maparr = np.genfromtxt(inmap, dtype=np.uint64)
io5["bootsmap"][:] = maparr[:, [0, 3]]
## parse the span info from maparr and save to dataset
spans = np.zeros((maparr[-1, 0], 2), np.uint64)
spans = get_spans(maparr, spans)
io5.create_dataset("spans", data=spans)
if not quiet:
print("max unlinked SNPs per quartet (nloci): {}"\
.format(spans.shape[0]))
else:
io5["bootsmap"][:, 0] = np.arange(io5["bootsmap"].shape[0])
## fill the tmp array from the input phy
for line, seq in enumerate(spath.readlines()):
tmpseq[line] = np.array(list(seq.split()[-1])).view(np.uint8)
## convert '-' or '_' into 'N'
tmpseq[tmpseq == 45] = 78
tmpseq[tmpseq == 95] = 78
## save array to disk so it can be easily accessed by slicing
## This unmodified array is used again later for sampling boots
io5["seqarr"][:] = tmpseq
## resolve ambiguous IUPAC codes
if self._resolve:
tmpseq = resolve_ambigs(tmpseq)
## convert CATG bases to matrix indices
tmpseq[tmpseq == 65] = 0
tmpseq[tmpseq == 67] = 1
tmpseq[tmpseq == 71] = 2
tmpseq[tmpseq == 84] = 3
## save modified array to disk
io5["bootsarr"][:] = tmpseq
## memory cleanup
#del tmpseq
## get initial array
LOGGER.info("original seqarr \n %s", io5["seqarr"][:, :20])
LOGGER.info("original bootsarr \n %s", io5["bootsarr"][:, :20])
LOGGER.info("original bootsmap \n %s", io5["bootsmap"][:20, :])
|
def _init_seqarray(self, quiet=False):
"""
Fills the seqarr with the full data set, and creates a bootsarr copy
with the following modifications:
1) converts "-" into "N"s, since they are similarly treated as missing.
2) randomly resolve ambiguities (RSKWYM)
3) convert to uint8 for smaller memory load and faster computation
"""
## read in the data (seqfile)
try:
spath = open(self.files.data, 'r')
except IOError:
raise IPyradWarningExit(NO_SNP_FILE.format(self.files.data))
line = spath.readline().strip().split()
ntax = int(line[0])
nbp = int(line[1])
## make a tmp seq array
if not quiet:
print("loading seq array [{} taxa x {} bp]".format(ntax, nbp))
tmpseq = np.zeros((ntax, nbp), dtype=np.uint8)
## create array storage for real seq and the tmp bootstrap seqarray
with h5py.File(self.database.input, 'w') as io5:
io5.create_dataset("seqarr", (ntax, nbp), dtype=np.uint8)
io5.create_dataset("bootsarr", (ntax, nbp), dtype=np.uint8)
io5.create_dataset("bootsmap", (nbp, 2), dtype=np.uint32)
## if there is a map file, load it into the bootsmap
if self.files.mapfile:
with open(self.files.mapfile, 'r') as inmap:
## parse the map file from txt and save as dataset
maparr = np.genfromtxt(inmap, dtype=np.uint64)
io5["bootsmap"][:] = maparr[:, [0, 3]]
## parse the span info from maparr and save to dataset
spans = np.zeros((maparr[-1, 0], 2), np.uint64)
spans = get_spans(maparr, spans)
io5.create_dataset("spans", data=spans)
if not quiet:
print("max unlinked SNPs per quartet (nloci): {}"\
.format(spans.shape[0]))
else:
io5["bootsmap"][:, 0] = np.arange(io5["bootsmap"].shape[0])
## fill the tmp array from the input phy
for line, seq in enumerate(spath.readlines()):
tmpseq[line] = np.array(list(seq.split()[-1])).view(np.uint8)
## convert '-' or '_' into 'N'
tmpseq[tmpseq == 45] = 78
tmpseq[tmpseq == 95] = 78
## save array to disk so it can be easily accessed by slicing
## This unmodified array is used again later for sampling boots
io5["seqarr"][:] = tmpseq
## resolve ambiguous IUPAC codes
if self._resolve:
tmpseq = resolve_ambigs(tmpseq)
## convert CATG bases to matrix indices
tmpseq[tmpseq == 65] = 0
tmpseq[tmpseq == 67] = 1
tmpseq[tmpseq == 71] = 2
tmpseq[tmpseq == 84] = 3
## save modified array to disk
io5["bootsarr"][:] = tmpseq
## memory cleanup
#del tmpseq
## get initial array
LOGGER.info("original seqarr \n %s", io5["seqarr"][:, :20])
LOGGER.info("original bootsarr \n %s", io5["bootsarr"][:, :20])
LOGGER.info("original bootsmap \n %s", io5["bootsmap"][:20, :])
|
[
"Fills",
"the",
"seqarr",
"with",
"the",
"full",
"data",
"set",
"and",
"creates",
"a",
"bootsarr",
"copy",
"with",
"the",
"following",
"modifications",
":"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L291-L369
|
[
"def",
"_init_seqarray",
"(",
"self",
",",
"quiet",
"=",
"False",
")",
":",
"## read in the data (seqfile)",
"try",
":",
"spath",
"=",
"open",
"(",
"self",
".",
"files",
".",
"data",
",",
"'r'",
")",
"except",
"IOError",
":",
"raise",
"IPyradWarningExit",
"(",
"NO_SNP_FILE",
".",
"format",
"(",
"self",
".",
"files",
".",
"data",
")",
")",
"line",
"=",
"spath",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"ntax",
"=",
"int",
"(",
"line",
"[",
"0",
"]",
")",
"nbp",
"=",
"int",
"(",
"line",
"[",
"1",
"]",
")",
"## make a tmp seq array",
"if",
"not",
"quiet",
":",
"print",
"(",
"\"loading seq array [{} taxa x {} bp]\"",
".",
"format",
"(",
"ntax",
",",
"nbp",
")",
")",
"tmpseq",
"=",
"np",
".",
"zeros",
"(",
"(",
"ntax",
",",
"nbp",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"## create array storage for real seq and the tmp bootstrap seqarray",
"with",
"h5py",
".",
"File",
"(",
"self",
".",
"database",
".",
"input",
",",
"'w'",
")",
"as",
"io5",
":",
"io5",
".",
"create_dataset",
"(",
"\"seqarr\"",
",",
"(",
"ntax",
",",
"nbp",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"io5",
".",
"create_dataset",
"(",
"\"bootsarr\"",
",",
"(",
"ntax",
",",
"nbp",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"io5",
".",
"create_dataset",
"(",
"\"bootsmap\"",
",",
"(",
"nbp",
",",
"2",
")",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"## if there is a map file, load it into the bootsmap",
"if",
"self",
".",
"files",
".",
"mapfile",
":",
"with",
"open",
"(",
"self",
".",
"files",
".",
"mapfile",
",",
"'r'",
")",
"as",
"inmap",
":",
"## parse the map file from txt and save as dataset",
"maparr",
"=",
"np",
".",
"genfromtxt",
"(",
"inmap",
",",
"dtype",
"=",
"np",
".",
"uint64",
")",
"io5",
"[",
"\"bootsmap\"",
"]",
"[",
":",
"]",
"=",
"maparr",
"[",
":",
",",
"[",
"0",
",",
"3",
"]",
"]",
"## parse the span info from maparr and save to dataset",
"spans",
"=",
"np",
".",
"zeros",
"(",
"(",
"maparr",
"[",
"-",
"1",
",",
"0",
"]",
",",
"2",
")",
",",
"np",
".",
"uint64",
")",
"spans",
"=",
"get_spans",
"(",
"maparr",
",",
"spans",
")",
"io5",
".",
"create_dataset",
"(",
"\"spans\"",
",",
"data",
"=",
"spans",
")",
"if",
"not",
"quiet",
":",
"print",
"(",
"\"max unlinked SNPs per quartet (nloci): {}\"",
".",
"format",
"(",
"spans",
".",
"shape",
"[",
"0",
"]",
")",
")",
"else",
":",
"io5",
"[",
"\"bootsmap\"",
"]",
"[",
":",
",",
"0",
"]",
"=",
"np",
".",
"arange",
"(",
"io5",
"[",
"\"bootsmap\"",
"]",
".",
"shape",
"[",
"0",
"]",
")",
"## fill the tmp array from the input phy",
"for",
"line",
",",
"seq",
"in",
"enumerate",
"(",
"spath",
".",
"readlines",
"(",
")",
")",
":",
"tmpseq",
"[",
"line",
"]",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"seq",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
")",
")",
".",
"view",
"(",
"np",
".",
"uint8",
")",
"## convert '-' or '_' into 'N'",
"tmpseq",
"[",
"tmpseq",
"==",
"45",
"]",
"=",
"78",
"tmpseq",
"[",
"tmpseq",
"==",
"95",
"]",
"=",
"78",
"## save array to disk so it can be easily accessed by slicing",
"## This unmodified array is used again later for sampling boots",
"io5",
"[",
"\"seqarr\"",
"]",
"[",
":",
"]",
"=",
"tmpseq",
"## resolve ambiguous IUPAC codes",
"if",
"self",
".",
"_resolve",
":",
"tmpseq",
"=",
"resolve_ambigs",
"(",
"tmpseq",
")",
"## convert CATG bases to matrix indices",
"tmpseq",
"[",
"tmpseq",
"==",
"65",
"]",
"=",
"0",
"tmpseq",
"[",
"tmpseq",
"==",
"67",
"]",
"=",
"1",
"tmpseq",
"[",
"tmpseq",
"==",
"71",
"]",
"=",
"2",
"tmpseq",
"[",
"tmpseq",
"==",
"84",
"]",
"=",
"3",
"## save modified array to disk ",
"io5",
"[",
"\"bootsarr\"",
"]",
"[",
":",
"]",
"=",
"tmpseq",
"## memory cleanup",
"#del tmpseq",
"## get initial array",
"LOGGER",
".",
"info",
"(",
"\"original seqarr \\n %s\"",
",",
"io5",
"[",
"\"seqarr\"",
"]",
"[",
":",
",",
":",
"20",
"]",
")",
"LOGGER",
".",
"info",
"(",
"\"original bootsarr \\n %s\"",
",",
"io5",
"[",
"\"bootsarr\"",
"]",
"[",
":",
",",
":",
"20",
"]",
")",
"LOGGER",
".",
"info",
"(",
"\"original bootsmap \\n %s\"",
",",
"io5",
"[",
"\"bootsmap\"",
"]",
"[",
":",
"20",
",",
":",
"]",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
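The _init_seqarray() record above encodes the alignment as uint8, recodes '-' and '_' to 'N', and maps A/C/G/T to matrix indices 0-3 before writing to HDF5. The sketch below isolates just that recoding step on an in-memory string with numpy; it assumes nothing about the h5py layout or map files.

import numpy as np

def encode_seq(seq):
    # view the ASCII sequence as uint8, recode gaps to 'N', then map ACGT to 0..3
    arr = np.array(list(seq), dtype="S1").view(np.uint8)
    arr[arr == ord("-")] = ord("N")
    arr[arr == ord("_")] = ord("N")
    for base, code in zip(b"ACGT", (0, 1, 2, 3)):
        arr[arr == base] = code
    return arr

print(encode_seq("ACGT-ANT_"))   # N and gap positions stay at 78, bases become 0..3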
valid
|
Tetrad._store_N_samples
|
Find all quartets of samples and store in a large array
Create a chunk size for sampling from the array of quartets.
This should be relatively large so that we don't spend a lot of time
doing I/O, but small enough that jobs finish often for checkpointing.
|
ipyrad/analysis/tetrad.py
|
def _store_N_samples(self, ncpus):
"""
Find all quartets of samples and store in a large array
Create a chunk size for sampling from the array of quartets.
This should be relatively large so that we don't spend a lot of time
doing I/O, but small enough that jobs finish often for checkpointing.
"""
breaks = 2
if self.params.nquartets < 5000:
breaks = 1
if self.params.nquartets > 100000:
breaks = 4
if self.params.nquartets > 500000:
breaks = 8
## chunk up the data
self._chunksize = (self.params.nquartets // (breaks * ncpus) + \
(self.params.nquartets % (breaks * ncpus)))
LOGGER.info("nquarts = %s, chunk = %s", self.params.nquartets, self._chunksize)
## 'samples' stores the indices of the quartet.
## `quartets` stores the correct quartet in the order (1,2|3,4)
## `weights` stores the weight of the quartet in 'quartets'
## we gzip this for now, but check later if this has a big speed cost
## create h5 OUT empty arrays
with h5py.File(self.database.output, 'w') as io5:
io5.create_dataset("quartets",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4))
io5.create_dataset("qstats",
(self.params.nquartets, 4),
dtype=np.uint32,
chunks=(self._chunksize, 4))
io5.create_group("qboots")
## append to h5 IN array (which also has seqarray) and fill it
with h5py.File(self.database.input, 'a') as io5:
## create data sets
io5.create_dataset("samples",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4),
compression='gzip')
## populate array with all possible quartets. This allows us to
## sample from the total, and also to continue from a checkpoint
qiter = itertools.combinations(xrange(len(self.samples)), 4)
i = 0
## fill chunksize at a time for efficiency
while i < self.params.nquartets:
if self.params.method != "all":
## grab the next random 1000
qiter = []
while len(qiter) < min(self._chunksize, io5["samples"].shape[0]):
qiter.append(
random_combination(range(len(self.samples)), 4))
dat = np.array(qiter)
else:
## grab the next ordered chunksize
dat = np.array(list(itertools.islice(qiter, self._chunksize)))
## store to h5
io5["samples"][i:i+self._chunksize] = dat[:io5["samples"].shape[0] - i]
i += self._chunksize
|
def _store_N_samples(self, ncpus):
"""
Find all quartets of samples and store in a large array
Create a chunk size for sampling from the array of quartets.
This should be relatively large so that we don't spend a lot of time
doing I/O, but small enough that jobs finish often for checkpointing.
"""
breaks = 2
if self.params.nquartets < 5000:
breaks = 1
if self.params.nquartets > 100000:
breaks = 4
if self.params.nquartets > 500000:
breaks = 8
## chunk up the data
self._chunksize = (self.params.nquartets // (breaks * ncpus) + \
(self.params.nquartets % (breaks * ncpus)))
LOGGER.info("nquarts = %s, chunk = %s", self.params.nquartets, self._chunksize)
## 'samples' stores the indices of the quartet.
## `quartets` stores the correct quartet in the order (1,2|3,4)
## `weights` stores the weight of the quartet in 'quartets'
## we gzip this for now, but check later if this has a big speed cost
## create h5 OUT empty arrays
with h5py.File(self.database.output, 'w') as io5:
io5.create_dataset("quartets",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4))
io5.create_dataset("qstats",
(self.params.nquartets, 4),
dtype=np.uint32,
chunks=(self._chunksize, 4))
io5.create_group("qboots")
## append to h5 IN array (which also has seqarray) and fill it
with h5py.File(self.database.input, 'a') as io5:
## create data sets
io5.create_dataset("samples",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4),
compression='gzip')
## populate array with all possible quartets. This allows us to
## sample from the total, and also to continue from a checkpoint
qiter = itertools.combinations(xrange(len(self.samples)), 4)
i = 0
## fill chunksize at a time for efficiency
while i < self.params.nquartets:
if self.params.method != "all":
## grab the next random 1000
qiter = []
while len(qiter) < min(self._chunksize, io5["samples"].shape[0]):
qiter.append(
random_combination(range(len(self.samples)), 4))
dat = np.array(qiter)
else:
## grab the next ordered chunksize
dat = np.array(list(itertools.islice(qiter, self._chunksize)))
## store to h5
io5["samples"][i:i+self._chunksize] = dat[:io5["samples"].shape[0] - i]
i += self._chunksize
|
[
"Find",
"all",
"quartets",
"of",
"samples",
"and",
"store",
"in",
"a",
"large",
"array",
"Create",
"a",
"chunk",
"size",
"for",
"sampling",
"from",
"the",
"array",
"of",
"quartets",
".",
"This",
"should",
"be",
"relatively",
"large",
"so",
"that",
"we",
"don",
"t",
"spend",
"a",
"lot",
"of",
"time",
"doing",
"I",
"/",
"O",
"but",
"small",
"enough",
"that",
"jobs",
"finish",
"often",
"for",
"checkpointing",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L445-L512
|
[
"def",
"_store_N_samples",
"(",
"self",
",",
"ncpus",
")",
":",
"breaks",
"=",
"2",
"if",
"self",
".",
"params",
".",
"nquartets",
"<",
"5000",
":",
"breaks",
"=",
"1",
"if",
"self",
".",
"params",
".",
"nquartets",
">",
"100000",
":",
"breaks",
"=",
"4",
"if",
"self",
".",
"params",
".",
"nquartets",
">",
"500000",
":",
"breaks",
"=",
"8",
"## chunk up the data",
"self",
".",
"_chunksize",
"=",
"(",
"self",
".",
"params",
".",
"nquartets",
"//",
"(",
"breaks",
"*",
"ncpus",
")",
"+",
"(",
"self",
".",
"params",
".",
"nquartets",
"%",
"(",
"breaks",
"*",
"ncpus",
")",
")",
")",
"LOGGER",
".",
"info",
"(",
"\"nquarts = %s, chunk = %s\"",
",",
"self",
".",
"params",
".",
"nquartets",
",",
"self",
".",
"_chunksize",
")",
"## 'samples' stores the indices of the quartet. ",
"## `quartets` stores the correct quartet in the order (1,2|3,4)",
"## `weights` stores the weight of the quartet in 'quartets'",
"## we gzip this for now, but check later if this has a big speed cost",
"## create h5 OUT empty arrays",
"with",
"h5py",
".",
"File",
"(",
"self",
".",
"database",
".",
"output",
",",
"'w'",
")",
"as",
"io5",
":",
"io5",
".",
"create_dataset",
"(",
"\"quartets\"",
",",
"(",
"self",
".",
"params",
".",
"nquartets",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"uint16",
",",
"chunks",
"=",
"(",
"self",
".",
"_chunksize",
",",
"4",
")",
")",
"io5",
".",
"create_dataset",
"(",
"\"qstats\"",
",",
"(",
"self",
".",
"params",
".",
"nquartets",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"uint32",
",",
"chunks",
"=",
"(",
"self",
".",
"_chunksize",
",",
"4",
")",
")",
"io5",
".",
"create_group",
"(",
"\"qboots\"",
")",
"## append to h5 IN array (which also has seqarray) and fill it",
"with",
"h5py",
".",
"File",
"(",
"self",
".",
"database",
".",
"input",
",",
"'a'",
")",
"as",
"io5",
":",
"## create data sets",
"io5",
".",
"create_dataset",
"(",
"\"samples\"",
",",
"(",
"self",
".",
"params",
".",
"nquartets",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"uint16",
",",
"chunks",
"=",
"(",
"self",
".",
"_chunksize",
",",
"4",
")",
",",
"compression",
"=",
"'gzip'",
")",
"## populate array with all possible quartets. This allows us to ",
"## sample from the total, and also to continue from a checkpoint",
"qiter",
"=",
"itertools",
".",
"combinations",
"(",
"xrange",
"(",
"len",
"(",
"self",
".",
"samples",
")",
")",
",",
"4",
")",
"i",
"=",
"0",
"## fill chunksize at a time for efficiency",
"while",
"i",
"<",
"self",
".",
"params",
".",
"nquartets",
":",
"if",
"self",
".",
"params",
".",
"method",
"!=",
"\"all\"",
":",
"## grab the next random 1000",
"qiter",
"=",
"[",
"]",
"while",
"len",
"(",
"qiter",
")",
"<",
"min",
"(",
"self",
".",
"_chunksize",
",",
"io5",
"[",
"\"samples\"",
"]",
".",
"shape",
"[",
"0",
"]",
")",
":",
"qiter",
".",
"append",
"(",
"random_combination",
"(",
"range",
"(",
"len",
"(",
"self",
".",
"samples",
")",
")",
",",
"4",
")",
")",
"dat",
"=",
"np",
".",
"array",
"(",
"qiter",
")",
"else",
":",
"## grab the next ordered chunksize",
"dat",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"itertools",
".",
"islice",
"(",
"qiter",
",",
"self",
".",
"_chunksize",
")",
")",
")",
"## store to h5 ",
"io5",
"[",
"\"samples\"",
"]",
"[",
"i",
":",
"i",
"+",
"self",
".",
"_chunksize",
"]",
"=",
"dat",
"[",
":",
"io5",
"[",
"\"samples\"",
"]",
".",
"shape",
"[",
"0",
"]",
"-",
"i",
"]",
"i",
"+=",
"self",
".",
"_chunksize"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
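The _store_N_samples() record above fills an HDF5 "samples" dataset chunk by chunk from itertools.combinations so the full set of quartets never sits in memory at once. Below is the same chunked-fill pattern written against a plain numpy array with a small toy size; the h5py and ipyparallel details are left out.

import itertools
import numpy as np

def store_all_quartets(nsamples, chunksize):
    # write every 4-sample combination into a preallocated array, one chunk at a time
    nquartets = sum(1 for _ in itertools.combinations(range(nsamples), 4))
    out = np.zeros((nquartets, 4), dtype=np.uint16)
    qiter = itertools.combinations(range(nsamples), 4)
    i = 0
    while i < nquartets:
        chunk = np.array(list(itertools.islice(qiter, chunksize)), dtype=np.uint16)
        out[i:i + chunk.shape[0]] = chunk
        i += chunksize
    return out

arr = store_all_quartets(nsamples=6, chunksize=4)   # C(6, 4) = 15 quartets
print(arr.shape, arr[:3].tolist())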
valid
|
Tetrad._store_equal_samples
|
sample quartets evenly across splits of the starting tree, and fills
in remaining samples with random quartet samples. Uses a hash dict to
not sample the same quartet twice, so for very large trees this can
take a few minutes to find millions of possible quartet samples.
|
ipyrad/analysis/tetrad.py
|
def _store_equal_samples(self, ncpus):
"""
sample quartets evenly across splits of the starting tree, and fills
in remaining samples with random quartet samples. Uses a hash dict to
not sample the same quartet twice, so for very large trees this can
take a few minutes to find millions of possible quartet samples.
"""
## choose chunker for h5 arr
breaks = 2
if self.params.nquartets < 5000:
breaks = 1
if self.params.nquartets > 100000:
breaks = 4
if self.params.nquartets > 500000:
breaks = 8
self._chunksize = (self.params.nquartets // (breaks * ncpus) + \
(self.params.nquartets % (breaks * ncpus)))
LOGGER.info("nquarts = %s, chunk = %s", self.params.nquartets, self._chunksize)
## create h5 OUT empty arrays
with h5py.File(self.database.output, 'w') as io5:
io5.create_dataset("quartets",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4))
io5.create_dataset("qstats",
(self.params.nquartets, 4),
dtype=np.uint32,
chunks=(self._chunksize, 4))
io5.create_group("qboots")
## get starting tree, unroot, randomly resolve, ladderize
tre = ete3.Tree(self.files.guidetreefile, format=0)
#tre = toytree.tree(self.files.guidetreefile, format=0)
tre.tree.unroot()
tre.tree.resolve_polytomy(recursive=True)
tre.tree.ladderize()
## randomly sample all splits of tree and convert tip names to indices
splits = [([self.samples.index(z.name) for z in i],
[self.samples.index(z.name) for z in j]) \
for (i, j) in tre.get_edges()]
## only keep internal splits (no single tips edges)
## this seemed to cause problems with unsampled tips
splits = [i for i in splits if all([len(j) > 1 for j in i])]
## turn each into an iterable split sampler
## if the nquartets for that split is small, then sample all of them
## if it is big, then make it a random sampler from that split
qiters = []
## how many min quartets are we gonna sample from each split?
squarts = self.params.nquartets // len(splits)
## how many iterators can be sampled to saturation?
nsaturation = 0
for split in splits:
## if small number at this split then sample all possible sets
## we will exhaust this quickly and then switch to random for
## the larger splits.
if n_choose_k(len(split[0]), 2) * n_choose_k(len(split[1]), 2) < squarts*2:
qiter = (i+j for (i, j) in itertools.product(
itertools.combinations(split[0], 2),
itertools.combinations(split[1], 2)))
nsaturation += 1
## else create random sampler across that split, this is slower
## because it can propose the same split repeatedly and so we
## have to check it against the 'sampled' set.
else:
qiter = (random_product(split[0], split[1]) for _ \
in xrange(self.params.nquartets))
nsaturation += 1
## store all iterators into a list
qiters.append(qiter)
#for split in splits:
# print(split)
## make qiters infinitely cycling
qiters = itertools.cycle(qiters)
cycler = itertools.cycle(range(len(splits)))
## store visiting quartets
sampled = set()
## iterate over qiters sampling from each, if one runs out, keep
## sampling from remaining qiters. Keep going until samples is filled
with h5py.File(self.database.input, 'a') as io5:
## create data sets
io5.create_dataset("samples",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4),
compression='gzip')
## fill chunksize at a time for efficiency
i = 0
empty = set()
edge_targeted = 0
random_target = 0
## keep filling quartets until nquartets are sampled
while i < self.params.nquartets:
qdat = []
## keep filling this chunk until its full
while len(qdat) < self._chunksize:
## grab the next iterator
qiter = qiters.next()
cycle = cycler.next()
## sample from iterator
try:
qrtsamp = qiter.next()
if tuple(qrtsamp) not in sampled:
qdat.append(qrtsamp)
sampled.add(qrtsamp)
edge_targeted += 1
#else:
# print('repeat')
## unless iterator is empty, then skip it
except StopIteration:
empty.add(cycle)
## break when all edge samplers are empty
if len(empty) == nsaturation:
break
## if array is not full then add random samples
while len(qdat) < self._chunksize:
qrtsamp = random_combination(range(len(self.samples)), 4)
if tuple(qrtsamp) not in sampled:
qdat.append(qrtsamp)
sampled.add(qrtsamp)
random_target += 1
## stick chunk into h5 array
dat = np.array(qdat, dtype=np.uint16)
io5["samples"][i:i+self._chunksize] = dat[:io5["samples"].shape[0] - i]
i += self._chunksize
print(" equal sampling: {} edge quartets, {} random quartets "\
.format(edge_targeted, random_target))
|
def _store_equal_samples(self, ncpus):
"""
sample quartets evenly across splits of the starting tree, and fills
in remaining samples with random quartet samples. Uses a hash dict to
not sample the same quartet twice, so for very large trees this can
take a few minutes to find millions of possible quartet samples.
"""
## choose chunker for h5 arr
breaks = 2
if self.params.nquartets < 5000:
breaks = 1
if self.params.nquartets > 100000:
breaks = 4
if self.params.nquartets > 500000:
breaks = 8
self._chunksize = (self.params.nquartets // (breaks * ncpus) + \
(self.params.nquartets % (breaks * ncpus)))
LOGGER.info("nquarts = %s, chunk = %s", self.params.nquartets, self._chunksize)
## create h5 OUT empty arrays
with h5py.File(self.database.output, 'w') as io5:
io5.create_dataset("quartets",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4))
io5.create_dataset("qstats",
(self.params.nquartets, 4),
dtype=np.uint32,
chunks=(self._chunksize, 4))
io5.create_group("qboots")
## get starting tree, unroot, randomly resolve, ladderize
tre = ete3.Tree(self.files.guidetreefile, format=0)
#tre = toytree.tree(self.files.guidetreefile, format=0)
tre.tree.unroot()
tre.tree.resolve_polytomy(recursive=True)
tre.tree.ladderize()
## randomly sample all splits of tree and convert tip names to indices
splits = [([self.samples.index(z.name) for z in i],
[self.samples.index(z.name) for z in j]) \
for (i, j) in tre.get_edges()]
## only keep internal splits (no single tips edges)
## this seemed to cause problems with unsampled tips
splits = [i for i in splits if all([len(j) > 1 for j in i])]
## turn each into an iterable split sampler
## if the nquartets for that split is small, then sample all of them
## if it is big, then make it a random sampler from that split
qiters = []
## how many min quartets are we gonna sample from each split?
squarts = self.params.nquartets // len(splits)
## how many iterators can be sampled to saturation?
nsaturation = 0
for split in splits:
## if small number at this split then sample all possible sets
## we will exhaust this quickly and then switch to random for
## the larger splits.
if n_choose_k(len(split[0]), 2) * n_choose_k(len(split[1]), 2) < squarts*2:
qiter = (i+j for (i, j) in itertools.product(
itertools.combinations(split[0], 2),
itertools.combinations(split[1], 2)))
nsaturation += 1
## else create random sampler across that split, this is slower
## because it can propose the same split repeatedly and so we
## have to check it against the 'sampled' set.
else:
qiter = (random_product(split[0], split[1]) for _ \
in xrange(self.params.nquartets))
nsaturation += 1
## store all iterators into a list
qiters.append(qiter)
#for split in splits:
# print(split)
## make qiters infinitely cycling
qiters = itertools.cycle(qiters)
cycler = itertools.cycle(range(len(splits)))
## store visiting quartets
sampled = set()
## iterate over qiters sampling from each, if one runs out, keep
## sampling from remaining qiters. Keep going until samples is filled
with h5py.File(self.database.input, 'a') as io5:
## create data sets
io5.create_dataset("samples",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4),
compression='gzip')
## fill chunksize at a time for efficiency
i = 0
empty = set()
edge_targeted = 0
random_target = 0
## keep filling quartets until nquartets are sampled
while i < self.params.nquartets:
qdat = []
## keep filling this chunk until its full
while len(qdat) < self._chunksize:
## grab the next iterator
qiter = qiters.next()
cycle = cycler.next()
## sample from iterator
try:
qrtsamp = qiter.next()
if tuple(qrtsamp) not in sampled:
qdat.append(qrtsamp)
sampled.add(qrtsamp)
edge_targeted += 1
#else:
# print('repeat')
## unless iterator is empty, then skip it
except StopIteration:
empty.add(cycle)
## break when all edge samplers are empty
if len(empty) == nsaturation:
break
## if array is not full then add random samples
while len(qdat) < self._chunksize:
qrtsamp = random_combination(range(len(self.samples)), 4)
if tuple(qrtsamp) not in sampled:
qdat.append(qrtsamp)
sampled.add(qrtsamp)
random_target += 1
## stick chunk into h5 array
dat = np.array(qdat, dtype=np.uint16)
io5["samples"][i:i+self._chunksize] = dat[:io5["samples"].shape[0] - i]
i += self._chunksize
print(" equal sampling: {} edge quartets, {} random quartets "\
.format(edge_targeted, random_target))
|
[
"sample",
"quartets",
"evenly",
"across",
"splits",
"of",
"the",
"starting",
"tree",
"and",
"fills",
"in",
"remaining",
"samples",
"with",
"random",
"quartet",
"samples",
".",
"Uses",
"a",
"hash",
"dict",
"to",
"not",
"sample",
"the",
"same",
"quartet",
"twice",
"so",
"for",
"very",
"large",
"trees",
"this",
"can",
"take",
"a",
"few",
"minutes",
"to",
"find",
"millions",
"of",
"possible",
"quartet",
"samples",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L517-L665
|
[
"def",
"_store_equal_samples",
"(",
"self",
",",
"ncpus",
")",
":",
"## choose chunker for h5 arr",
"breaks",
"=",
"2",
"if",
"self",
".",
"params",
".",
"nquartets",
"<",
"5000",
":",
"breaks",
"=",
"1",
"if",
"self",
".",
"params",
".",
"nquartets",
">",
"100000",
":",
"breaks",
"=",
"4",
"if",
"self",
".",
"params",
".",
"nquartets",
">",
"500000",
":",
"breaks",
"=",
"8",
"self",
".",
"_chunksize",
"=",
"(",
"self",
".",
"params",
".",
"nquartets",
"//",
"(",
"breaks",
"*",
"ncpus",
")",
"+",
"(",
"self",
".",
"params",
".",
"nquartets",
"%",
"(",
"breaks",
"*",
"ncpus",
")",
")",
")",
"LOGGER",
".",
"info",
"(",
"\"nquarts = %s, chunk = %s\"",
",",
"self",
".",
"params",
".",
"nquartets",
",",
"self",
".",
"_chunksize",
")",
"## create h5 OUT empty arrays",
"with",
"h5py",
".",
"File",
"(",
"self",
".",
"database",
".",
"output",
",",
"'w'",
")",
"as",
"io5",
":",
"io5",
".",
"create_dataset",
"(",
"\"quartets\"",
",",
"(",
"self",
".",
"params",
".",
"nquartets",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"uint16",
",",
"chunks",
"=",
"(",
"self",
".",
"_chunksize",
",",
"4",
")",
")",
"io5",
".",
"create_dataset",
"(",
"\"qstats\"",
",",
"(",
"self",
".",
"params",
".",
"nquartets",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"uint32",
",",
"chunks",
"=",
"(",
"self",
".",
"_chunksize",
",",
"4",
")",
")",
"io5",
".",
"create_group",
"(",
"\"qboots\"",
")",
"## get starting tree, unroot, randomly resolve, ladderize",
"tre",
"=",
"ete3",
".",
"Tree",
"(",
"self",
".",
"files",
".",
"guidetreefile",
",",
"format",
"=",
"0",
")",
"#tre = toytree.tree(self.files.guidetreefile, format=0)",
"tre",
".",
"tree",
".",
"unroot",
"(",
")",
"tre",
".",
"tree",
".",
"resolve_polytomy",
"(",
"recursive",
"=",
"True",
")",
"tre",
".",
"tree",
".",
"ladderize",
"(",
")",
"## randomly sample all splits of tree and convert tip names to indices",
"splits",
"=",
"[",
"(",
"[",
"self",
".",
"samples",
".",
"index",
"(",
"z",
".",
"name",
")",
"for",
"z",
"in",
"i",
"]",
",",
"[",
"self",
".",
"samples",
".",
"index",
"(",
"z",
".",
"name",
")",
"for",
"z",
"in",
"j",
"]",
")",
"for",
"(",
"i",
",",
"j",
")",
"in",
"tre",
".",
"get_edges",
"(",
")",
"]",
"## only keep internal splits (no single tips edges)",
"## this seemed to cause problems with unsampled tips",
"splits",
"=",
"[",
"i",
"for",
"i",
"in",
"splits",
"if",
"all",
"(",
"[",
"len",
"(",
"j",
")",
">",
"1",
"for",
"j",
"in",
"i",
"]",
")",
"]",
"## turn each into an iterable split sampler",
"## if the nquartets for that split is small, then sample all of them",
"## if it is big, then make it a random sampler from that split",
"qiters",
"=",
"[",
"]",
"## how many min quartets are we gonna sample from each split?",
"squarts",
"=",
"self",
".",
"params",
".",
"nquartets",
"//",
"len",
"(",
"splits",
")",
"## how many iterators can be sampled to saturation?",
"nsaturation",
"=",
"0",
"for",
"split",
"in",
"splits",
":",
"## if small number at this split then sample all possible sets",
"## we will exhaust this quickly and then switch to random for ",
"## the larger splits.",
"if",
"n_choose_k",
"(",
"len",
"(",
"split",
"[",
"0",
"]",
")",
",",
"2",
")",
"*",
"n_choose_k",
"(",
"len",
"(",
"split",
"[",
"1",
"]",
")",
",",
"2",
")",
"<",
"squarts",
"*",
"2",
":",
"qiter",
"=",
"(",
"i",
"+",
"j",
"for",
"(",
"i",
",",
"j",
")",
"in",
"itertools",
".",
"product",
"(",
"itertools",
".",
"combinations",
"(",
"split",
"[",
"0",
"]",
",",
"2",
")",
",",
"itertools",
".",
"combinations",
"(",
"split",
"[",
"1",
"]",
",",
"2",
")",
")",
")",
"nsaturation",
"+=",
"1",
"## else create random sampler across that split, this is slower",
"## because it can propose the same split repeatedly and so we ",
"## have to check it against the 'sampled' set.",
"else",
":",
"qiter",
"=",
"(",
"random_product",
"(",
"split",
"[",
"0",
"]",
",",
"split",
"[",
"1",
"]",
")",
"for",
"_",
"in",
"xrange",
"(",
"self",
".",
"params",
".",
"nquartets",
")",
")",
"nsaturation",
"+=",
"1",
"## store all iterators into a list",
"qiters",
".",
"append",
"(",
"qiter",
")",
"#for split in splits:",
"# print(split)",
"## make qiters infinitely cycling",
"qiters",
"=",
"itertools",
".",
"cycle",
"(",
"qiters",
")",
"cycler",
"=",
"itertools",
".",
"cycle",
"(",
"range",
"(",
"len",
"(",
"splits",
")",
")",
")",
"## store visiting quartets",
"sampled",
"=",
"set",
"(",
")",
"## iterate over qiters sampling from each, if one runs out, keep ",
"## sampling from remaining qiters. Keep going until samples is filled",
"with",
"h5py",
".",
"File",
"(",
"self",
".",
"database",
".",
"input",
",",
"'a'",
")",
"as",
"io5",
":",
"## create data sets",
"io5",
".",
"create_dataset",
"(",
"\"samples\"",
",",
"(",
"self",
".",
"params",
".",
"nquartets",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"uint16",
",",
"chunks",
"=",
"(",
"self",
".",
"_chunksize",
",",
"4",
")",
",",
"compression",
"=",
"'gzip'",
")",
"## fill chunksize at a time for efficiency",
"i",
"=",
"0",
"empty",
"=",
"set",
"(",
")",
"edge_targeted",
"=",
"0",
"random_target",
"=",
"0",
"## keep filling quartets until nquartets are sampled",
"while",
"i",
"<",
"self",
".",
"params",
".",
"nquartets",
":",
"qdat",
"=",
"[",
"]",
"## keep filling this chunk until its full",
"while",
"len",
"(",
"qdat",
")",
"<",
"self",
".",
"_chunksize",
":",
"## grab the next iterator",
"qiter",
"=",
"qiters",
".",
"next",
"(",
")",
"cycle",
"=",
"cycler",
".",
"next",
"(",
")",
"## sample from iterator",
"try",
":",
"qrtsamp",
"=",
"qiter",
".",
"next",
"(",
")",
"if",
"tuple",
"(",
"qrtsamp",
")",
"not",
"in",
"sampled",
":",
"qdat",
".",
"append",
"(",
"qrtsamp",
")",
"sampled",
".",
"add",
"(",
"qrtsamp",
")",
"edge_targeted",
"+=",
"1",
"#else:",
"# print('repeat')",
"## unless iterator is empty, then skip it",
"except",
"StopIteration",
":",
"empty",
".",
"add",
"(",
"cycle",
")",
"## break when all edge samplers are empty",
"if",
"len",
"(",
"empty",
")",
"==",
"nsaturation",
":",
"break",
"## if array is not full then add random samples",
"while",
"len",
"(",
"qdat",
")",
"<",
"self",
".",
"_chunksize",
":",
"qrtsamp",
"=",
"random_combination",
"(",
"range",
"(",
"len",
"(",
"self",
".",
"samples",
")",
")",
",",
"4",
")",
"if",
"tuple",
"(",
"qrtsamp",
")",
"not",
"in",
"sampled",
":",
"qdat",
".",
"append",
"(",
"qrtsamp",
")",
"sampled",
".",
"add",
"(",
"qrtsamp",
")",
"random_target",
"+=",
"1",
"## stick chunk into h5 array",
"dat",
"=",
"np",
".",
"array",
"(",
"qdat",
",",
"dtype",
"=",
"np",
".",
"uint16",
")",
"io5",
"[",
"\"samples\"",
"]",
"[",
"i",
":",
"i",
"+",
"self",
".",
"_chunksize",
"]",
"=",
"dat",
"[",
":",
"io5",
"[",
"\"samples\"",
"]",
".",
"shape",
"[",
"0",
"]",
"-",
"i",
"]",
"i",
"+=",
"self",
".",
"_chunksize",
"print",
"(",
"\" equal sampling: {} edge quartets, {} random quartets \"",
".",
"format",
"(",
"edge_targeted",
",",
"random_target",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
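The _store_equal_samples() record above cycles over per-split quartet iterators, deduplicates with a "sampled" set, and tops up with random quartets once the split samplers run dry. The sketch below keeps only that cycle-dedupe-top-up logic on invented splits; the guide-tree handling and HDF5 storage are omitted.

import itertools
import random

def sample_quartets(splits, nsamples, nquartets):
    # one iterator per split: two tips from each side make a quartet
    iters = [
        (a + b for a, b in itertools.product(
            itertools.combinations(left, 2), itertools.combinations(right, 2)))
        for left, right in splits
    ]
    sampled = set()
    exhausted = set()
    cycler = itertools.cycle(range(len(iters)))
    # cycle over the split samplers, skipping any that run dry
    while len(sampled) < nquartets and len(exhausted) < len(iters):
        k = next(cycler)
        if k in exhausted:
            continue
        try:
            sampled.add(next(iters[k]))
        except StopIteration:
            exhausted.add(k)
    # top up with random quartets if the split samplers could not fill the target
    rng = random.Random(42)
    while len(sampled) < nquartets:
        sampled.add(tuple(sorted(rng.sample(range(nsamples), 4))))
    return sorted(sampled)

splits = [((0, 1, 2), (3, 4, 5)), ((0, 1), (2, 3, 4, 5))]
print(sample_quartets(splits, nsamples=6, nquartets=10)[:5])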
valid
|
Tetrad._run_qmc
|
runs quartet max-cut on a quartets file
|
ipyrad/analysis/tetrad.py
|
def _run_qmc(self, boot):
""" runs quartet max-cut on a quartets file """
## convert to txt file for wQMC
self._tmp = os.path.join(self.dirs, ".tmpwtre")
cmd = [ip.bins.qmc, "qrtt="+self.files.qdump, "otre="+self._tmp]
## run them
proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
res = proc.communicate()
if proc.returncode:
#LOGGER.error("Error in QMC: \n({}).".format(res))
LOGGER.error(res)
raise IPyradWarningExit(res[1])
## read in the tmp files since qmc does not pipe
with open(self._tmp) as intree:
## convert int names back to str names renamer returns a newick str
#tmp = toytree.tree(intree.read().strip())
tmp = ete3.Tree(intree.read().strip())
tmpwtre = self._renamer(tmp)#.tree)
## save the tree
if boot:
self.trees.boots = os.path.join(self.dirs, self.name+".boots")
with open(self.trees.boots, 'a') as outboot:
outboot.write(tmpwtre+"\n")
else:
self.trees.tree = os.path.join(self.dirs, self.name+".tree")
with open(self.trees.tree, 'w') as outtree:
outtree.write(tmpwtre)
## save JSON file checkpoint
self._save()
|
def _run_qmc(self, boot):
""" runs quartet max-cut on a quartets file """
## convert to txt file for wQMC
self._tmp = os.path.join(self.dirs, ".tmpwtre")
cmd = [ip.bins.qmc, "qrtt="+self.files.qdump, "otre="+self._tmp]
## run them
proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
res = proc.communicate()
if proc.returncode:
#LOGGER.error("Error in QMC: \n({}).".format(res))
LOGGER.error(res)
raise IPyradWarningExit(res[1])
## read in the tmp files since qmc does not pipe
with open(self._tmp) as intree:
## convert int names back to str names renamer returns a newick str
#tmp = toytree.tree(intree.read().strip())
tmp = ete3.Tree(intree.read().strip())
tmpwtre = self._renamer(tmp)#.tree)
## save the tree
if boot:
self.trees.boots = os.path.join(self.dirs, self.name+".boots")
with open(self.trees.boots, 'a') as outboot:
outboot.write(tmpwtre+"\n")
else:
self.trees.tree = os.path.join(self.dirs, self.name+".tree")
with open(self.trees.tree, 'w') as outtree:
outtree.write(tmpwtre)
## save JSON file checkpoint
self._save()
|
[
"runs",
"quartet",
"max",
"-",
"cut",
"on",
"a",
"quartets",
"file"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L669-L702
|
[
"def",
"_run_qmc",
"(",
"self",
",",
"boot",
")",
":",
"## convert to txt file for wQMC",
"self",
".",
"_tmp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
",",
"\".tmpwtre\"",
")",
"cmd",
"=",
"[",
"ip",
".",
"bins",
".",
"qmc",
",",
"\"qrtt=\"",
"+",
"self",
".",
"files",
".",
"qdump",
",",
"\"otre=\"",
"+",
"self",
".",
"_tmp",
"]",
"## run them",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"res",
"=",
"proc",
".",
"communicate",
"(",
")",
"if",
"proc",
".",
"returncode",
":",
"#LOGGER.error(\"Error in QMC: \\n({}).\".format(res))",
"LOGGER",
".",
"error",
"(",
"res",
")",
"raise",
"IPyradWarningExit",
"(",
"res",
"[",
"1",
"]",
")",
"## read in the tmp files since qmc does not pipe",
"with",
"open",
"(",
"self",
".",
"_tmp",
")",
"as",
"intree",
":",
"## convert int names back to str names renamer returns a newick str",
"#tmp = toytree.tree(intree.read().strip())",
"tmp",
"=",
"ete3",
".",
"Tree",
"(",
"intree",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
")",
"tmpwtre",
"=",
"self",
".",
"_renamer",
"(",
"tmp",
")",
"#.tree)",
"## save the tree",
"if",
"boot",
":",
"self",
".",
"trees",
".",
"boots",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
",",
"self",
".",
"name",
"+",
"\".boots\"",
")",
"with",
"open",
"(",
"self",
".",
"trees",
".",
"boots",
",",
"'a'",
")",
"as",
"outboot",
":",
"outboot",
".",
"write",
"(",
"tmpwtre",
"+",
"\"\\n\"",
")",
"else",
":",
"self",
".",
"trees",
".",
"tree",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
",",
"self",
".",
"name",
"+",
"\".tree\"",
")",
"with",
"open",
"(",
"self",
".",
"trees",
".",
"tree",
",",
"'w'",
")",
"as",
"outtree",
":",
"outtree",
".",
"write",
"(",
"tmpwtre",
")",
"## save JSON file checkpoint",
"self",
".",
"_save",
"(",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
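The _run_qmc() record above shells out to the bundled quartet max-cut binary and raises if the process returns a nonzero exit code. Below is the same subprocess pattern with a placeholder command (echo on a Unix-like system) standing in for the real QMC call, which is an assumption made so the example runs anywhere.

import subprocess

def run_external(cmd):
    # run a command, capture combined stdout/stderr, and raise on a nonzero exit code
    proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    if proc.returncode:
        raise RuntimeError("command failed: {}\n{}".format(cmd, out.decode()))
    return out.decode()

# stands in for [ip.bins.qmc, "qrtt=" + qdump_path, "otre=" + tmp_path]
print(run_external(["echo", "qmc finished"]).strip())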
valid
|
Tetrad._dump_qmc
|
Makes a reduced array that excludes quartets with no information and
prints the quartets and weights to a file formatted for wQMC
|
ipyrad/analysis/tetrad.py
|
def _dump_qmc(self):
"""
Makes a reduced array that excludes quartets with no information and
prints the quartets and weights to a file formatted for wQMC
"""
## open the h5 database
io5 = h5py.File(self.database.output, 'r')
## create an output file for writing
self.files.qdump = os.path.join(self.dirs, self.name+".quartets.txt")
LOGGER.info("qdump file %s", self.files.qdump)
outfile = open(self.files.qdump, 'w')
## todo: should pull quarts order in randomly? or doesn't matter?
for idx in xrange(0, self.params.nquartets, self._chunksize):
## get mask of zero weight quartets
#mask = io5["weights"][idx:idx+self.chunksize] != 0
#weight = io5["weights"][idx:idx+self.chunksize][mask]
#LOGGER.info("exluded = %s, mask shape %s",
# self._chunksize - mask.shape[0], mask.shape)
#LOGGER.info('q shape %s', io5["quartets"][idx:idx+self._chunksize].shape)
masked_quartets = io5["quartets"][idx:idx+self._chunksize, :]#[mask, :]
quarts = [list(j) for j in masked_quartets]
## format and print
#chunk = ["{},{}|{},{}:{}".format(*i+[j]) for i, j \
# in zip(quarts, weight)]
chunk = ["{},{}|{},{}".format(*i) for i in quarts]
outfile.write("\n".join(chunk)+"\n")
## close output file and h5 database
outfile.close()
io5.close()
|
def _dump_qmc(self):
"""
Makes a reduced array that excludes quartets with no information and
prints the quartets and weights to a file formatted for wQMC
"""
## open the h5 database
io5 = h5py.File(self.database.output, 'r')
## create an output file for writing
self.files.qdump = os.path.join(self.dirs, self.name+".quartets.txt")
LOGGER.info("qdump file %s", self.files.qdump)
outfile = open(self.files.qdump, 'w')
## todo: should pull quarts order in randomly? or doesn't matter?
for idx in xrange(0, self.params.nquartets, self._chunksize):
## get mask of zero weight quartets
#mask = io5["weights"][idx:idx+self.chunksize] != 0
#weight = io5["weights"][idx:idx+self.chunksize][mask]
#LOGGER.info("exluded = %s, mask shape %s",
# self._chunksize - mask.shape[0], mask.shape)
#LOGGER.info('q shape %s', io5["quartets"][idx:idx+self._chunksize].shape)
masked_quartets = io5["quartets"][idx:idx+self._chunksize, :]#[mask, :]
quarts = [list(j) for j in masked_quartets]
## format and print
#chunk = ["{},{}|{},{}:{}".format(*i+[j]) for i, j \
# in zip(quarts, weight)]
chunk = ["{},{}|{},{}".format(*i) for i in quarts]
outfile.write("\n".join(chunk)+"\n")
## close output file and h5 database
outfile.close()
io5.close()
|
[
"Makes",
"a",
"reduced",
"array",
"that",
"excludes",
"quartets",
"with",
"no",
"information",
"and",
"prints",
"the",
"quartets",
"and",
"weights",
"to",
"a",
"file",
"formatted",
"for",
"wQMC"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L707-L740
|
[
"def",
"_dump_qmc",
"(",
"self",
")",
":",
"## open the h5 database",
"io5",
"=",
"h5py",
".",
"File",
"(",
"self",
".",
"database",
".",
"output",
",",
"'r'",
")",
"## create an output file for writing",
"self",
".",
"files",
".",
"qdump",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
",",
"self",
".",
"name",
"+",
"\".quartets.txt\"",
")",
"LOGGER",
".",
"info",
"(",
"\"qdump file %s\"",
",",
"self",
".",
"files",
".",
"qdump",
")",
"outfile",
"=",
"open",
"(",
"self",
".",
"files",
".",
"qdump",
",",
"'w'",
")",
"## todo: should pull quarts order in randomly? or doesn't matter?",
"for",
"idx",
"in",
"xrange",
"(",
"0",
",",
"self",
".",
"params",
".",
"nquartets",
",",
"self",
".",
"_chunksize",
")",
":",
"## get mask of zero weight quartets",
"#mask = io5[\"weights\"][idx:idx+self.chunksize] != 0",
"#weight = io5[\"weights\"][idx:idx+self.chunksize][mask]",
"#LOGGER.info(\"exluded = %s, mask shape %s\", ",
"# self._chunksize - mask.shape[0], mask.shape)",
"#LOGGER.info('q shape %s', io5[\"quartets\"][idx:idx+self._chunksize].shape)",
"masked_quartets",
"=",
"io5",
"[",
"\"quartets\"",
"]",
"[",
"idx",
":",
"idx",
"+",
"self",
".",
"_chunksize",
",",
":",
"]",
"#[mask, :]",
"quarts",
"=",
"[",
"list",
"(",
"j",
")",
"for",
"j",
"in",
"masked_quartets",
"]",
"## format and print",
"#chunk = [\"{},{}|{},{}:{}\".format(*i+[j]) for i, j \\",
"# in zip(quarts, weight)]",
"chunk",
"=",
"[",
"\"{},{}|{},{}\"",
".",
"format",
"(",
"*",
"i",
")",
"for",
"i",
"in",
"quarts",
"]",
"outfile",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"chunk",
")",
"+",
"\"\\n\"",
")",
"## close output file and h5 database",
"outfile",
".",
"close",
"(",
")",
"io5",
".",
"close",
"(",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
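The _dump_qmc() record above streams quartet rows out of the HDF5 output in chunks and writes them in the "a,b|c,d" format read by wQMC. The sketch below performs only the chunked formatting step on an in-memory numpy array; the file name and chunk size are invented.

import os
import tempfile
import numpy as np

def dump_quartets(quartets, path, chunksize=2):
    # write rows of an (n, 4) array as "a,b|c,d" lines, chunk by chunk
    with open(path, "w") as out:
        for idx in range(0, quartets.shape[0], chunksize):
            chunk = quartets[idx:idx + chunksize]
            lines = ["{},{}|{},{}".format(*row) for row in chunk.tolist()]
            out.write("\n".join(lines) + "\n")

qrts = np.array([[0, 1, 2, 3], [0, 2, 1, 3], [1, 2, 0, 3]], dtype=np.uint16)
path = os.path.join(tempfile.mkdtemp(), "toy.quartets.txt")
dump_quartets(qrts, path)
print(open(path).read())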
valid
|
Tetrad._renamer
|
renames newick from numbers to sample names
|
ipyrad/analysis/tetrad.py
|
def _renamer(self, tre):
""" renames newick from numbers to sample names"""
## get the tre with numbered tree tip labels
names = tre.get_leaves()
## replace numbered names with snames
for name in names:
name.name = self.samples[int(name.name)]
## return with only topology and leaf labels
return tre.write(format=9)
|
def _renamer(self, tre):
""" renames newick from numbers to sample names"""
## get the tre with numbered tree tip labels
names = tre.get_leaves()
## replace numbered names with snames
for name in names:
name.name = self.samples[int(name.name)]
## return with only topology and leaf labels
return tre.write(format=9)
|
[
"renames",
"newick",
"from",
"numbers",
"to",
"sample",
"names"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L744-L754
|
[
"def",
"_renamer",
"(",
"self",
",",
"tre",
")",
":",
"## get the tre with numbered tree tip labels",
"names",
"=",
"tre",
".",
"get_leaves",
"(",
")",
"## replace numbered names with snames",
"for",
"name",
"in",
"names",
":",
"name",
".",
"name",
"=",
"self",
".",
"samples",
"[",
"int",
"(",
"name",
".",
"name",
")",
"]",
"## return with only topology and leaf labels",
"return",
"tre",
".",
"write",
"(",
"format",
"=",
"9",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
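The _renamer() record above maps integer tip labels back to sample names with ete3 before writing a topology-only newick. To stay dependency-free, the sketch below does the equivalent substitution with a regular expression on a toy newick string; using ete3 as in the record would be the closer match when it is installed.

import re

def rename_newick(newick, samples):
    # replace bare integer tip labels with names looked up by index
    return re.sub(r"(?<=[(,])(\d+)(?=[:,)])",
                  lambda m: samples[int(m.group(1))],
                  newick)

samples = ["ant", "bee", "cat", "dog"]
print(rename_newick("((0,1),(2,3));", samples))   # ((ant,bee),(cat,dog));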
valid
|
Tetrad._finalize_stats
|
write final tree files
|
ipyrad/analysis/tetrad.py
|
def _finalize_stats(self, ipyclient):
""" write final tree files """
## print stats file location:
#print(STATSOUT.format(opr(self.files.stats)))
## print finished tree information ---------------------
print(FINALTREES.format(opr(self.trees.tree)))
## print bootstrap information --------------------------
if self.params.nboots:
## get consensus, map values to tree edges, record stats file
self._compute_tree_stats(ipyclient)
## print bootstrap info
print(BOOTTREES.format(opr(self.trees.cons), opr(self.trees.boots)))
## print the ASCII tree only if its small
if len(self.samples) < 20:
if self.params.nboots:
wctre = ete3.Tree(self.trees.cons, format=0)
wctre.ladderize()
print(wctre.get_ascii(show_internal=True,
attributes=["dist", "name"]))
print("")
else:
qtre = ete3.Tree(self.trees.tree, format=0)
qtre.ladderize()
#qtre = toytree.tree(self.trees.tree, format=0)
#qtre.tree.unroot()
print(qtre.get_ascii())
print("")
## print PDF filename & tips -----------------------------
docslink = "https://toytree.readthedocs.io/"
citelink = "https://ipyrad.readthedocs.io/tetrad.html"
print(LINKS.format(docslink, citelink))
|
def _finalize_stats(self, ipyclient):
""" write final tree files """
## print stats file location:
#print(STATSOUT.format(opr(self.files.stats)))
## print finished tree information ---------------------
print(FINALTREES.format(opr(self.trees.tree)))
## print bootstrap information --------------------------
if self.params.nboots:
## get consensus, map values to tree edges, record stats file
self._compute_tree_stats(ipyclient)
## print bootstrap info
print(BOOTTREES.format(opr(self.trees.cons), opr(self.trees.boots)))
## print the ASCII tree only if its small
if len(self.samples) < 20:
if self.params.nboots:
wctre = ete3.Tree(self.trees.cons, format=0)
wctre.ladderize()
print(wctre.get_ascii(show_internal=True,
attributes=["dist", "name"]))
print("")
else:
qtre = ete3.Tree(self.trees.tree, format=0)
qtre.ladderize()
#qtre = toytree.tree(self.trees.tree, format=0)
#qtre.tree.unroot()
print(qtre.get_ascii())
print("")
## print PDF filename & tips -----------------------------
docslink = "https://toytree.readthedocs.io/"
citelink = "https://ipyrad.readthedocs.io/tetrad.html"
print(LINKS.format(docslink, citelink))
|
[
"write",
"final",
"tree",
"files"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L758-L793
|
[
"def",
"_finalize_stats",
"(",
"self",
",",
"ipyclient",
")",
":",
"## print stats file location:",
"#print(STATSOUT.format(opr(self.files.stats)))",
"## print finished tree information ---------------------",
"print",
"(",
"FINALTREES",
".",
"format",
"(",
"opr",
"(",
"self",
".",
"trees",
".",
"tree",
")",
")",
")",
"## print bootstrap information --------------------------",
"if",
"self",
".",
"params",
".",
"nboots",
":",
"## get consensus, map values to tree edges, record stats file",
"self",
".",
"_compute_tree_stats",
"(",
"ipyclient",
")",
"## print bootstrap info",
"print",
"(",
"BOOTTREES",
".",
"format",
"(",
"opr",
"(",
"self",
".",
"trees",
".",
"cons",
")",
",",
"opr",
"(",
"self",
".",
"trees",
".",
"boots",
")",
")",
")",
"## print the ASCII tree only if its small",
"if",
"len",
"(",
"self",
".",
"samples",
")",
"<",
"20",
":",
"if",
"self",
".",
"params",
".",
"nboots",
":",
"wctre",
"=",
"ete3",
".",
"Tree",
"(",
"self",
".",
"trees",
".",
"cons",
",",
"format",
"=",
"0",
")",
"wctre",
".",
"ladderize",
"(",
")",
"print",
"(",
"wctre",
".",
"get_ascii",
"(",
"show_internal",
"=",
"True",
",",
"attributes",
"=",
"[",
"\"dist\"",
",",
"\"name\"",
"]",
")",
")",
"print",
"(",
"\"\"",
")",
"else",
":",
"qtre",
"=",
"ete3",
".",
"Tree",
"(",
"self",
".",
"trees",
".",
"tree",
",",
"format",
"=",
"0",
")",
"qtre",
".",
"ladderize",
"(",
")",
"#qtre = toytree.tree(self.trees.tree, format=0)",
"#qtre.tree.unroot()",
"print",
"(",
"qtre",
".",
"get_ascii",
"(",
")",
")",
"print",
"(",
"\"\"",
")",
"## print PDF filename & tips -----------------------------",
"docslink",
"=",
"\"https://toytree.readthedocs.io/\"",
"citelink",
"=",
"\"https://ipyrad.readthedocs.io/tetrad.html\"",
"print",
"(",
"LINKS",
".",
"format",
"(",
"docslink",
",",
"citelink",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
Tetrad._save
|
save a JSON file representation of Tetrad Class for checkpoint
|
ipyrad/analysis/tetrad.py
|
def _save(self):
""" save a JSON file representation of Tetrad Class for checkpoint"""
## save each attribute as dict
fulldict = copy.deepcopy(self.__dict__)
for i, j in fulldict.items():
if isinstance(j, Params):
fulldict[i] = j.__dict__
fulldumps = json.dumps(fulldict,
sort_keys=False,
indent=4,
separators=(",", ":"),
)
## save to file, make dir if it wasn't made earlier
assemblypath = os.path.join(self.dirs, self.name+".tet.json")
if not os.path.exists(self.dirs):
os.mkdir(self.dirs)
## protect save from interruption
done = 0
while not done:
try:
with open(assemblypath, 'w') as jout:
jout.write(fulldumps)
done = 1
except (KeyboardInterrupt, SystemExit):
print('.')
continue
|
def _save(self):
""" save a JSON file representation of Tetrad Class for checkpoint"""
## save each attribute as dict
fulldict = copy.deepcopy(self.__dict__)
for i, j in fulldict.items():
if isinstance(j, Params):
fulldict[i] = j.__dict__
fulldumps = json.dumps(fulldict,
sort_keys=False,
indent=4,
separators=(",", ":"),
)
## save to file, make dir if it wasn't made earlier
assemblypath = os.path.join(self.dirs, self.name+".tet.json")
if not os.path.exists(self.dirs):
os.mkdir(self.dirs)
## protect save from interruption
done = 0
while not done:
try:
with open(assemblypath, 'w') as jout:
jout.write(fulldumps)
done = 1
except (KeyboardInterrupt, SystemExit):
print('.')
continue
|
[
"save",
"a",
"JSON",
"file",
"representation",
"of",
"Tetrad",
"Class",
"for",
"checkpoint"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L803-L831
|
[
"def",
"_save",
"(",
"self",
")",
":",
"## save each attribute as dict",
"fulldict",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"__dict__",
")",
"for",
"i",
",",
"j",
"in",
"fulldict",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"j",
",",
"Params",
")",
":",
"fulldict",
"[",
"i",
"]",
"=",
"j",
".",
"__dict__",
"fulldumps",
"=",
"json",
".",
"dumps",
"(",
"fulldict",
",",
"sort_keys",
"=",
"False",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
",",
")",
"## save to file, make dir if it wasn't made earlier",
"assemblypath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
",",
"self",
".",
"name",
"+",
"\".tet.json\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"dirs",
")",
":",
"os",
".",
"mkdir",
"(",
"self",
".",
"dirs",
")",
"## protect save from interruption",
"done",
"=",
"0",
"while",
"not",
"done",
":",
"try",
":",
"with",
"open",
"(",
"assemblypath",
",",
"'w'",
")",
"as",
"jout",
":",
"jout",
".",
"write",
"(",
"fulldumps",
")",
"done",
"=",
"1",
"except",
"(",
"KeyboardInterrupt",
",",
"SystemExit",
")",
":",
"print",
"(",
"'.'",
")",
"continue"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
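The _save method above captures a simple checkpointing recipe: deep-copy the object state, flatten nested Params containers into plain dicts, and dump everything as JSON into the working directory. The standalone sketch below reproduces that recipe under stated assumptions; the Params stand-in, attribute names, and output path are illustrative and not taken from the repo.

import copy
import json
import os

class Params(object):
    """ a minimal stand-in for the nested parameter container """
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

## a toy object with a nested Params attribute, loosely mirroring Tetrad
obj = Params(name="example", dirs="analysis-example",
             params=Params(nboots=10, method="all"))

## flatten nested Params objects so the full state is JSON-serializable
fulldict = copy.deepcopy(obj.__dict__)
for key, val in fulldict.items():
    if isinstance(val, Params):
        fulldict[key] = val.__dict__

## write the checkpoint file, creating the directory if needed
if not os.path.exists(obj.dirs):
    os.mkdir(obj.dirs)
with open(os.path.join(obj.dirs, obj.name + ".json"), "w") as jout:
    jout.write(json.dumps(fulldict, indent=4, separators=(",", ":")))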
valid
|
Tetrad._insert_to_array
|
inputs results from workers into hdf5 array
|
ipyrad/analysis/tetrad.py
|
def _insert_to_array(self, start, results):
    """ inputs results from workers into hdf5 array """
qrts, wgts, qsts = results
#qrts, wgts = results
#print(qrts)
with h5py.File(self.database.output, 'r+') as out:
chunk = self._chunksize
out['quartets'][start:start+chunk] = qrts
##out['weights'][start:start+chunk] = wgts
## entered as 0-indexed !
if self.checkpoint.boots:
key = "qboots/b{}".format(self.checkpoint.boots-1)
out[key][start:start+chunk] = qsts
else:
out["qstats"][start:start+chunk] = qsts
|
def _insert_to_array(self, start, results):
    """ inputs results from workers into hdf5 array """
qrts, wgts, qsts = results
#qrts, wgts = results
#print(qrts)
with h5py.File(self.database.output, 'r+') as out:
chunk = self._chunksize
out['quartets'][start:start+chunk] = qrts
##out['weights'][start:start+chunk] = wgts
## entered as 0-indexed !
if self.checkpoint.boots:
key = "qboots/b{}".format(self.checkpoint.boots-1)
out[key][start:start+chunk] = qsts
else:
out["qstats"][start:start+chunk] = qsts
|
[
"inputs",
"results",
"from",
"workers",
"into",
"hdf4",
"array"
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L835-L851
|
[
"def",
"_insert_to_array",
"(",
"self",
",",
"start",
",",
"results",
")",
":",
"qrts",
",",
"wgts",
",",
"qsts",
"=",
"results",
"#qrts, wgts = results",
"#print(qrts)",
"with",
"h5py",
".",
"File",
"(",
"self",
".",
"database",
".",
"output",
",",
"'r+'",
")",
"as",
"out",
":",
"chunk",
"=",
"self",
".",
"_chunksize",
"out",
"[",
"'quartets'",
"]",
"[",
"start",
":",
"start",
"+",
"chunk",
"]",
"=",
"qrts",
"##out['weights'][start:start+chunk] = wgts",
"## entered as 0-indexed !",
"if",
"self",
".",
"checkpoint",
".",
"boots",
":",
"key",
"=",
"\"qboots/b{}\"",
".",
"format",
"(",
"self",
".",
"checkpoint",
".",
"boots",
"-",
"1",
")",
"out",
"[",
"key",
"]",
"[",
"start",
":",
"start",
"+",
"chunk",
"]",
"=",
"qsts",
"else",
":",
"out",
"[",
"\"qstats\"",
"]",
"[",
"start",
":",
"start",
"+",
"chunk",
"]",
"=",
"qsts"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
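The function above writes each finished chunk of worker results into a fixed-offset slice of preallocated HDF5 datasets. The following standalone sketch shows that slice-assignment pattern; the file name, dataset names, shapes, and dtypes are illustrative assumptions rather than the repo's actual schema.

import h5py
import numpy as np

chunk, nquartets = 100, 1000

## preallocate output datasets (names and dtypes are placeholders)
with h5py.File("example_output.h5", "w") as out:
    out.create_dataset("quartets", (nquartets, 4), dtype=np.uint16)
    out.create_dataset("qstats", (nquartets, 4), dtype=np.uint32)

## stand-ins for one chunk of worker results at offset `start`
start = 200
qrts = np.zeros((chunk, 4), dtype=np.uint16)
qsts = np.ones((chunk, 4), dtype=np.uint32)

## write the chunk into its slice, as _insert_to_array does
with h5py.File("example_output.h5", "r+") as out:
    out["quartets"][start:start + chunk] = qrts
    out["qstats"][start:start + chunk] = qsts

Because each job owns a contiguous block of rows, concurrent chunks never overlap when written back this way.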
valid
|
Tetrad.run
|
Run quartet inference on a SNP alignment and distribute work
across an ipyparallel cluster (ipyclient). Unless passed an
ipyclient explicitly, it looks for a running ipcluster instance
running from the default ("") profile, and will raise an exception
if one is not found within a set time limit. If not using the default
profile then you can set "profile" as an argument to the tetrad object.
Parameter settings influencing the run (e.g., nquartets, method) should
be set on the tetrad Class object itself.
Parameters
----------
force (bool):
Overwrite results for an object with this name if they exist.
verbose (int):
0=print nothing, 1=print progress bars, 2=print progress bars and
print cluster info.
ipyclient (ipyparallel.Client object):
Default is None (use running Default ipcluster instance). To use
a different ipcluster instance start a Client class object
and pass it in as an argument here.
|
ipyrad/analysis/tetrad.py
|
def run(self, force=0, verbose=2, ipyclient=None):
"""
Run quartet inference on a SNP alignment and distribute work
across an ipyparallel cluster (ipyclient). Unless passed an
ipyclient explicitly, it looks for a running ipcluster instance
running from the defautl ("") profile, and will raise an exception
if one is not found within a set time limit. If not using the default
profile then you can set "profile" as an argument to the tetrad object.
Parameter settings influencing the run (e.g., nquartets, method) should
be set on the tetrad Class object itself.
Parameters
----------
force (bool):
Overwrite results for an object with this name if they exist.
verbose (int):
0=print nothing, 1=print progress bars, 2=print progress bars and
print cluster info.
ipyclient (ipyparallel.Client object):
Default is None (use running Default ipcluster instance). To use
a different ipcluster instance start a Client class object
and pass it in as an argument here.
"""
## clear object results and data if force=True
if force:
self.refresh()
## wrap everything in a try statement so we can ensure that it will
        ## save if interrupted and we will clean up the client
inst = None
try:
## launch and connect to ipcluster instance if doesn't exist
if not ipyclient:
args = self._ipcluster.items() + [("spacer", "")]
ipyclient = ip.core.parallel.get_client(**dict(args))
## print a message about the cluster status
if verbose == 2:
ip.cluster_info(ipyclient)
## grab 2 engines from each host (2 multi-thread jobs per host)
## skips over engines that are busy running something else to avoid
## blocking if user is sharing an ipcluster.
targets = get_targets(ipyclient)
lbview = ipyclient.load_balanced_view(targets=targets)
## store ipyclient pids to the ipcluster instance so we can
## hard-kill them later.
self._ipcluster["pids"] = ipyclient[:].apply(os.getpid).get_dict()
## get or init quartet sampling ---------------------------
## if load=True then chunksize will exist and this will skip
if not self._chunksize:
#self.nquartets = n_choose_k(len(self.samples), 4)
## store N sampled quartets into the h5 array
if self.params.method != 'equal':
self._store_N_samples(ncpus=len(lbview))
else:
self._store_equal_samples(ncpus=len(lbview))
## calculate invariants for the full array ----------------
start = time.time()
if not self.trees.tree:
if verbose:
print("inferring {} induced quartet trees".format(self.params.nquartets))
self._inference(start, lbview, quiet=verbose == 0)
if verbose:
print("")
else:
if verbose:
print("initial tree already inferred")
## calculate for bootstraps -------------------------------
start = time.time()
if self.params.nboots:
if self.checkpoint.boots == self.params.nboots:
if verbose:
print("{} bootstrap trees already inferred".format(self.params.nboots))
else:
while self.checkpoint.boots < self.params.nboots:
                        ## resample bootstrap seqarray
if self.files.mapfile:
self._sample_bootseq_array_map()
else:
self._sample_bootseq_array()
## start boot inference, (1-indexed !!!)
self.checkpoint.boots += 1
self._inference(start, lbview, quiet=verbose == 0)
if verbose:
print("")
## write outputs with bootstraps ---------------------------
self.files.stats = os.path.join(self.dirs, self.name+"_stats.txt")
if not self.kwargs.get("cli"):
self._compute_tree_stats(ipyclient)
else:
self._finalize_stats(ipyclient)
## handle exceptions so they will be raised after we clean up below
except KeyboardInterrupt as inst:
LOGGER.info("assembly interrupted by user.")
print("\nKeyboard Interrupt by user. Cleaning up...")
except IPyradWarningExit as inst:
LOGGER.info("IPyradWarningExit: %s", inst)
print(inst)
except Exception as inst:
LOGGER.info("caught an unknown exception %s", inst)
print("\n Exception found: {}".format(inst))
## close client when done or interrupted
finally:
try:
## save the Assembly
self._save()
## can't close client if it was never open
if ipyclient:
## send SIGINT (2) to all engines
ipyclient.abort()
LOGGER.info("what %s", self._ipcluster["pids"])
for engine_id, pid in self._ipcluster["pids"].items():
LOGGER.info("eid %s", engine_id)
LOGGER.info("pid %s", pid)
LOGGER.info("queue %s", ipyclient.queue_status()[engine_id]["queue"])
if ipyclient.queue_status()[engine_id]["queue"]:
LOGGER.info('interrupting engine {} w/ SIGINT to {}'\
.format(engine_id, pid))
os.kill(pid, 2)
time.sleep(1)
## if CLI, stop jobs and shutdown
if 'ipyrad-cli' in self._ipcluster["cluster_id"]:
LOGGER.info(" shutting down engines")
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
LOGGER.info(" finished shutdown")
else:
if not ipyclient.outstanding:
ipyclient.purge_everything()
else:
## nanny: kill everything, something bad happened
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
print("\nwarning: ipcluster shutdown and must be restarted")
## reraise the error now that we're cleaned up
if inst:
raise inst
            ## if an exception occurs during close/save, print and ignore
except Exception as inst2:
print("warning: error during shutdown:\n{}".format(inst2))
LOGGER.error("shutdown warning: %s", inst2)
|
def run(self, force=0, verbose=2, ipyclient=None):
"""
Run quartet inference on a SNP alignment and distribute work
across an ipyparallel cluster (ipyclient). Unless passed an
ipyclient explicitly, it looks for a running ipcluster instance
running from the defautl ("") profile, and will raise an exception
if one is not found within a set time limit. If not using the default
profile then you can set "profile" as an argument to the tetrad object.
Parameter settings influencing the run (e.g., nquartets, method) should
be set on the tetrad Class object itself.
Parameters
----------
force (bool):
Overwrite results for an object with this name if they exist.
verbose (int):
0=print nothing, 1=print progress bars, 2=print progress bars and
print cluster info.
ipyclient (ipyparallel.Client object):
Default is None (use running Default ipcluster instance). To use
a different ipcluster instance start a Client class object
and pass it in as an argument here.
"""
## clear object results and data if force=True
if force:
self.refresh()
## wrap everything in a try statement so we can ensure that it will
        ## save if interrupted and we will clean up the client
inst = None
try:
## launch and connect to ipcluster instance if doesn't exist
if not ipyclient:
args = self._ipcluster.items() + [("spacer", "")]
ipyclient = ip.core.parallel.get_client(**dict(args))
## print a message about the cluster status
if verbose == 2:
ip.cluster_info(ipyclient)
## grab 2 engines from each host (2 multi-thread jobs per host)
## skips over engines that are busy running something else to avoid
## blocking if user is sharing an ipcluster.
targets = get_targets(ipyclient)
lbview = ipyclient.load_balanced_view(targets=targets)
## store ipyclient pids to the ipcluster instance so we can
## hard-kill them later.
self._ipcluster["pids"] = ipyclient[:].apply(os.getpid).get_dict()
## get or init quartet sampling ---------------------------
## if load=True then chunksize will exist and this will skip
if not self._chunksize:
#self.nquartets = n_choose_k(len(self.samples), 4)
## store N sampled quartets into the h5 array
if self.params.method != 'equal':
self._store_N_samples(ncpus=len(lbview))
else:
self._store_equal_samples(ncpus=len(lbview))
## calculate invariants for the full array ----------------
start = time.time()
if not self.trees.tree:
if verbose:
print("inferring {} induced quartet trees".format(self.params.nquartets))
self._inference(start, lbview, quiet=verbose == 0)
if verbose:
print("")
else:
if verbose:
print("initial tree already inferred")
## calculate for bootstraps -------------------------------
start = time.time()
if self.params.nboots:
if self.checkpoint.boots == self.params.nboots:
if verbose:
print("{} bootstrap trees already inferred".format(self.params.nboots))
else:
while self.checkpoint.boots < self.params.nboots:
                        ## resample bootstrap seqarray
if self.files.mapfile:
self._sample_bootseq_array_map()
else:
self._sample_bootseq_array()
## start boot inference, (1-indexed !!!)
self.checkpoint.boots += 1
self._inference(start, lbview, quiet=verbose == 0)
if verbose:
print("")
## write outputs with bootstraps ---------------------------
self.files.stats = os.path.join(self.dirs, self.name+"_stats.txt")
if not self.kwargs.get("cli"):
self._compute_tree_stats(ipyclient)
else:
self._finalize_stats(ipyclient)
## handle exceptions so they will be raised after we clean up below
except KeyboardInterrupt as inst:
LOGGER.info("assembly interrupted by user.")
print("\nKeyboard Interrupt by user. Cleaning up...")
except IPyradWarningExit as inst:
LOGGER.info("IPyradWarningExit: %s", inst)
print(inst)
except Exception as inst:
LOGGER.info("caught an unknown exception %s", inst)
print("\n Exception found: {}".format(inst))
## close client when done or interrupted
finally:
try:
## save the Assembly
self._save()
## can't close client if it was never open
if ipyclient:
## send SIGINT (2) to all engines
ipyclient.abort()
LOGGER.info("what %s", self._ipcluster["pids"])
for engine_id, pid in self._ipcluster["pids"].items():
LOGGER.info("eid %s", engine_id)
LOGGER.info("pid %s", pid)
LOGGER.info("queue %s", ipyclient.queue_status()[engine_id]["queue"])
if ipyclient.queue_status()[engine_id]["queue"]:
LOGGER.info('interrupting engine {} w/ SIGINT to {}'\
.format(engine_id, pid))
os.kill(pid, 2)
time.sleep(1)
## if CLI, stop jobs and shutdown
if 'ipyrad-cli' in self._ipcluster["cluster_id"]:
LOGGER.info(" shutting down engines")
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
LOGGER.info(" finished shutdown")
else:
if not ipyclient.outstanding:
ipyclient.purge_everything()
else:
## nanny: kill everything, something bad happened
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
print("\nwarning: ipcluster shutdown and must be restarted")
## reraise the error now that we're cleaned up
if inst:
raise inst
            ## if an exception occurs during close/save, print and ignore
except Exception as inst2:
print("warning: error during shutdown:\n{}".format(inst2))
LOGGER.error("shutdown warning: %s", inst2)
|
[
"Run",
"quartet",
"inference",
"on",
"a",
"SNP",
"alignment",
"and",
"distribute",
"work",
"across",
"an",
"ipyparallel",
"cluster",
"(",
"ipyclient",
")",
".",
"Unless",
"passed",
"an",
"ipyclient",
"explicitly",
"it",
"looks",
"for",
"a",
"running",
"ipcluster",
"instance",
"running",
"from",
"the",
"defautl",
"(",
")",
"profile",
"and",
"will",
"raise",
"an",
"exception",
"if",
"one",
"is",
"not",
"found",
"within",
"a",
"set",
"time",
"limit",
".",
"If",
"not",
"using",
"the",
"default",
"profile",
"then",
"you",
"can",
"set",
"profile",
"as",
"an",
"argument",
"to",
"the",
"tetrad",
"object",
".",
"Parameter",
"settings",
"influencing",
"the",
"run",
"(",
"e",
".",
"g",
".",
"nquartets",
"method",
")",
"should",
"be",
"set",
"on",
"the",
"tetrad",
"Class",
"object",
"itself",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L902-L1059
|
[
"def",
"run",
"(",
"self",
",",
"force",
"=",
"0",
",",
"verbose",
"=",
"2",
",",
"ipyclient",
"=",
"None",
")",
":",
"## clear object results and data if force=True",
"if",
"force",
":",
"self",
".",
"refresh",
"(",
")",
"## wrap everything in a try statement so we can ensure that it will",
"## save if interrupted and we will clean up the ",
"inst",
"=",
"None",
"try",
":",
"## launch and connect to ipcluster instance if doesn't exist",
"if",
"not",
"ipyclient",
":",
"args",
"=",
"self",
".",
"_ipcluster",
".",
"items",
"(",
")",
"+",
"[",
"(",
"\"spacer\"",
",",
"\"\"",
")",
"]",
"ipyclient",
"=",
"ip",
".",
"core",
".",
"parallel",
".",
"get_client",
"(",
"*",
"*",
"dict",
"(",
"args",
")",
")",
"## print a message about the cluster status",
"if",
"verbose",
"==",
"2",
":",
"ip",
".",
"cluster_info",
"(",
"ipyclient",
")",
"## grab 2 engines from each host (2 multi-thread jobs per host)",
"## skips over engines that are busy running something else to avoid",
"## blocking if user is sharing an ipcluster.",
"targets",
"=",
"get_targets",
"(",
"ipyclient",
")",
"lbview",
"=",
"ipyclient",
".",
"load_balanced_view",
"(",
"targets",
"=",
"targets",
")",
"## store ipyclient pids to the ipcluster instance so we can ",
"## hard-kill them later. ",
"self",
".",
"_ipcluster",
"[",
"\"pids\"",
"]",
"=",
"ipyclient",
"[",
":",
"]",
".",
"apply",
"(",
"os",
".",
"getpid",
")",
".",
"get_dict",
"(",
")",
"## get or init quartet sampling ---------------------------",
"## if load=True then chunksize will exist and this will skip",
"if",
"not",
"self",
".",
"_chunksize",
":",
"#self.nquartets = n_choose_k(len(self.samples), 4)",
"## store N sampled quartets into the h5 array",
"if",
"self",
".",
"params",
".",
"method",
"!=",
"'equal'",
":",
"self",
".",
"_store_N_samples",
"(",
"ncpus",
"=",
"len",
"(",
"lbview",
")",
")",
"else",
":",
"self",
".",
"_store_equal_samples",
"(",
"ncpus",
"=",
"len",
"(",
"lbview",
")",
")",
"## calculate invariants for the full array ----------------",
"start",
"=",
"time",
".",
"time",
"(",
")",
"if",
"not",
"self",
".",
"trees",
".",
"tree",
":",
"if",
"verbose",
":",
"print",
"(",
"\"inferring {} induced quartet trees\"",
".",
"format",
"(",
"self",
".",
"params",
".",
"nquartets",
")",
")",
"self",
".",
"_inference",
"(",
"start",
",",
"lbview",
",",
"quiet",
"=",
"verbose",
"==",
"0",
")",
"if",
"verbose",
":",
"print",
"(",
"\"\"",
")",
"else",
":",
"if",
"verbose",
":",
"print",
"(",
"\"initial tree already inferred\"",
")",
"## calculate for bootstraps ------------------------------- ",
"start",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"params",
".",
"nboots",
":",
"if",
"self",
".",
"checkpoint",
".",
"boots",
"==",
"self",
".",
"params",
".",
"nboots",
":",
"if",
"verbose",
":",
"print",
"(",
"\"{} bootstrap trees already inferred\"",
".",
"format",
"(",
"self",
".",
"params",
".",
"nboots",
")",
")",
"else",
":",
"while",
"self",
".",
"checkpoint",
".",
"boots",
"<",
"self",
".",
"params",
".",
"nboots",
":",
"## resample bootsstrap seqarray",
"if",
"self",
".",
"files",
".",
"mapfile",
":",
"self",
".",
"_sample_bootseq_array_map",
"(",
")",
"else",
":",
"self",
".",
"_sample_bootseq_array",
"(",
")",
"## start boot inference, (1-indexed !!!)",
"self",
".",
"checkpoint",
".",
"boots",
"+=",
"1",
"self",
".",
"_inference",
"(",
"start",
",",
"lbview",
",",
"quiet",
"=",
"verbose",
"==",
"0",
")",
"if",
"verbose",
":",
"print",
"(",
"\"\"",
")",
"## write outputs with bootstraps ---------------------------",
"self",
".",
"files",
".",
"stats",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
",",
"self",
".",
"name",
"+",
"\"_stats.txt\"",
")",
"if",
"not",
"self",
".",
"kwargs",
".",
"get",
"(",
"\"cli\"",
")",
":",
"self",
".",
"_compute_tree_stats",
"(",
"ipyclient",
")",
"else",
":",
"self",
".",
"_finalize_stats",
"(",
"ipyclient",
")",
"## handle exceptions so they will be raised after we clean up below",
"except",
"KeyboardInterrupt",
"as",
"inst",
":",
"LOGGER",
".",
"info",
"(",
"\"assembly interrupted by user.\"",
")",
"print",
"(",
"\"\\nKeyboard Interrupt by user. Cleaning up...\"",
")",
"except",
"IPyradWarningExit",
"as",
"inst",
":",
"LOGGER",
".",
"info",
"(",
"\"IPyradWarningExit: %s\"",
",",
"inst",
")",
"print",
"(",
"inst",
")",
"except",
"Exception",
"as",
"inst",
":",
"LOGGER",
".",
"info",
"(",
"\"caught an unknown exception %s\"",
",",
"inst",
")",
"print",
"(",
"\"\\n Exception found: {}\"",
".",
"format",
"(",
"inst",
")",
")",
"## close client when done or interrupted",
"finally",
":",
"try",
":",
"## save the Assembly",
"self",
".",
"_save",
"(",
")",
"## can't close client if it was never open",
"if",
"ipyclient",
":",
"## send SIGINT (2) to all engines",
"ipyclient",
".",
"abort",
"(",
")",
"LOGGER",
".",
"info",
"(",
"\"what %s\"",
",",
"self",
".",
"_ipcluster",
"[",
"\"pids\"",
"]",
")",
"for",
"engine_id",
",",
"pid",
"in",
"self",
".",
"_ipcluster",
"[",
"\"pids\"",
"]",
".",
"items",
"(",
")",
":",
"LOGGER",
".",
"info",
"(",
"\"eid %s\"",
",",
"engine_id",
")",
"LOGGER",
".",
"info",
"(",
"\"pid %s\"",
",",
"pid",
")",
"LOGGER",
".",
"info",
"(",
"\"queue %s\"",
",",
"ipyclient",
".",
"queue_status",
"(",
")",
"[",
"engine_id",
"]",
"[",
"\"queue\"",
"]",
")",
"if",
"ipyclient",
".",
"queue_status",
"(",
")",
"[",
"engine_id",
"]",
"[",
"\"queue\"",
"]",
":",
"LOGGER",
".",
"info",
"(",
"'interrupting engine {} w/ SIGINT to {}'",
".",
"format",
"(",
"engine_id",
",",
"pid",
")",
")",
"os",
".",
"kill",
"(",
"pid",
",",
"2",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"## if CLI, stop jobs and shutdown",
"if",
"'ipyrad-cli'",
"in",
"self",
".",
"_ipcluster",
"[",
"\"cluster_id\"",
"]",
":",
"LOGGER",
".",
"info",
"(",
"\" shutting down engines\"",
")",
"ipyclient",
".",
"shutdown",
"(",
"hub",
"=",
"True",
",",
"block",
"=",
"False",
")",
"ipyclient",
".",
"close",
"(",
")",
"LOGGER",
".",
"info",
"(",
"\" finished shutdown\"",
")",
"else",
":",
"if",
"not",
"ipyclient",
".",
"outstanding",
":",
"ipyclient",
".",
"purge_everything",
"(",
")",
"else",
":",
"## nanny: kill everything, something bad happened",
"ipyclient",
".",
"shutdown",
"(",
"hub",
"=",
"True",
",",
"block",
"=",
"False",
")",
"ipyclient",
".",
"close",
"(",
")",
"print",
"(",
"\"\\nwarning: ipcluster shutdown and must be restarted\"",
")",
"## reraise the error now that we're cleaned up",
"if",
"inst",
":",
"raise",
"inst",
"## if exception is close and save, print and ignore",
"except",
"Exception",
"as",
"inst2",
":",
"print",
"(",
"\"warning: error during shutdown:\\n{}\"",
".",
"format",
"(",
"inst2",
")",
")",
"LOGGER",
".",
"error",
"(",
"\"shutdown warning: %s\"",
",",
"inst2",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
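Since run() is the main user-facing entry point, a short usage sketch may help. It shows the connect/compute/clean-up pattern that run() wraps, with a trivial task standing in for quartet inference; it assumes ipyparallel is installed and an ipcluster instance is already running under the default profile, and it is not code from the repo.

import os
import ipyparallel as ipp

client = None
try:
    ## connect to the default ("") profile, as run() does when no
    ## ipyclient is passed in
    client = ipp.Client()
    lbview = client.load_balanced_view()

    ## record engine pids, mirroring the bookkeeping run() uses for cleanup
    pids = client[:].apply(os.getpid).get_dict()

    ## a trivial stand-in for the real quartet-inference workload
    result = lbview.apply(sum, range(10)).get()
    print("result: {}, engines: {}".format(result, sorted(pids)))
finally:
    ## always release the client, even if the work was interrupted
    if client is not None:
        client.close()

Passing an explicit Client, as in run(..., ipyclient=client), lets several analysis objects share one running cluster instead of each launching its own.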
valid
|
Tetrad._inference
|
Inference sends slices of jobs to the parallel engines for computing
and collects the results into the output hdf5 array as they finish.
|
ipyrad/analysis/tetrad.py
|
def _inference(self, start, lbview, quiet=False):
"""
Inference sends slices of jobs to the parallel engines for computing
and collects the results into the output hdf5 array as they finish.
"""
## an iterator to distribute sampled quartets in chunks
gen = xrange(self.checkpoint.arr, self.params.nquartets, self._chunksize)
njobs = sum(1 for _ in gen)
jobiter = iter(gen)
LOGGER.info("chunksize: %s, start: %s, total: %s, njobs: %s", \
self._chunksize, self.checkpoint.arr, self.params.nquartets, njobs)
## if bootstrap create an output array for results unless we are
## restarting an existing boot, then use the one already present
key = "b{}".format(self.checkpoint.boots)
with h5py.File(self.database.output, 'r+') as out:
if key not in out["qboots"].keys():
out["qboots"].create_dataset(key,
(self.params.nquartets, 4),
dtype=np.uint32,
chunks=(self._chunksize, 4))
## initial progress bar
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if not self.checkpoint.boots:
printstr = " initial tree | {} | "
if not quiet:
progressbar(1, 0, printstr.format(elapsed), spacer="")
else:
printstr = " boot {:<7} | {} | "
if not quiet:
progressbar(self.params.nboots, self.checkpoint.boots,
printstr.format(self.checkpoint.boots, elapsed), spacer="")
## submit all jobs to be distributed across nodes
res = {}
for _ in xrange(njobs):
## get chunk of quartet samples and send to a worker engine
qidx = jobiter.next()
LOGGER.info('submitting chunk: %s', qidx)
#res[qidx] = lbview.apply(nworker, *[self, qidx, TESTS])
with h5py.File(self.database.input, 'r') as inh5:
smps = inh5["samples"][qidx:qidx+self._chunksize]
res[qidx] = lbview.apply(nworker, *[self, smps, TESTS])
## keep adding jobs until the jobiter is empty
done = 0
while 1:
## check for finished jobs
curkeys = res.keys()
finished = [i.ready() for i in res.values()]
## remove finished and submit new jobs
if any(finished):
for ikey in curkeys:
if res[ikey].ready():
if res[ikey].successful():
## track finished
done += 1
## insert results into hdf5 data base
results = res[ikey].get(0)
LOGGER.info("%s", results[1])
self._insert_to_array(ikey, results) #, bidx)
## purge memory of the old one
del res[ikey]
else:
## print error if something went wrong
raise IPyradWarningExit(""" error in 'inference'\n{}
""".format(res[ikey].exception()))
## submit new jobs
try:
## send chunk off to be worked on
qidx = jobiter.next()
with h5py.File(self.database.input, 'r') as inh5:
smps = inh5["samples"][qidx:qidx+self._chunksize]
res[qidx] = lbview.apply(nworker, *[self, smps, TESTS])
## if no more jobs then just wait until these are done
except StopIteration:
continue
else:
time.sleep(0.01)
## print progress unless bootstrapping, diff progbar for that.
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if not self.checkpoint.boots:
if not quiet:
progressbar(njobs, done, printstr.format(elapsed), spacer="")
else:
if not quiet:
progressbar(self.params.nboots, self.checkpoint.boots,
printstr.format(self.checkpoint.boots, elapsed),
spacer="")
## done is counted on finish, so this means we're done
if njobs == done:
break
## dump quartets to a file
self._dump_qmc()
## send to qmc
if not self.checkpoint.boots:
self._run_qmc(0)
else:
self._run_qmc(1)
## reset the checkpoint_arr
self.checkpoint.arr = 0
|
def _inference(self, start, lbview, quiet=False):
"""
Inference sends slices of jobs to the parallel engines for computing
and collects the results into the output hdf5 array as they finish.
"""
## an iterator to distribute sampled quartets in chunks
gen = xrange(self.checkpoint.arr, self.params.nquartets, self._chunksize)
njobs = sum(1 for _ in gen)
jobiter = iter(gen)
LOGGER.info("chunksize: %s, start: %s, total: %s, njobs: %s", \
self._chunksize, self.checkpoint.arr, self.params.nquartets, njobs)
## if bootstrap create an output array for results unless we are
## restarting an existing boot, then use the one already present
key = "b{}".format(self.checkpoint.boots)
with h5py.File(self.database.output, 'r+') as out:
if key not in out["qboots"].keys():
out["qboots"].create_dataset(key,
(self.params.nquartets, 4),
dtype=np.uint32,
chunks=(self._chunksize, 4))
## initial progress bar
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if not self.checkpoint.boots:
printstr = " initial tree | {} | "
if not quiet:
progressbar(1, 0, printstr.format(elapsed), spacer="")
else:
printstr = " boot {:<7} | {} | "
if not quiet:
progressbar(self.params.nboots, self.checkpoint.boots,
printstr.format(self.checkpoint.boots, elapsed), spacer="")
## submit all jobs to be distributed across nodes
res = {}
for _ in xrange(njobs):
## get chunk of quartet samples and send to a worker engine
qidx = jobiter.next()
LOGGER.info('submitting chunk: %s', qidx)
#res[qidx] = lbview.apply(nworker, *[self, qidx, TESTS])
with h5py.File(self.database.input, 'r') as inh5:
smps = inh5["samples"][qidx:qidx+self._chunksize]
res[qidx] = lbview.apply(nworker, *[self, smps, TESTS])
## keep adding jobs until the jobiter is empty
done = 0
while 1:
## check for finished jobs
curkeys = res.keys()
finished = [i.ready() for i in res.values()]
## remove finished and submit new jobs
if any(finished):
for ikey in curkeys:
if res[ikey].ready():
if res[ikey].successful():
## track finished
done += 1
## insert results into hdf5 data base
results = res[ikey].get(0)
LOGGER.info("%s", results[1])
self._insert_to_array(ikey, results) #, bidx)
## purge memory of the old one
del res[ikey]
else:
## print error if something went wrong
raise IPyradWarningExit(""" error in 'inference'\n{}
""".format(res[ikey].exception()))
## submit new jobs
try:
## send chunk off to be worked on
qidx = jobiter.next()
with h5py.File(self.database.input, 'r') as inh5:
smps = inh5["samples"][qidx:qidx+self._chunksize]
res[qidx] = lbview.apply(nworker, *[self, smps, TESTS])
## if no more jobs then just wait until these are done
except StopIteration:
continue
else:
time.sleep(0.01)
## print progress unless bootstrapping, diff progbar for that.
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if not self.checkpoint.boots:
if not quiet:
progressbar(njobs, done, printstr.format(elapsed), spacer="")
else:
if not quiet:
progressbar(self.params.nboots, self.checkpoint.boots,
printstr.format(self.checkpoint.boots, elapsed),
spacer="")
## done is counted on finish, so this means we're done
if njobs == done:
break
## dump quartets to a file
self._dump_qmc()
## send to qmc
if not self.checkpoint.boots:
self._run_qmc(0)
else:
self._run_qmc(1)
## reset the checkpoint_arr
self.checkpoint.arr = 0
|
[
"Inference",
"sends",
"slices",
"of",
"jobs",
"to",
"the",
"parallel",
"engines",
"for",
"computing",
"and",
"collects",
"the",
"results",
"into",
"the",
"output",
"hdf5",
"array",
"as",
"they",
"finish",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1063-L1174
|
[
"def",
"_inference",
"(",
"self",
",",
"start",
",",
"lbview",
",",
"quiet",
"=",
"False",
")",
":",
"## an iterator to distribute sampled quartets in chunks",
"gen",
"=",
"xrange",
"(",
"self",
".",
"checkpoint",
".",
"arr",
",",
"self",
".",
"params",
".",
"nquartets",
",",
"self",
".",
"_chunksize",
")",
"njobs",
"=",
"sum",
"(",
"1",
"for",
"_",
"in",
"gen",
")",
"jobiter",
"=",
"iter",
"(",
"gen",
")",
"LOGGER",
".",
"info",
"(",
"\"chunksize: %s, start: %s, total: %s, njobs: %s\"",
",",
"self",
".",
"_chunksize",
",",
"self",
".",
"checkpoint",
".",
"arr",
",",
"self",
".",
"params",
".",
"nquartets",
",",
"njobs",
")",
"## if bootstrap create an output array for results unless we are ",
"## restarting an existing boot, then use the one already present",
"key",
"=",
"\"b{}\"",
".",
"format",
"(",
"self",
".",
"checkpoint",
".",
"boots",
")",
"with",
"h5py",
".",
"File",
"(",
"self",
".",
"database",
".",
"output",
",",
"'r+'",
")",
"as",
"out",
":",
"if",
"key",
"not",
"in",
"out",
"[",
"\"qboots\"",
"]",
".",
"keys",
"(",
")",
":",
"out",
"[",
"\"qboots\"",
"]",
".",
"create_dataset",
"(",
"key",
",",
"(",
"self",
".",
"params",
".",
"nquartets",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"uint32",
",",
"chunks",
"=",
"(",
"self",
".",
"_chunksize",
",",
"4",
")",
")",
"## initial progress bar",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"if",
"not",
"self",
".",
"checkpoint",
".",
"boots",
":",
"printstr",
"=",
"\" initial tree | {} | \"",
"if",
"not",
"quiet",
":",
"progressbar",
"(",
"1",
",",
"0",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"\"\"",
")",
"else",
":",
"printstr",
"=",
"\" boot {:<7} | {} | \"",
"if",
"not",
"quiet",
":",
"progressbar",
"(",
"self",
".",
"params",
".",
"nboots",
",",
"self",
".",
"checkpoint",
".",
"boots",
",",
"printstr",
".",
"format",
"(",
"self",
".",
"checkpoint",
".",
"boots",
",",
"elapsed",
")",
",",
"spacer",
"=",
"\"\"",
")",
"## submit all jobs to be distributed across nodes",
"res",
"=",
"{",
"}",
"for",
"_",
"in",
"xrange",
"(",
"njobs",
")",
":",
"## get chunk of quartet samples and send to a worker engine",
"qidx",
"=",
"jobiter",
".",
"next",
"(",
")",
"LOGGER",
".",
"info",
"(",
"'submitting chunk: %s'",
",",
"qidx",
")",
"#res[qidx] = lbview.apply(nworker, *[self, qidx, TESTS])",
"with",
"h5py",
".",
"File",
"(",
"self",
".",
"database",
".",
"input",
",",
"'r'",
")",
"as",
"inh5",
":",
"smps",
"=",
"inh5",
"[",
"\"samples\"",
"]",
"[",
"qidx",
":",
"qidx",
"+",
"self",
".",
"_chunksize",
"]",
"res",
"[",
"qidx",
"]",
"=",
"lbview",
".",
"apply",
"(",
"nworker",
",",
"*",
"[",
"self",
",",
"smps",
",",
"TESTS",
"]",
")",
"## keep adding jobs until the jobiter is empty",
"done",
"=",
"0",
"while",
"1",
":",
"## check for finished jobs",
"curkeys",
"=",
"res",
".",
"keys",
"(",
")",
"finished",
"=",
"[",
"i",
".",
"ready",
"(",
")",
"for",
"i",
"in",
"res",
".",
"values",
"(",
")",
"]",
"## remove finished and submit new jobs",
"if",
"any",
"(",
"finished",
")",
":",
"for",
"ikey",
"in",
"curkeys",
":",
"if",
"res",
"[",
"ikey",
"]",
".",
"ready",
"(",
")",
":",
"if",
"res",
"[",
"ikey",
"]",
".",
"successful",
"(",
")",
":",
"## track finished",
"done",
"+=",
"1",
"## insert results into hdf5 data base",
"results",
"=",
"res",
"[",
"ikey",
"]",
".",
"get",
"(",
"0",
")",
"LOGGER",
".",
"info",
"(",
"\"%s\"",
",",
"results",
"[",
"1",
"]",
")",
"self",
".",
"_insert_to_array",
"(",
"ikey",
",",
"results",
")",
"#, bidx)",
"## purge memory of the old one",
"del",
"res",
"[",
"ikey",
"]",
"else",
":",
"## print error if something went wrong",
"raise",
"IPyradWarningExit",
"(",
"\"\"\" error in 'inference'\\n{}\n \"\"\"",
".",
"format",
"(",
"res",
"[",
"ikey",
"]",
".",
"exception",
"(",
")",
")",
")",
"## submit new jobs",
"try",
":",
"## send chunk off to be worked on",
"qidx",
"=",
"jobiter",
".",
"next",
"(",
")",
"with",
"h5py",
".",
"File",
"(",
"self",
".",
"database",
".",
"input",
",",
"'r'",
")",
"as",
"inh5",
":",
"smps",
"=",
"inh5",
"[",
"\"samples\"",
"]",
"[",
"qidx",
":",
"qidx",
"+",
"self",
".",
"_chunksize",
"]",
"res",
"[",
"qidx",
"]",
"=",
"lbview",
".",
"apply",
"(",
"nworker",
",",
"*",
"[",
"self",
",",
"smps",
",",
"TESTS",
"]",
")",
"## if no more jobs then just wait until these are done",
"except",
"StopIteration",
":",
"continue",
"else",
":",
"time",
".",
"sleep",
"(",
"0.01",
")",
"## print progress unless bootstrapping, diff progbar for that.",
"elapsed",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"if",
"not",
"self",
".",
"checkpoint",
".",
"boots",
":",
"if",
"not",
"quiet",
":",
"progressbar",
"(",
"njobs",
",",
"done",
",",
"printstr",
".",
"format",
"(",
"elapsed",
")",
",",
"spacer",
"=",
"\"\"",
")",
"else",
":",
"if",
"not",
"quiet",
":",
"progressbar",
"(",
"self",
".",
"params",
".",
"nboots",
",",
"self",
".",
"checkpoint",
".",
"boots",
",",
"printstr",
".",
"format",
"(",
"self",
".",
"checkpoint",
".",
"boots",
",",
"elapsed",
")",
",",
"spacer",
"=",
"\"\"",
")",
"## done is counted on finish, so this means we're done",
"if",
"njobs",
"==",
"done",
":",
"break",
"## dump quartets to a file",
"self",
".",
"_dump_qmc",
"(",
")",
"## send to qmc",
"if",
"not",
"self",
".",
"checkpoint",
".",
"boots",
":",
"self",
".",
"_run_qmc",
"(",
"0",
")",
"else",
":",
"self",
".",
"_run_qmc",
"(",
"1",
")",
"## reset the checkpoint_arr",
"self",
".",
"checkpoint",
".",
"arr",
"=",
"0"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
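The submit-and-poll loop in _inference (key each job by its chunk offset, poll with .ready(), and collect results with .get() as they finish) is a reusable pattern on its own. Below is a minimal sketch of it with a builtin standing in for nworker and made-up chunk sizes; it again assumes a running ipcluster and is not code from the repo.

import time
import ipyparallel as ipp

client = ipp.Client()
lbview = client.load_balanced_view()

## submit one job per chunk offset, keyed by the offset as in _inference
chunksize, total = 100, 1000
jobs = {start: lbview.apply(sum, range(start, start + chunksize))
        for start in range(0, total, chunksize)}

## poll until every chunk has been collected
njobs, done = len(jobs), 0
while done < njobs:
    for start in list(jobs):
        if jobs[start].ready():
            result = jobs[start].get()   ## the real code writes this to HDF5
            done += 1
            del jobs[start]
    time.sleep(0.1)

client.close()

Deleting each finished AsyncResult, as the real loop does with del res[ikey], keeps memory use flat when the number of chunks is large.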
valid
|
run
|
Check all samples requested have been clustered (state=6), make output
directory, then create the requested outfiles. Excluded samples are already
removed from samples.
|
ipyrad/assemble/write_outfiles.py
|
def run(data, samples, force, ipyclient):
"""
Check all samples requested have been clustered (state=6), make output
directory, then create the requested outfiles. Excluded samples are already
removed from samples.
"""
## prepare dirs
data.dirs.outfiles = os.path.join(data.dirs.project, data.name+"_outfiles")
if not os.path.exists(data.dirs.outfiles):
os.mkdir(data.dirs.outfiles)
## make the snps/filters data base, fills the dups and inds filters
## and fills the splits locations
data.database = os.path.join(data.dirs.outfiles, data.name+".hdf5")
init_arrays(data)
## Apply filters to supercatg and superhdf5 with selected samples
## and fill the filters and edge arrays.
filter_all_clusters(data, samples, ipyclient)
## Everything needed is in the now filled h5 database. Filters were applied
## with 'samples' taken into account. Now we create the loci file (default)
## output and build a stats file.
data.outfiles.loci = os.path.join(data.dirs.outfiles, data.name+".loci")
data.outfiles.alleles = os.path.join(data.dirs.outfiles, data.name+".alleles.loci")
make_loci_and_stats(data, samples, ipyclient)
## OPTIONAL OUTPUTS:
output_formats = data.paramsdict["output_formats"]
    ## held separate from *output_formats because it's big and parallelized
if any([x in output_formats for x in ["v", "V"]]):
full = "V" in output_formats
try:
make_vcf(data, samples, ipyclient, full=full)
except IPyradWarningExit as inst:
            ## Something broke the vcf build. Sometimes this is simply a memory
## issue, so trap the exception and allow it to try building
## the other output formats.
print(" Error building vcf. See ipyrad_log.txt for details.")
LOGGER.error(inst)
## make other array-based formats, recalcs keeps and arrays
make_outfiles(data, samples, output_formats, ipyclient)
## print friendly message
shortpath = data.dirs.outfiles.replace(os.path.expanduser("~"), "~")
print("{}Outfiles written to: {}\n".format(data._spacer, shortpath))
|
def run(data, samples, force, ipyclient):
"""
Check all samples requested have been clustered (state=6), make output
directory, then create the requested outfiles. Excluded samples are already
removed from samples.
"""
## prepare dirs
data.dirs.outfiles = os.path.join(data.dirs.project, data.name+"_outfiles")
if not os.path.exists(data.dirs.outfiles):
os.mkdir(data.dirs.outfiles)
## make the snps/filters data base, fills the dups and inds filters
## and fills the splits locations
data.database = os.path.join(data.dirs.outfiles, data.name+".hdf5")
init_arrays(data)
## Apply filters to supercatg and superhdf5 with selected samples
## and fill the filters and edge arrays.
filter_all_clusters(data, samples, ipyclient)
## Everything needed is in the now filled h5 database. Filters were applied
## with 'samples' taken into account. Now we create the loci file (default)
## output and build a stats file.
data.outfiles.loci = os.path.join(data.dirs.outfiles, data.name+".loci")
data.outfiles.alleles = os.path.join(data.dirs.outfiles, data.name+".alleles.loci")
make_loci_and_stats(data, samples, ipyclient)
## OPTIONAL OUTPUTS:
output_formats = data.paramsdict["output_formats"]
    ## held separate from *output_formats because it's big and parallelized
if any([x in output_formats for x in ["v", "V"]]):
full = "V" in output_formats
try:
make_vcf(data, samples, ipyclient, full=full)
except IPyradWarningExit as inst:
            ## Something broke the vcf build. Sometimes this is simply a memory
## issue, so trap the exception and allow it to try building
## the other output formats.
print(" Error building vcf. See ipyrad_log.txt for details.")
LOGGER.error(inst)
## make other array-based formats, recalcs keeps and arrays
make_outfiles(data, samples, output_formats, ipyclient)
## print friendly message
shortpath = data.dirs.outfiles.replace(os.path.expanduser("~"), "~")
print("{}Outfiles written to: {}\n".format(data._spacer, shortpath))
|
[
"Check",
"all",
"samples",
"requested",
"have",
"been",
"clustered",
"(",
"state",
"=",
"6",
")",
"make",
"output",
"directory",
"then",
"create",
"the",
"requested",
"outfiles",
".",
"Excluded",
"samples",
"are",
"already",
"removed",
"from",
"samples",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/write_outfiles.py#L72-L120
|
[
"def",
"run",
"(",
"data",
",",
"samples",
",",
"force",
",",
"ipyclient",
")",
":",
"## prepare dirs",
"data",
".",
"dirs",
".",
"outfiles",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"project",
",",
"data",
".",
"name",
"+",
"\"_outfiles\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"data",
".",
"dirs",
".",
"outfiles",
")",
":",
"os",
".",
"mkdir",
"(",
"data",
".",
"dirs",
".",
"outfiles",
")",
"## make the snps/filters data base, fills the dups and inds filters",
"## and fills the splits locations",
"data",
".",
"database",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"outfiles",
",",
"data",
".",
"name",
"+",
"\".hdf5\"",
")",
"init_arrays",
"(",
"data",
")",
"## Apply filters to supercatg and superhdf5 with selected samples",
"## and fill the filters and edge arrays.",
"filter_all_clusters",
"(",
"data",
",",
"samples",
",",
"ipyclient",
")",
"## Everything needed is in the now filled h5 database. Filters were applied",
"## with 'samples' taken into account. Now we create the loci file (default)",
"## output and build a stats file.",
"data",
".",
"outfiles",
".",
"loci",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"outfiles",
",",
"data",
".",
"name",
"+",
"\".loci\"",
")",
"data",
".",
"outfiles",
".",
"alleles",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"outfiles",
",",
"data",
".",
"name",
"+",
"\".alleles.loci\"",
")",
"make_loci_and_stats",
"(",
"data",
",",
"samples",
",",
"ipyclient",
")",
"## OPTIONAL OUTPUTS:",
"output_formats",
"=",
"data",
".",
"paramsdict",
"[",
"\"output_formats\"",
"]",
"## held separate from *output_formats cuz it's big and parallelized",
"if",
"any",
"(",
"[",
"x",
"in",
"output_formats",
"for",
"x",
"in",
"[",
"\"v\"",
",",
"\"V\"",
"]",
"]",
")",
":",
"full",
"=",
"\"V\"",
"in",
"output_formats",
"try",
":",
"make_vcf",
"(",
"data",
",",
"samples",
",",
"ipyclient",
",",
"full",
"=",
"full",
")",
"except",
"IPyradWarningExit",
"as",
"inst",
":",
"## Something fsck vcf build. Sometimes this is simply a memory",
"## issue, so trap the exception and allow it to try building",
"## the other output formats.",
"print",
"(",
"\" Error building vcf. See ipyrad_log.txt for details.\"",
")",
"LOGGER",
".",
"error",
"(",
"inst",
")",
"## make other array-based formats, recalcs keeps and arrays",
"make_outfiles",
"(",
"data",
",",
"samples",
",",
"output_formats",
",",
"ipyclient",
")",
"## print friendly message",
"shortpath",
"=",
"data",
".",
"dirs",
".",
"outfiles",
".",
"replace",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
",",
"\"~\"",
")",
"print",
"(",
"\"{}Outfiles written to: {}\\n\"",
".",
"format",
"(",
"data",
".",
"_spacer",
",",
"shortpath",
")",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
make_stats
|
write the output stats file and save to Assembly obj.
|
ipyrad/assemble/write_outfiles.py
|
def make_stats(data, samples, samplecounts, locuscounts):
""" write the output stats file and save to Assembly obj."""
## get meta info
with h5py.File(data.clust_database, 'r') as io5:
anames = io5["seqs"].attrs["samples"]
nloci = io5["seqs"].shape[0]
optim = io5["seqs"].attrs["chunksize"][0]
## open the out handle. This will have three data frames saved to it.
## locus_filtering, sample_coverages, and snp_distributions
data.stats_files.s7 = os.path.join(data.dirs.outfiles,
data.name+"_stats.txt")
outstats = io.open(data.stats_files.s7, 'w', encoding="utf-8")
########################################################################
## get stats for locus_filtering, use chunking.
filters = np.zeros(6, dtype=int)
passed = 0
start = 0
piscounts = Counter()
varcounts = Counter()
for i in range(200):
piscounts[i] = 0
varcounts[i] = 0
applied = pd.Series([0]*8,
name="applied_order",
index=[
"total_prefiltered_loci",
"filtered_by_rm_duplicates",
"filtered_by_max_indels",
"filtered_by_max_snps",
"filtered_by_max_shared_het",
"filtered_by_min_sample",
"filtered_by_max_alleles",
"total_filtered_loci"])
## load the h5 database
co5 = h5py.File(data.database, 'r')
while start < nloci:
hslice = [start, start+optim]
## load each array
afilt = co5["filters"][hslice[0]:hslice[1], ]
asnps = co5["snps"][hslice[0]:hslice[1], ]
## get subarray results from filter array
# max_indels, max_snps, max_hets, min_samps, bad_edges, max_alleles
filters += afilt.sum(axis=0)
applied["filtered_by_rm_duplicates"] += afilt[:, 0].sum()
mask = afilt[:, 0].astype(np.bool)
applied["filtered_by_max_indels"] += afilt[~mask, 1].sum()
mask = afilt[:, 0:2].sum(axis=1).astype(np.bool)
applied["filtered_by_max_snps"] += afilt[~mask, 2].sum()
mask = afilt[:, 0:3].sum(axis=1).astype(np.bool)
applied["filtered_by_max_shared_het"] += afilt[~mask, 3].sum()
mask = afilt[:, 0:4].sum(axis=1).astype(np.bool)
applied["filtered_by_min_sample"] += afilt[~mask, 4].sum()
mask = afilt[:, 0:5].sum(axis=1).astype(np.bool)
applied["filtered_by_max_alleles"] += afilt[~mask, 5].sum()
passed += np.sum(afilt.sum(axis=1) == 0)
## get filter to count snps for only passed loci
## should we filter by all vars, or just by pis? doing all var now.
apply_filter = afilt.sum(axis=1).astype(np.bool)
## get snps counts
snplocs = asnps[~apply_filter, :].sum(axis=1)
varlocs = snplocs.sum(axis=1)
varcounts.update(Counter(varlocs))
#snpcounts.update(Counter(snplocs[:, 0]))
piscounts.update(Counter(snplocs[:, 1]))
## increase counter to advance through h5 database
start += optim
## record filtering of loci from total to final
filtdat = pd.Series(np.concatenate([[nloci], filters, [passed]]),
name="total_filters",
index=[
"total_prefiltered_loci",
"filtered_by_rm_duplicates",
"filtered_by_max_indels",
"filtered_by_max_snps",
"filtered_by_max_shared_het",
"filtered_by_min_sample",
"filtered_by_max_alleles",
"total_filtered_loci"])
retained = pd.Series([0]*8,
name="retained_loci",
index=[
"total_prefiltered_loci",
"filtered_by_rm_duplicates",
"filtered_by_max_indels",
"filtered_by_max_snps",
"filtered_by_max_shared_het",
"filtered_by_min_sample",
"filtered_by_max_alleles",
"total_filtered_loci"])
retained["total_prefiltered_loci"] = nloci
retained["filtered_by_rm_duplicates"] = nloci - applied["filtered_by_rm_duplicates"]
retained["filtered_by_max_indels"] = retained["filtered_by_rm_duplicates"] - applied["filtered_by_max_indels"]
retained["filtered_by_max_snps"] = retained["filtered_by_max_indels"] - applied["filtered_by_max_snps"]
retained["filtered_by_max_shared_het"] = retained["filtered_by_max_snps"] - applied["filtered_by_max_shared_het"]
retained["filtered_by_min_sample"] = retained["filtered_by_max_shared_het"] - applied["filtered_by_min_sample"]
retained["filtered_by_max_alleles"] = retained["filtered_by_min_sample"] - applied["filtered_by_max_alleles"]
retained["total_filtered_loci"] = passed
print(u"\n\n## The number of loci caught by each filter."+\
u"\n## ipyrad API location: [assembly].stats_dfs.s7_filters\n",
file=outstats)
data.stats_dfs.s7_filters = pd.DataFrame([filtdat, applied, retained]).T
data.stats_dfs.s7_filters.to_string(buf=outstats)
########################################################################
## make dataframe of sample_coverages
## samplecounts is len of anames from db. Save only samples in samples.
#print(samplecounts)
#samples = [i.name for i in samples]
## get sample names in the order of anames
#sids = [list(anames).index(i) for i in samples]
#covdict = {name: val for name, val in zip(np.array(samples)[sidx], samplecounts)}
#covdict = {name: val for name, val in zip(samples, samplecounts[sidx])}
covdict = pd.Series(samplecounts, name="sample_coverage", index=anames)
covdict = covdict[covdict != 0]
print(u"\n\n\n## The number of loci recovered for each Sample."+\
u"\n## ipyrad API location: [assembly].stats_dfs.s7_samples\n",
file=outstats)
data.stats_dfs.s7_samples = pd.DataFrame(covdict)
data.stats_dfs.s7_samples.to_string(buf=outstats)
########################################################################
## get stats for locus coverage
lrange = range(1, len(samples)+1)
locdat = pd.Series(locuscounts, name="locus_coverage", index=lrange)
start = data.paramsdict["min_samples_locus"]-1
locsums = pd.Series({i: np.sum(locdat.values[start:i]) for i in lrange},
name="sum_coverage", index=lrange)
print(u"\n\n\n## The number of loci for which N taxa have data."+\
u"\n## ipyrad API location: [assembly].stats_dfs.s7_loci\n",
file=outstats)
data.stats_dfs.s7_loci = pd.concat([locdat, locsums], axis=1)
data.stats_dfs.s7_loci.to_string(buf=outstats)
#########################################################################
## get stats for SNP_distribution
try:
smax = max([i+1 for i in varcounts if varcounts[i]])
except Exception as inst:
raise IPyradWarningExit("""
Exception: empty varcounts array. This could be because no samples
passed filtering, or it could be because you have overzealous filtering.
Check the values for `trim_loci` and make sure you are not trimming the
edge too far
""")
vardat = pd.Series(varcounts, name="var", index=range(smax)).fillna(0)
sumd = {}
for i in range(smax):
sumd[i] = np.sum([i*vardat.values[i] for i in range(i+1)])
varsums = pd.Series(sumd, name="sum_var", index=range(smax))
pisdat = pd.Series(piscounts, name="pis", index=range(smax)).fillna(0)
sumd = {}
for i in range(smax):
sumd[i] = np.sum([i*pisdat.values[i] for i in range(i+1)])
pissums = pd.Series(sumd, name="sum_pis", index=range(smax))
print(u"\n\n\n## The distribution of SNPs (var and pis) per locus."+\
u"\n## var = Number of loci with n variable sites (pis + autapomorphies)"+\
u"\n## pis = Number of loci with n parsimony informative site (minor allele in >1 sample)"+\
u"\n## ipyrad API location: [assembly].stats_dfs.s7_snps\n",
file=outstats)
data.stats_dfs.s7_snps = pd.concat([vardat, varsums, pisdat, pissums],
axis=1)
data.stats_dfs.s7_snps.to_string(buf=outstats)
##########################################################################
## print the stats summary (-r summary) with final sample loci data.
fullstat = data.stats
fullstat['state'] = 7
fullstat["loci_in_assembly"] = data.stats_dfs.s7_samples
print(u"\n\n\n## Final Sample stats summary\n", file=outstats)
fullstat.to_string(buf=outstats)
## close it
outstats.close()
co5.close()
|
def make_stats(data, samples, samplecounts, locuscounts):
    """ write the output stats file and save to Assembly obj."""

    ## get meta info
    with h5py.File(data.clust_database, 'r') as io5:
        anames = io5["seqs"].attrs["samples"]
        nloci = io5["seqs"].shape[0]
        optim = io5["seqs"].attrs["chunksize"][0]

    ## open the out handle. This will have three data frames saved to it.
    ## locus_filtering, sample_coverages, and snp_distributions
    data.stats_files.s7 = os.path.join(data.dirs.outfiles,
                                       data.name+"_stats.txt")
    outstats = io.open(data.stats_files.s7, 'w', encoding="utf-8")

    ########################################################################
    ## get stats for locus_filtering, use chunking.
    filters = np.zeros(6, dtype=int)
    passed = 0
    start = 0

    piscounts = Counter()
    varcounts = Counter()
    for i in range(200):
        piscounts[i] = 0
        varcounts[i] = 0

    applied = pd.Series([0]*8,
        name="applied_order",
        index=[
            "total_prefiltered_loci",
            "filtered_by_rm_duplicates",
            "filtered_by_max_indels",
            "filtered_by_max_snps",
            "filtered_by_max_shared_het",
            "filtered_by_min_sample",
            "filtered_by_max_alleles",
            "total_filtered_loci"])

    ## load the h5 database
    co5 = h5py.File(data.database, 'r')

    while start < nloci:
        hslice = [start, start+optim]
        ## load each array
        afilt = co5["filters"][hslice[0]:hslice[1], ]
        asnps = co5["snps"][hslice[0]:hslice[1], ]

        ## get subarray results from filter array
        # max_indels, max_snps, max_hets, min_samps, bad_edges, max_alleles
        filters += afilt.sum(axis=0)
        applied["filtered_by_rm_duplicates"] += afilt[:, 0].sum()
        mask = afilt[:, 0].astype(np.bool)
        applied["filtered_by_max_indels"] += afilt[~mask, 1].sum()
        mask = afilt[:, 0:2].sum(axis=1).astype(np.bool)
        applied["filtered_by_max_snps"] += afilt[~mask, 2].sum()
        mask = afilt[:, 0:3].sum(axis=1).astype(np.bool)
        applied["filtered_by_max_shared_het"] += afilt[~mask, 3].sum()
        mask = afilt[:, 0:4].sum(axis=1).astype(np.bool)
        applied["filtered_by_min_sample"] += afilt[~mask, 4].sum()
        mask = afilt[:, 0:5].sum(axis=1).astype(np.bool)
        applied["filtered_by_max_alleles"] += afilt[~mask, 5].sum()

        passed += np.sum(afilt.sum(axis=1) == 0)

        ## get filter to count snps for only passed loci
        ## should we filter by all vars, or just by pis? doing all var now.
        apply_filter = afilt.sum(axis=1).astype(np.bool)

        ## get snps counts
        snplocs = asnps[~apply_filter, :].sum(axis=1)
        varlocs = snplocs.sum(axis=1)
        varcounts.update(Counter(varlocs))
        #snpcounts.update(Counter(snplocs[:, 0]))
        piscounts.update(Counter(snplocs[:, 1]))

        ## increase counter to advance through h5 database
        start += optim

    ## record filtering of loci from total to final
    filtdat = pd.Series(np.concatenate([[nloci], filters, [passed]]),
        name="total_filters",
        index=[
            "total_prefiltered_loci",
            "filtered_by_rm_duplicates",
            "filtered_by_max_indels",
            "filtered_by_max_snps",
            "filtered_by_max_shared_het",
            "filtered_by_min_sample",
            "filtered_by_max_alleles",
            "total_filtered_loci"])

    retained = pd.Series([0]*8,
        name="retained_loci",
        index=[
            "total_prefiltered_loci",
            "filtered_by_rm_duplicates",
            "filtered_by_max_indels",
            "filtered_by_max_snps",
            "filtered_by_max_shared_het",
            "filtered_by_min_sample",
            "filtered_by_max_alleles",
            "total_filtered_loci"])
    retained["total_prefiltered_loci"] = nloci
    retained["filtered_by_rm_duplicates"] = nloci - applied["filtered_by_rm_duplicates"]
    retained["filtered_by_max_indels"] = retained["filtered_by_rm_duplicates"] - applied["filtered_by_max_indels"]
    retained["filtered_by_max_snps"] = retained["filtered_by_max_indels"] - applied["filtered_by_max_snps"]
    retained["filtered_by_max_shared_het"] = retained["filtered_by_max_snps"] - applied["filtered_by_max_shared_het"]
    retained["filtered_by_min_sample"] = retained["filtered_by_max_shared_het"] - applied["filtered_by_min_sample"]
    retained["filtered_by_max_alleles"] = retained["filtered_by_min_sample"] - applied["filtered_by_max_alleles"]
    retained["total_filtered_loci"] = passed

    print(u"\n\n## The number of loci caught by each filter."+\
          u"\n## ipyrad API location: [assembly].stats_dfs.s7_filters\n",
          file=outstats)
    data.stats_dfs.s7_filters = pd.DataFrame([filtdat, applied, retained]).T
    data.stats_dfs.s7_filters.to_string(buf=outstats)

    ########################################################################
    ## make dataframe of sample_coverages
    ## samplecounts is len of anames from db. Save only samples in samples.
    #print(samplecounts)
    #samples = [i.name for i in samples]

    ## get sample names in the order of anames
    #sids = [list(anames).index(i) for i in samples]
    #covdict = {name: val for name, val in zip(np.array(samples)[sidx], samplecounts)}
    #covdict = {name: val for name, val in zip(samples, samplecounts[sidx])}
    covdict = pd.Series(samplecounts, name="sample_coverage", index=anames)
    covdict = covdict[covdict != 0]
    print(u"\n\n\n## The number of loci recovered for each Sample."+\
          u"\n## ipyrad API location: [assembly].stats_dfs.s7_samples\n",
          file=outstats)
    data.stats_dfs.s7_samples = pd.DataFrame(covdict)
    data.stats_dfs.s7_samples.to_string(buf=outstats)

    ########################################################################
    ## get stats for locus coverage
    lrange = range(1, len(samples)+1)
    locdat = pd.Series(locuscounts, name="locus_coverage", index=lrange)
    start = data.paramsdict["min_samples_locus"]-1
    locsums = pd.Series({i: np.sum(locdat.values[start:i]) for i in lrange},
                        name="sum_coverage", index=lrange)
    print(u"\n\n\n## The number of loci for which N taxa have data."+\
          u"\n## ipyrad API location: [assembly].stats_dfs.s7_loci\n",
          file=outstats)
    data.stats_dfs.s7_loci = pd.concat([locdat, locsums], axis=1)
    data.stats_dfs.s7_loci.to_string(buf=outstats)

    #########################################################################
    ## get stats for SNP_distribution
    try:
        smax = max([i+1 for i in varcounts if varcounts[i]])
    except Exception as inst:
        raise IPyradWarningExit("""
    Exception: empty varcounts array. This could be because no samples
    passed filtering, or it could be because you have overzealous filtering.
    Check the values for `trim_loci` and make sure you are not trimming the
    edge too far
    """)

    vardat = pd.Series(varcounts, name="var", index=range(smax)).fillna(0)
    sumd = {}
    for i in range(smax):
        sumd[i] = np.sum([i*vardat.values[i] for i in range(i+1)])
    varsums = pd.Series(sumd, name="sum_var", index=range(smax))

    pisdat = pd.Series(piscounts, name="pis", index=range(smax)).fillna(0)
    sumd = {}
    for i in range(smax):
        sumd[i] = np.sum([i*pisdat.values[i] for i in range(i+1)])
    pissums = pd.Series(sumd, name="sum_pis", index=range(smax))

    print(u"\n\n\n## The distribution of SNPs (var and pis) per locus."+\
          u"\n## var = Number of loci with n variable sites (pis + autapomorphies)"+\
          u"\n## pis = Number of loci with n parsimony informative site (minor allele in >1 sample)"+\
          u"\n## ipyrad API location: [assembly].stats_dfs.s7_snps\n",
          file=outstats)
    data.stats_dfs.s7_snps = pd.concat([vardat, varsums, pisdat, pissums],
                                       axis=1)
    data.stats_dfs.s7_snps.to_string(buf=outstats)

    ##########################################################################
    ## print the stats summary (-r summary) with final sample loci data.
    fullstat = data.stats
    fullstat['state'] = 7
    fullstat["loci_in_assembly"] = data.stats_dfs.s7_samples

    print(u"\n\n\n## Final Sample stats summary\n", file=outstats)
    fullstat.to_string(buf=outstats)

    ## close it
    outstats.close()
    co5.close()
|
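The "applied_order" accounting in make_stats credits each locus only to the first filter that catches it: before summing a filter column, loci already removed by an earlier column are masked out, so the per-filter counts plus total_filtered_loci add back up to total_prefiltered_loci. Below is a minimal, self-contained sketch of that cumulative-mask logic; the toy afilt matrix and its values are invented for illustration and stand in for the real co5["filters"] array.

import numpy as np
import pandas as pd

# toy filter matrix: rows = loci, columns = filters in applied order;
# a 1 means the locus failed that filter
names = ["rm_duplicates", "max_indels", "max_snps",
         "max_shared_het", "min_sample", "max_alleles"]
afilt = np.array([
    [1, 0, 1, 0, 0, 0],   # caught by rm_duplicates (max_snps not credited)
    [0, 1, 0, 0, 0, 0],   # caught by max_indels
    [0, 0, 0, 0, 0, 0],   # passes all filters
    [0, 0, 0, 1, 1, 0],   # caught by max_shared_het (min_sample not credited)
    [0, 0, 0, 0, 0, 0],   # passes all filters
])

applied = pd.Series(0, index=names)
applied["rm_duplicates"] = afilt[:, 0].sum()
for col, name in enumerate(names[1:], start=1):
    # mask out loci already removed by an earlier filter column
    already = afilt[:, :col].sum(axis=1).astype(bool)
    applied[name] = afilt[~already, col].sum()

# loci retained by every filter
passed = int(np.sum(afilt.sum(axis=1) == 0))
print(applied)
print("passed:", passed)

Run standalone this reports rm_duplicates=1, max_indels=1, max_shared_het=1 and passed=2, which together account for all 5 toy loci.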
[
"write",
"the",
"output",
"stats",
"file",
"and",
"save",
"to",
"Assembly",
"obj",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/write_outfiles.py#L124-L322
|
[
"def",
"make_stats",
"(",
"data",
",",
"samples",
",",
"samplecounts",
",",
"locuscounts",
")",
":",
"## get meta info",
"with",
"h5py",
".",
"File",
"(",
"data",
".",
"clust_database",
",",
"'r'",
")",
"as",
"io5",
":",
"anames",
"=",
"io5",
"[",
"\"seqs\"",
"]",
".",
"attrs",
"[",
"\"samples\"",
"]",
"nloci",
"=",
"io5",
"[",
"\"seqs\"",
"]",
".",
"shape",
"[",
"0",
"]",
"optim",
"=",
"io5",
"[",
"\"seqs\"",
"]",
".",
"attrs",
"[",
"\"chunksize\"",
"]",
"[",
"0",
"]",
"## open the out handle. This will have three data frames saved to it.",
"## locus_filtering, sample_coverages, and snp_distributions",
"data",
".",
"stats_files",
".",
"s7",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"outfiles",
",",
"data",
".",
"name",
"+",
"\"_stats.txt\"",
")",
"outstats",
"=",
"io",
".",
"open",
"(",
"data",
".",
"stats_files",
".",
"s7",
",",
"'w'",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"########################################################################",
"## get stats for locus_filtering, use chunking.",
"filters",
"=",
"np",
".",
"zeros",
"(",
"6",
",",
"dtype",
"=",
"int",
")",
"passed",
"=",
"0",
"start",
"=",
"0",
"piscounts",
"=",
"Counter",
"(",
")",
"varcounts",
"=",
"Counter",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"200",
")",
":",
"piscounts",
"[",
"i",
"]",
"=",
"0",
"varcounts",
"[",
"i",
"]",
"=",
"0",
"applied",
"=",
"pd",
".",
"Series",
"(",
"[",
"0",
"]",
"*",
"8",
",",
"name",
"=",
"\"applied_order\"",
",",
"index",
"=",
"[",
"\"total_prefiltered_loci\"",
",",
"\"filtered_by_rm_duplicates\"",
",",
"\"filtered_by_max_indels\"",
",",
"\"filtered_by_max_snps\"",
",",
"\"filtered_by_max_shared_het\"",
",",
"\"filtered_by_min_sample\"",
",",
"\"filtered_by_max_alleles\"",
",",
"\"total_filtered_loci\"",
"]",
")",
"## load the h5 database",
"co5",
"=",
"h5py",
".",
"File",
"(",
"data",
".",
"database",
",",
"'r'",
")",
"while",
"start",
"<",
"nloci",
":",
"hslice",
"=",
"[",
"start",
",",
"start",
"+",
"optim",
"]",
"## load each array",
"afilt",
"=",
"co5",
"[",
"\"filters\"",
"]",
"[",
"hslice",
"[",
"0",
"]",
":",
"hslice",
"[",
"1",
"]",
",",
"]",
"asnps",
"=",
"co5",
"[",
"\"snps\"",
"]",
"[",
"hslice",
"[",
"0",
"]",
":",
"hslice",
"[",
"1",
"]",
",",
"]",
"## get subarray results from filter array",
"# max_indels, max_snps, max_hets, min_samps, bad_edges, max_alleles",
"filters",
"+=",
"afilt",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"applied",
"[",
"\"filtered_by_rm_duplicates\"",
"]",
"+=",
"afilt",
"[",
":",
",",
"0",
"]",
".",
"sum",
"(",
")",
"mask",
"=",
"afilt",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"np",
".",
"bool",
")",
"applied",
"[",
"\"filtered_by_max_indels\"",
"]",
"+=",
"afilt",
"[",
"~",
"mask",
",",
"1",
"]",
".",
"sum",
"(",
")",
"mask",
"=",
"afilt",
"[",
":",
",",
"0",
":",
"2",
"]",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
"applied",
"[",
"\"filtered_by_max_snps\"",
"]",
"+=",
"afilt",
"[",
"~",
"mask",
",",
"2",
"]",
".",
"sum",
"(",
")",
"mask",
"=",
"afilt",
"[",
":",
",",
"0",
":",
"3",
"]",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
"applied",
"[",
"\"filtered_by_max_shared_het\"",
"]",
"+=",
"afilt",
"[",
"~",
"mask",
",",
"3",
"]",
".",
"sum",
"(",
")",
"mask",
"=",
"afilt",
"[",
":",
",",
"0",
":",
"4",
"]",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
"applied",
"[",
"\"filtered_by_min_sample\"",
"]",
"+=",
"afilt",
"[",
"~",
"mask",
",",
"4",
"]",
".",
"sum",
"(",
")",
"mask",
"=",
"afilt",
"[",
":",
",",
"0",
":",
"5",
"]",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
"applied",
"[",
"\"filtered_by_max_alleles\"",
"]",
"+=",
"afilt",
"[",
"~",
"mask",
",",
"5",
"]",
".",
"sum",
"(",
")",
"passed",
"+=",
"np",
".",
"sum",
"(",
"afilt",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"==",
"0",
")",
"## get filter to count snps for only passed loci",
"## should we filter by all vars, or just by pis? doing all var now.",
"apply_filter",
"=",
"afilt",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
"## get snps counts",
"snplocs",
"=",
"asnps",
"[",
"~",
"apply_filter",
",",
":",
"]",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"varlocs",
"=",
"snplocs",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"varcounts",
".",
"update",
"(",
"Counter",
"(",
"varlocs",
")",
")",
"#snpcounts.update(Counter(snplocs[:, 0]))",
"piscounts",
".",
"update",
"(",
"Counter",
"(",
"snplocs",
"[",
":",
",",
"1",
"]",
")",
")",
"## increase counter to advance through h5 database",
"start",
"+=",
"optim",
"## record filtering of loci from total to final",
"filtdat",
"=",
"pd",
".",
"Series",
"(",
"np",
".",
"concatenate",
"(",
"[",
"[",
"nloci",
"]",
",",
"filters",
",",
"[",
"passed",
"]",
"]",
")",
",",
"name",
"=",
"\"total_filters\"",
",",
"index",
"=",
"[",
"\"total_prefiltered_loci\"",
",",
"\"filtered_by_rm_duplicates\"",
",",
"\"filtered_by_max_indels\"",
",",
"\"filtered_by_max_snps\"",
",",
"\"filtered_by_max_shared_het\"",
",",
"\"filtered_by_min_sample\"",
",",
"\"filtered_by_max_alleles\"",
",",
"\"total_filtered_loci\"",
"]",
")",
"retained",
"=",
"pd",
".",
"Series",
"(",
"[",
"0",
"]",
"*",
"8",
",",
"name",
"=",
"\"retained_loci\"",
",",
"index",
"=",
"[",
"\"total_prefiltered_loci\"",
",",
"\"filtered_by_rm_duplicates\"",
",",
"\"filtered_by_max_indels\"",
",",
"\"filtered_by_max_snps\"",
",",
"\"filtered_by_max_shared_het\"",
",",
"\"filtered_by_min_sample\"",
",",
"\"filtered_by_max_alleles\"",
",",
"\"total_filtered_loci\"",
"]",
")",
"retained",
"[",
"\"total_prefiltered_loci\"",
"]",
"=",
"nloci",
"retained",
"[",
"\"filtered_by_rm_duplicates\"",
"]",
"=",
"nloci",
"-",
"applied",
"[",
"\"filtered_by_rm_duplicates\"",
"]",
"retained",
"[",
"\"filtered_by_max_indels\"",
"]",
"=",
"retained",
"[",
"\"filtered_by_rm_duplicates\"",
"]",
"-",
"applied",
"[",
"\"filtered_by_max_indels\"",
"]",
"retained",
"[",
"\"filtered_by_max_snps\"",
"]",
"=",
"retained",
"[",
"\"filtered_by_max_indels\"",
"]",
"-",
"applied",
"[",
"\"filtered_by_max_snps\"",
"]",
"retained",
"[",
"\"filtered_by_max_shared_het\"",
"]",
"=",
"retained",
"[",
"\"filtered_by_max_snps\"",
"]",
"-",
"applied",
"[",
"\"filtered_by_max_shared_het\"",
"]",
"retained",
"[",
"\"filtered_by_min_sample\"",
"]",
"=",
"retained",
"[",
"\"filtered_by_max_shared_het\"",
"]",
"-",
"applied",
"[",
"\"filtered_by_min_sample\"",
"]",
"retained",
"[",
"\"filtered_by_max_alleles\"",
"]",
"=",
"retained",
"[",
"\"filtered_by_min_sample\"",
"]",
"-",
"applied",
"[",
"\"filtered_by_max_alleles\"",
"]",
"retained",
"[",
"\"total_filtered_loci\"",
"]",
"=",
"passed",
"print",
"(",
"u\"\\n\\n## The number of loci caught by each filter.\"",
"+",
"u\"\\n## ipyrad API location: [assembly].stats_dfs.s7_filters\\n\"",
",",
"file",
"=",
"outstats",
")",
"data",
".",
"stats_dfs",
".",
"s7_filters",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"filtdat",
",",
"applied",
",",
"retained",
"]",
")",
".",
"T",
"data",
".",
"stats_dfs",
".",
"s7_filters",
".",
"to_string",
"(",
"buf",
"=",
"outstats",
")",
"########################################################################",
"## make dataframe of sample_coverages",
"## samplecounts is len of anames from db. Save only samples in samples.",
"#print(samplecounts)",
"#samples = [i.name for i in samples]",
"## get sample names in the order of anames",
"#sids = [list(anames).index(i) for i in samples]",
"#covdict = {name: val for name, val in zip(np.array(samples)[sidx], samplecounts)}",
"#covdict = {name: val for name, val in zip(samples, samplecounts[sidx])}",
"covdict",
"=",
"pd",
".",
"Series",
"(",
"samplecounts",
",",
"name",
"=",
"\"sample_coverage\"",
",",
"index",
"=",
"anames",
")",
"covdict",
"=",
"covdict",
"[",
"covdict",
"!=",
"0",
"]",
"print",
"(",
"u\"\\n\\n\\n## The number of loci recovered for each Sample.\"",
"+",
"u\"\\n## ipyrad API location: [assembly].stats_dfs.s7_samples\\n\"",
",",
"file",
"=",
"outstats",
")",
"data",
".",
"stats_dfs",
".",
"s7_samples",
"=",
"pd",
".",
"DataFrame",
"(",
"covdict",
")",
"data",
".",
"stats_dfs",
".",
"s7_samples",
".",
"to_string",
"(",
"buf",
"=",
"outstats",
")",
"########################################################################",
"## get stats for locus coverage",
"lrange",
"=",
"range",
"(",
"1",
",",
"len",
"(",
"samples",
")",
"+",
"1",
")",
"locdat",
"=",
"pd",
".",
"Series",
"(",
"locuscounts",
",",
"name",
"=",
"\"locus_coverage\"",
",",
"index",
"=",
"lrange",
")",
"start",
"=",
"data",
".",
"paramsdict",
"[",
"\"min_samples_locus\"",
"]",
"-",
"1",
"locsums",
"=",
"pd",
".",
"Series",
"(",
"{",
"i",
":",
"np",
".",
"sum",
"(",
"locdat",
".",
"values",
"[",
"start",
":",
"i",
"]",
")",
"for",
"i",
"in",
"lrange",
"}",
",",
"name",
"=",
"\"sum_coverage\"",
",",
"index",
"=",
"lrange",
")",
"print",
"(",
"u\"\\n\\n\\n## The number of loci for which N taxa have data.\"",
"+",
"u\"\\n## ipyrad API location: [assembly].stats_dfs.s7_loci\\n\"",
",",
"file",
"=",
"outstats",
")",
"data",
".",
"stats_dfs",
".",
"s7_loci",
"=",
"pd",
".",
"concat",
"(",
"[",
"locdat",
",",
"locsums",
"]",
",",
"axis",
"=",
"1",
")",
"data",
".",
"stats_dfs",
".",
"s7_loci",
".",
"to_string",
"(",
"buf",
"=",
"outstats",
")",
"#########################################################################",
"## get stats for SNP_distribution",
"try",
":",
"smax",
"=",
"max",
"(",
"[",
"i",
"+",
"1",
"for",
"i",
"in",
"varcounts",
"if",
"varcounts",
"[",
"i",
"]",
"]",
")",
"except",
"Exception",
"as",
"inst",
":",
"raise",
"IPyradWarningExit",
"(",
"\"\"\"\n Exception: empty varcounts array. This could be because no samples \n passed filtering, or it could be because you have overzealous filtering.\n Check the values for `trim_loci` and make sure you are not trimming the\n edge too far\n \"\"\"",
")",
"vardat",
"=",
"pd",
".",
"Series",
"(",
"varcounts",
",",
"name",
"=",
"\"var\"",
",",
"index",
"=",
"range",
"(",
"smax",
")",
")",
".",
"fillna",
"(",
"0",
")",
"sumd",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"smax",
")",
":",
"sumd",
"[",
"i",
"]",
"=",
"np",
".",
"sum",
"(",
"[",
"i",
"*",
"vardat",
".",
"values",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"i",
"+",
"1",
")",
"]",
")",
"varsums",
"=",
"pd",
".",
"Series",
"(",
"sumd",
",",
"name",
"=",
"\"sum_var\"",
",",
"index",
"=",
"range",
"(",
"smax",
")",
")",
"pisdat",
"=",
"pd",
".",
"Series",
"(",
"piscounts",
",",
"name",
"=",
"\"pis\"",
",",
"index",
"=",
"range",
"(",
"smax",
")",
")",
".",
"fillna",
"(",
"0",
")",
"sumd",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"smax",
")",
":",
"sumd",
"[",
"i",
"]",
"=",
"np",
".",
"sum",
"(",
"[",
"i",
"*",
"pisdat",
".",
"values",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"i",
"+",
"1",
")",
"]",
")",
"pissums",
"=",
"pd",
".",
"Series",
"(",
"sumd",
",",
"name",
"=",
"\"sum_pis\"",
",",
"index",
"=",
"range",
"(",
"smax",
")",
")",
"print",
"(",
"u\"\\n\\n\\n## The distribution of SNPs (var and pis) per locus.\"",
"+",
"u\"\\n## var = Number of loci with n variable sites (pis + autapomorphies)\"",
"+",
"u\"\\n## pis = Number of loci with n parsimony informative site (minor allele in >1 sample)\"",
"+",
"u\"\\n## ipyrad API location: [assembly].stats_dfs.s7_snps\\n\"",
",",
"file",
"=",
"outstats",
")",
"data",
".",
"stats_dfs",
".",
"s7_snps",
"=",
"pd",
".",
"concat",
"(",
"[",
"vardat",
",",
"varsums",
",",
"pisdat",
",",
"pissums",
"]",
",",
"axis",
"=",
"1",
")",
"data",
".",
"stats_dfs",
".",
"s7_snps",
".",
"to_string",
"(",
"buf",
"=",
"outstats",
")",
"##########################################################################",
"## print the stats summary (-r summary) with final sample loci data.",
"fullstat",
"=",
"data",
".",
"stats",
"fullstat",
"[",
"'state'",
"]",
"=",
"7",
"fullstat",
"[",
"\"loci_in_assembly\"",
"]",
"=",
"data",
".",
"stats_dfs",
".",
"s7_samples",
"print",
"(",
"u\"\\n\\n\\n## Final Sample stats summary\\n\"",
",",
"file",
"=",
"outstats",
")",
"fullstat",
".",
"to_string",
"(",
"buf",
"=",
"outstats",
")",
"## close it",
"outstats",
".",
"close",
"(",
")",
"co5",
".",
"close",
"(",
")"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|
valid
|
select_samples
|
Get the row index of samples that are included. If samples are in the
'excluded' they were already filtered out of 'samples' during _get_samples.
|
ipyrad/assemble/write_outfiles.py
|
def select_samples(dbsamples, samples, pidx=None):
    """
    Get the row index of samples that are included. If samples are in the
    'excluded' they were already filtered out of 'samples' during _get_samples.
    """
    ## get index from dbsamples
    samples = [i.name for i in samples]
    if pidx:
        sidx = [list(dbsamples[pidx]).index(i) for i in samples]
    else:
        sidx = [list(dbsamples).index(i) for i in samples]
    sidx.sort()
    return sidx
|
def select_samples(dbsamples, samples, pidx=None):
    """
    Get the row index of samples that are included. If samples are in the
    'excluded' they were already filtered out of 'samples' during _get_samples.
    """
    ## get index from dbsamples
    samples = [i.name for i in samples]
    if pidx:
        sidx = [list(dbsamples[pidx]).index(i) for i in samples]
    else:
        sidx = [list(dbsamples).index(i) for i in samples]
    sidx.sort()
    return sidx
|
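A hypothetical usage sketch of select_samples: the Sample stand-in class and the sample names below are invented for illustration, and dbsamples stands in for a database sample-name array such as the anames read from the HDF5 file in make_stats above. The point is that the returned indices are sorted row positions in the database's own ordering, so downstream slices stay aligned with the stored arrays.

import numpy as np

class Sample(object):
    # stand-in for ipyrad's Sample object; only .name is needed here
    def __init__(self, name):
        self.name = name

# select_samples copied verbatim from the code field above
def select_samples(dbsamples, samples, pidx=None):
    ## get index from dbsamples
    samples = [i.name for i in samples]
    if pidx:
        sidx = [list(dbsamples[pidx]).index(i) for i in samples]
    else:
        sidx = [list(dbsamples).index(i) for i in samples]
    sidx.sort()
    return sidx

dbsamples = np.array(["1A_0", "1B_0", "1C_0", "1D_0"])   # order stored in the database
keep = [Sample("1D_0"), Sample("1B_0")]                  # samples chosen for output
print(select_samples(dbsamples, keep))                   # -> [1, 3]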
[
"Get",
"the",
"row",
"index",
"of",
"samples",
"that",
"are",
"included",
".",
"If",
"samples",
"are",
"in",
"the",
"excluded",
"they",
"were",
"already",
"filtered",
"out",
"of",
"samples",
"during",
"_get_samples",
"."
] |
dereneaton/ipyrad
|
python
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/write_outfiles.py#L326-L338
|
[
"def",
"select_samples",
"(",
"dbsamples",
",",
"samples",
",",
"pidx",
"=",
"None",
")",
":",
"## get index from dbsamples",
"samples",
"=",
"[",
"i",
".",
"name",
"for",
"i",
"in",
"samples",
"]",
"if",
"pidx",
":",
"sidx",
"=",
"[",
"list",
"(",
"dbsamples",
"[",
"pidx",
"]",
")",
".",
"index",
"(",
"i",
")",
"for",
"i",
"in",
"samples",
"]",
"else",
":",
"sidx",
"=",
"[",
"list",
"(",
"dbsamples",
")",
".",
"index",
"(",
"i",
")",
"for",
"i",
"in",
"samples",
"]",
"sidx",
".",
"sort",
"(",
")",
"return",
"sidx"
] |
5eeb8a178160f45faf71bf47cec4abe998a575d1
|