Stripped down recorder, copied bits from ozel

main
jebba 2022-01-09 13:15:55 -07:00
parent f9b1c96e88
commit 4c4ed2e4c1
3 changed files with 122 additions and 0 deletions

View File

@@ -2,6 +2,7 @@
witgit is free software for gamma-ray spectroscopy,
currently under development, intended to be used
with low-cost and DIY hardware running on a free operating system.
For educational use only, and probably not even that.
# Hardware

View File

@@ -3,4 +3,5 @@ msgpack
numpy
pandas
pyaudio
pyqtgraph
scipy

View File

@@ -10,3 +10,123 @@ https://github.com/ozel/DIY_particle_detector
@date: January 2022
"""
import sys
import time
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
import numpy as np
import pyqtgraph as pg
import pyaudio
import pandas as pd
import datetime
from functools import partial
from scipy.signal import argrelextrema

class Scope(QtWidgets.QMainWindow):
    def __init__(self, parent=None):
        global app
        QtWidgets.QMainWindow.__init__(self, parent)  # widget classes come from QtWidgets
        self.app = app
        self.app.aboutToQuit.connect(self.close)
        self.pcounter = 0
        self.creation_time = datetime.datetime.now()
        self.df = pd.DataFrame(columns=['ts', 'ptype'])
        self.ptypes = pd.Series(["alpha", "beta", "betagamma", "x-ray", "muon", "unknown"], dtype="category")
        self.thl = -300    # red threshold line: buffers dipping below this count as pulses
        self.hl = -1243    # green cursor, highlight line for measuring only
        self.peaks = []
        def audio_callback(in_data, frame_count, time_info, status):
            now = time.time()
            samples = np.frombuffer(in_data, dtype=np.int16)
            peak = samples.min()
            if peak < self.thl:
                t = datetime.datetime.fromtimestamp(now)
                print("* ", t, end="")
                pulse = pd.DataFrame()
                pulse = pulse.assign(ts=[t])
                if peak < -1243:
                    pulse = pulse.assign(ptype=[self.ptypes[0]])  # alpha
                    print(" alpha ", end="")
                else:
                    pulse = pulse.assign(ptype=[self.ptypes[1]])  # beta/electron
                    print(" elect ", end="")
                print(self.pcounter, " ", end="")
                print(peak)
                minima = argrelextrema(samples, np.less)
                if len(minima[0]) > 0:
                    # mean index of the local minima in this buffer
                    self.peaks.append(sum(minima[0]) / len(minima[0]))
                    self.peaks = self.peaks[-100:]  # only keep the last 100 for averaging
                pulse = pulse.assign(pulse=[samples])
                self.df = pd.concat([self.df, pulse], ignore_index=True, sort=False)
                self.pcounter += 1
                # calculate pulse rate in counts per second
                dt = now - self.lastupdate
                if dt <= 0:
                    dt = 0.000000000001
                cps2 = 1.0 / dt
                self.lastupdate = now
                self.cps = self.cps * 0.9 + cps2 * 0.1  # simple weighted average
                tx = 'Mean pulse rate: {cps:.1f} CPS'.format(cps=self.cps)
                avg_peak = round(sum(self.peaks) / len(self.peaks), 1) if self.peaks else 0.0
                self.label.setText(tx + ", THL (red): " + str(self.thl)
                                   + ", cursor (green): " + str(self.hl)
                                   + ", (avg peak: " + str(avg_peak) + ")")
            self.ydata = samples
            self.frame_counter += frame_count
            self.h2.setData(self.ydata)
            self.thlp.setData(4096 * [self.thl])
            self.hlp.setData(4096 * [self.hl])  # draw green highlight line
            return (in_data, pyaudio.paContinue)
        #### Create GUI elements ###########
        self.mainbox = QtWidgets.QWidget()
        self.setCentralWidget(self.mainbox)
        self.mainbox.setLayout(QtWidgets.QVBoxLayout())
        self.canvas = pg.GraphicsLayoutWidget()
        self.mainbox.layout().addWidget(self.canvas)
        self.label = QtWidgets.QLabel()
        self.mainbox.layout().addWidget(self.label)
        self.otherplot = self.canvas.addPlot()
        self.h2 = self.otherplot.plot(pen='y')    # yellow: latest audio buffer
        self.thlp = self.otherplot.plot(pen='r')  # red: threshold line
        self.hlp = self.otherplot.plot(pen='g')   # green: measuring cursor

        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(format=pyaudio.paInt16, channels=1, rate=48000,
                                  input=True, frames_per_buffer=4096,
                                  stream_callback=audio_callback)

        #### Set data #####################
        self.x = np.linspace(0, 50., num=100)         # not used elsewhere in this file
        self.X, self.Y = np.meshgrid(self.x, self.x)  # not used elsewhere in this file
        self.frame_counter = 0
        self.cps = 0.
        self.lastupdate = time.time()

        #### Start #####################
        self.stream.start_stream()
    def close_stream(self):
        self.stream.stop_stream()  # stop the stream before closing it
        self.stream.close()
    def close(self):
        timediff = datetime.datetime.now() - self.creation_time
        self.close_stream()
        if self.pcounter > 0:
            td_str = '-'.join(str(timediff).split(':')[:2])
            _ = self.df.to_pickle("./data" + self.creation_time.strftime("/pulses_%Y-%m-%d_%H-%M-%S")
                                  + "___" + str(self.pcounter) + "___" + td_str + ".pkl")
            print('Number of recorded waveforms:', self.pcounter, "of", self.frame_counter, "total audio frames")
            print(len(self.df[self.df['ptype'] == 'alpha']), "alphas detected")
            print(len(self.df[self.df['ptype'] == 'beta']), "electrons/betas detected")
        self.p.terminate()
        app.exit()

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    mainWin = Scope()
    mainWin.show()
    app.exec_()
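
Read in isolation, the detection rule in audio_callback reduces each 4096-sample buffer to its most negative sample: anything below the red threshold thl counts as a pulse, and pulses deeper than -1243 are tagged as alphas, the rest as betas/electrons. A minimal standalone sketch of that rule, assuming the default thresholds above (classify_buffer is a name introduced here for illustration, not part of the repo):

import numpy as np

THL = -300         # red threshold line from Scope.__init__
ALPHA_CUT = -1243  # green cursor; pulses deeper than this are treated as alphas

def classify_buffer(samples, thl=THL, alpha_cut=ALPHA_CUT):
    """Return 'alpha', 'beta' or None for one int16 audio buffer."""
    peak = samples.min()               # pulses are negative-going
    if peak >= thl:
        return None                    # no pulse in this buffer
    return "alpha" if peak < alpha_cut else "beta"

# synthetic 4096-sample buffer with one deep negative spike
buf = np.zeros(4096, dtype=np.int16)
buf[2000] = -2000
print(classify_buffer(buf))            # -> alpha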
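
On exit the recorder pickles the accumulated DataFrame (columns ts, ptype and pulse, the raw waveform) to ./data/pulses_<timestamp>___<count>___<runtime>.pkl. A hedged sketch of reading one of those files back for offline analysis; the filename here is only a placeholder:

import pandas as pd

# placeholder filename; the recorder embeds timestamp, pulse count and run time
df = pd.read_pickle("./data/pulses_2022-01-09_13-15-55___42___0-10.pkl")

print(df['ptype'].value_counts())            # pulses per category (alpha, beta, ...)
print(df['ts'].min(), "to", df['ts'].max())  # time span of the recording

# each row also carries the raw int16 waveform from the 4096-sample buffer
waveform = df['pulse'].iloc[0]
print(len(waveform), "samples, minimum", waveform.min())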