Tag: filter

Related posts
  • Popularity 2
    2023-10-23 10:20
    71 reads
    0 comments
    import pandas as pd
    import re
    import os
    from tkinter import messagebox

    # ------------------------- update folder path ----------------------
    folder = r'D:\ NINK'
    rawdata_name = 'rawdataTrans.csv'
    # ------------------------- update filter spec path ------------------
    spec_path = r'D:\ spec_filter_Big.csv'
    # ------------------------- update parameters ------------------------
    Good = '.'
    Fail = 'X'
    wafer_column_name = 'WAFER'
    Xcord_column = 'DIE_X'
    Ycord_column = 'DIE_Y'
    Filter = 'Filter'
    n_star = 10          # first map-body line in the wafer-map text file
    n_end = 23           # last map-body line
    # ------------------------- update parameters ------------------------

    file_path = os.path.join(folder, rawdata_name)
    rawdata = pd.read_csv(file_path, encoding="utf-8")
    # NOTE: the index_col argument was lost in the source; 0 (the parameter-name column) is assumed
    CP_SPC = pd.read_csv(spec_path, index_col=0, encoding="utf-8")
    column_list = list(rawdata.columns)

    # NOTE: the original condition was garbled in the source (it reads "if True:");
    # judging by the else-branch warning it most likely checked that every spec
    # parameter exists in the raw-data columns, so that check is assumed here.
    if set(CP_SPC.index).issubset(column_list):
        # NOTE: the assignment target was lost; initialising the filter column to 0 (pass) is assumed
        rawdata[Filter] = 0
        for row_index in rawdata.index:
            for column_index_key in CP_SPC.index:
                try:
                    # NOTE: the .loc index expressions and the spec column names
                    # ('LSL', 'USL', 'BIN') were lost in the source and are assumptions.
                    if rawdata.loc[row_index, column_index_key] < CP_SPC.loc[column_index_key, 'LSL'] or \
                            rawdata.loc[row_index, column_index_key] > CP_SPC.loc[column_index_key, 'USL'] or \
                            pd.isna(rawdata.loc[row_index, column_index_key]):
                        rawdata.loc[row_index, Filter] = CP_SPC.loc[column_index_key, 'BIN']
                        break
                    else:
                        continue
                except KeyError:
                    continue

        def caculate_CP_bin(rawdata, split, key, bin, BIN):
            # share of dies on wafer `key` whose column `BIN` equals `bin`
            if rawdata[(rawdata[split] == key) & (rawdata[BIN] == bin)] is not None:
                bin_rate = rawdata[(rawdata[split] == key) & (rawdata[BIN] == bin)].size / \
                           rawdata[rawdata[split] == key].size
            else:
                bin_rate = 0
            return bin_rate

        waferlist = sorted(rawdata[wafer_column_name].unique())
        bin0, bin1, bin2, bin3, bin4, bin5 = [], [], [], [], [], []
        bin0_2, bin1_2, bin2_2, bin3_2, bin4_2, bin5_2 = [], [], [], [], [], []
        for Yield_wafer_number in waferlist:
            bin0.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=0, BIN='BIN'))
            bin0_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=0, BIN=Filter))
            bin1.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=1, BIN='BIN'))
            bin1_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=1, BIN=Filter))
            bin2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=2, BIN='BIN'))
            bin2_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=2, BIN=Filter))
            bin3.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=3, BIN='BIN'))
            bin3_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=3, BIN=Filter))
            bin4.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=4, BIN='BIN'))
            bin4_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=4, BIN=Filter))
            bin5.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=5, BIN='BIN'))
            bin5_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=5, BIN=Filter))

        yield_key2 = {'WAFER': waferlist,
                      'Yield': bin0, 'IGES_Fail': bin1, 'ICES_Fail': bin2,
                      'VTH_Fail': bin3, 'Delta_Vth fail': bin4, 'VCESAT_Fail': bin5,
                      'Yield_filter': bin0_2, 'IGES_Fail_filter': bin1_2, 'ICES_Fail_filter': bin2_2,
                      'VTH_Fail_filter': bin3_2, 'Delta_Vth fail_filter': bin4_2, 'VCESAT_Fail_filter': bin5_2}
        Yield_key_table2 = pd.DataFrame(yield_key2)
        yieldTAB = os.path.join(folder, 'yield for compare.csv')
        Yield_key_table2.to_csv(yieldTAB, index=False)

        # NOTE: the list of wafer-map file names was lost in the source; fill it in here
        map_file_names = []
        for map_file_name in map_file_names:
            map_file_path = os.path.join(folder, map_file_name)
            # NOTE: the slice that extracts the wafer number from the file name was lost;
            # taking the leading characters is an assumption
            wafer = int(map_file_name[:2])
            with open(map_file_path) as f:
                raw_list = f.readlines()
            Map_tables = rawdata[rawdata[wafer_column_name] == wafer].pivot_table(
                index=Ycord_column, columns=Xcord_column, values=Filter)
            new = Map_tables.fillna(' ').replace(0.0, Good).replace(1.0, Fail).replace(2.0, Fail) \
                            .replace(3.0, Fail).replace(5.0, Fail).replace(7.0, Fail).replace(4.0, Fail)
            # NOTE: the loop header was garbled; iterating over the rendered map rows is assumed
            filter_list = []
            for array in new.values.astype(str):
                filter_list.append(''.join(list(array)))
            filter_list.append('END MAP')
            # NOTE: the list indices on raw_list / filter_list were lost; raw_list[t]
            # (map-body lines n_star..n_end of the original file) and filter_list[t - n_star] are assumed
            for t in range(n_star, n_end, 1):
                merged_str = ''
                for raw, filter in zip(raw_list[t], filter_list[t - n_star]):
                    if filter == Fail:
                        merged_str += filter
                    else:
                        merged_str += raw
                raw_list[t] = merged_str + '\n'
            new_failure_count = 0
            nof_die = 0
            for t in range(n_star, n_end - 1):
                new_failure_count += raw_list[t].count(Fail)
                nof_die += raw_list[t].count(Good) + raw_list[t].count(Fail)
            # NOTE: which header lines receive the summary strings was lost; the indices below are placeholders
            raw_list[3] = 'Failed Die(X) : {}'.format(new_failure_count) + '\n'
            raw_list[4] = 'CP Good Die(.) : {}'.format(nof_die - new_failure_count) + '\n'
            raw_list[5] = 'CP Good Die(.) : {}'.format(nof_die - new_failure_count)
            with open(map_file_path, 'w') as wr_new:
                for l in raw_list:
                    wr_new.write(l)

        output_folder = os.path.join(folder, 'newformat.csv')
        rawdata.to_csv(output_folder, index=False, encoding="utf-8_sig")
        messagebox.showinfo('Status', 'Map & 数据已成功导出')          # "data exported successfully"
    else:
        messagebox.showwarning('注意', 'columns not match, please check rawdata')   # '注意' = "attention"
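    As a quick way to sanity-check the bin-rate step without the real CSV files, here is a minimal, self-contained sketch that mirrors the post's caculate_CP_bin helper on a made-up three-die DataFrame; the wafer number, bin codes and filter values are invented purely for illustration.

    import pandas as pd

    def caculate_CP_bin(rawdata, split, key, bin, BIN):
        # fraction of rows on wafer `key` whose column `BIN` equals `bin`
        # (the same ratio the script above tabulates per wafer)
        subset = rawdata[(rawdata[split] == key) & (rawdata[BIN] == bin)]
        return subset.size / rawdata[rawdata[split] == key].size

    # toy raw data: wafer 1 has three dies; one fails the tester (BIN 2),
    # a second one is additionally rejected by the spec filter (Filter 2)
    toy = pd.DataFrame({'WAFER':  [1, 1, 1],
                        'BIN':    [0, 0, 2],
                        'Filter': [0, 2, 2]})

    print(caculate_CP_bin(toy, split='WAFER', key=1, bin=0, BIN='BIN'))      # 0.666... (tester yield)
    print(caculate_CP_bin(toy, split='WAFER', key=1, bin=0, BIN='Filter'))   # 0.333... (filtered yield)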
  • Popularity 3
    2023-10-23 10:18
    59 reads
    0 comments
    import pandas as pd
    import re
    import os
    from tkinter import messagebox

    # ------------------------- update folder path ----------------------
    folder = r'D:\ 联电自研 \IGBT HH\MAP\ENEPIG_DOE5\FA37-9341@202\trial'
    rawdata_name = 'rawdataTrans.csv'
    # ------------------------- update filter spec path ------------------
    spec_path = r'D:\ 联电自研 \IGBT HH\MAP\ENEPIG_DOE5\spec_filter.csv'
    # --------------------------------------------------------------------
    Good = '.'
    Fail = 'X'
    wafer_column_name = 'WAFER'
    Xcord_column = 'X'
    Ycord_column = 'Y'
    Filter = 'Filter'

    file_path = os.path.join(folder, rawdata_name)
    rawdata = pd.read_csv(file_path, encoding="utf-8")
    # NOTE: the index_col argument was lost in the source; 0 is assumed
    CP_SPC = pd.read_csv(spec_path, index_col=0, encoding="utf-8")
    column_list = list(rawdata.columns)

    # NOTE: the original condition was garbled in the source (it reads "if True:");
    # a check that the spec parameters exist in the raw-data columns is assumed,
    # which matches the else-branch warning.
    if set(CP_SPC.index).issubset(column_list):
        rawdata[Filter] = 0      # NOTE: assignment target lost in the source; the filter column is assumed
        for row_index in rawdata.index:
            for column_index_key in CP_SPC.index:
                try:
                    # NOTE: the .loc index expressions and the spec column names
                    # ('LSL', 'USL', 'BIN') were lost in the source and are assumptions.
                    if rawdata.loc[row_index, column_index_key] < CP_SPC.loc[column_index_key, 'LSL'] or \
                            rawdata.loc[row_index, column_index_key] > CP_SPC.loc[column_index_key, 'USL'] or \
                            pd.isna(rawdata.loc[row_index, column_index_key]):
                        rawdata.loc[row_index, Filter] = CP_SPC.loc[column_index_key, 'BIN']
                        break
                    else:
                        continue
                except KeyError:
                    continue

        def caculate_CP_bin(rawdata, split, key, bin, BIN):
            # share of dies on wafer `key` whose column `BIN` equals `bin`
            if rawdata[(rawdata[split] == key) & (rawdata[BIN] == bin)] is not None:
                bin_rate = rawdata[(rawdata[split] == key) & (rawdata[BIN] == bin)].size / \
                           rawdata[rawdata[split] == key].size
            else:
                bin_rate = 0
            return bin_rate

        waferlist = sorted(rawdata[wafer_column_name].unique())
        bin0, bin1, bin2, bin3, bin4, bin5 = [], [], [], [], [], []
        bin0_2, bin1_2, bin2_2, bin3_2, bin4_2, bin5_2 = [], [], [], [], [], []
        for Yield_wafer_number in waferlist:
            bin0.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=0, BIN='BIN'))
            bin0_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=0, BIN=Filter))
            bin1.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=1, BIN='BIN'))
            bin1_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=1, BIN=Filter))
            bin2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=2, BIN='BIN'))
            bin2_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=2, BIN=Filter))
            bin3.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=3, BIN='BIN'))
            bin3_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=3, BIN=Filter))
            bin4.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=4, BIN='BIN'))
            bin4_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=4, BIN=Filter))
            bin5.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=5, BIN='BIN'))
            bin5_2.append(caculate_CP_bin(rawdata, split=wafer_column_name, key=Yield_wafer_number, bin=5, BIN=Filter))

        yield_key2 = {'WAFER': waferlist,
                      'Yield': bin0, 'IGES_Fail': bin1, 'ICES_Fail': bin2,
                      'VTH_Fail': bin3, 'Delta_Vth fail': bin4, 'VCESAT_Fail': bin5,
                      'Yield_filter': bin0_2, 'IGES_Fail_filter': bin1_2, 'ICES_Fail_filter': bin2_2,
                      'VTH_Fail_filter': bin3_2, 'Delta_Vth fail_filter': bin4_2, 'VCESAT_Fail_filter': bin5_2}
        Yield_key_table2 = pd.DataFrame(yield_key2)
        yieldTAB = os.path.join(folder, 'yield for compare.csv')
        Yield_key_table2.to_csv(yieldTAB, index=False)

        # NOTE: the list of wafer-map file names was lost in the source; fill it in here
        map_file_names = []
        for map_file_name in map_file_names:
            map_file_path = os.path.join(folder, map_file_name)
            wafer = int(map_file_name[:2])   # NOTE: the slice taken from the file name is an assumption
            with open(map_file_path) as f:
                raw_list = f.readlines()
            Map_tables = rawdata[rawdata[wafer_column_name] == wafer].pivot_table(
                index=Ycord_column, columns=Xcord_column, values=Filter)
            new = Map_tables.fillna(' ').replace(0.0, Good).replace(1.0, Fail).replace(2.0, Fail) \
                            .replace(3.0, Fail).replace(5.0, Fail).replace(7.0, Fail).replace(4.0, Fail)
            # NOTE: the loop header was garbled; iterating over the rendered map rows is assumed
            filter_list = []
            for array in new.values.astype(str):
                filter_list.append(''.join(list(array)))
            filter_list.append('END MAP')
            n_star = 10          # first map-body line in the wafer-map text file
            n_end = 29           # last map-body line
            # NOTE: the list indices on raw_list / filter_list were lost and are assumed below
            for t in range(n_star, n_end, 1):
                merged_str = ''
                for raw, filter in zip(raw_list[t], filter_list[t - n_star]):
                    if filter == Fail:
                        merged_str += filter
                    else:
                        merged_str += raw
                raw_list[t] = merged_str + '\n'
            new_failure_count = 0
            nof_die = 0
            for t in range(n_star, n_end - 1):
                new_failure_count += raw_list[t].count(Fail)
                nof_die += raw_list[t].count(Good) + raw_list[t].count(Fail)
            # NOTE: which header lines receive the summary strings was lost; the indices below are placeholders
            raw_list[3] = 'Failed Die(X) : {}'.format(new_failure_count) + '\n'
            raw_list[4] = 'CP Good Die(.) : {}'.format(nof_die - new_failure_count) + '\n'
            raw_list[5] = 'CP Good Die(.) : {}'.format(nof_die - new_failure_count)
            with open(map_file_path, 'w') as wr_new:
                for l in raw_list:
                    wr_new.write(l)

        output_folder = os.path.join(folder, 'newformat.csv')
        rawdata.to_csv(output_folder, index=False, encoding="utf-8_sig")
        messagebox.showinfo('Status', 'Map & 数据已成功导出')          # "data exported successfully"
    else:
        messagebox.showwarning('注意', 'columns not match, please check rawdata')   # '注意' = "attention"
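    Since the map-rendering step is the least obvious part of the script, here is a tiny self-contained sketch of the same pivot_table / replace idea on made-up data; only the column names ('WAFER', 'X', 'Y', 'Filter') and the '.' / 'X' symbols are taken from the post, everything else is invented.

    import pandas as pd

    # four dies on one wafer: Filter 0 = pass, any non-zero value = a fail bin
    toy = pd.DataFrame({'WAFER':  [1, 1, 1, 1],
                        'X':      [0, 1, 0, 1],
                        'Y':      [0, 0, 1, 1],
                        'Filter': [0.0, 2.0, 0.0, 5.0]})

    Good, Fail = '.', 'X'
    # arrange the filter values on the X/Y grid, then turn them into map symbols
    map_table = toy[toy['WAFER'] == 1].pivot_table(index='Y', columns='X', values='Filter')
    rendered = (map_table.fillna(' ')
                         .replace(0.0, Good)
                         .replace([1.0, 2.0, 3.0, 4.0, 5.0, 7.0], Fail))

    for row in rendered.values.astype(str):
        print(''.join(row))
    # prints:
    # .X
    # .X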
  • Popularity 18
    2013-6-4 19:49
    1992 reads
    0 comments
    One of my favourite new books is " The Information ", James Gleick's new opus. I frequently go back to this book, which is about information theory and Shannon's contributions, among others, to understanding its implications not only for engineering but for any aspect of research into the natural world. While it is technically rough going at times, what brings me back again and again is that it is "the biography of an idea," as one reviewer said. Gleick does not spare the reader by dumbing down the complex technical issues, yet he is able to interweave them with the intellectual exploits and personal experiences of those who, over the last several hundred years, have contributed to our understanding.
    Coincidentally, as I have been reading it, I have also been making my way through " Understanding the normal distribution ", Jack Crenshaw's most recent Insight blog on the importance of the Kalman algorithm in every aspect of electrical engineering and embedded systems design. According to Wikipedia (and Jack, of course), the Kalman filter algorithm uses a series of measurements observed over time, containing noise (random variations) and other inaccuracies, and produces estimates of unknown variables that tend to be more precise than those based on a single measurement alone. It operates recursively on streams of noisy input data to produce a statistically optimal estimate of the underlying system state, and it is commonly used for guidance, navigation and control of vehicles, and in a wide range of digital signal processing applications in wireless networks and MEMS sensor positioning.
    Jack's most recent blog is also tough going, but rewarding: once you have read it you will know that you have learned something valuable and useful. The usefulness of this algorithm is far from over. As with Gleick's book, each article and blog I read gives me a more nuanced understanding of this powerful idea, and I would like to continue building an online "biography" of this versatile algorithm. For that I need your help, with comments on the site, blogs and design articles submitted about your experiences, as well as hearing from you about interesting articles and papers you have read on this topic.
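    The recursion in the Wikipedia summary is easier to internalise with a toy example, so here is a minimal one-dimensional Kalman filter sketch (not taken from Gleick or Crenshaw): it fuses noisy readings of a constant level, and the signal value, noise variance and initial guesses are all invented for illustration.

    import random

    def kalman_1d(measurements, meas_var, process_var=1e-5, x0=0.0, p0=1.0):
        """Recursive 1-D Kalman filter for a (nearly) constant signal."""
        x, p = x0, p0                      # state estimate and its variance
        estimates = []
        for z in measurements:
            # predict: constant-signal model, so only the uncertainty grows
            p = p + process_var
            # update: blend prediction and measurement, weighted by their variances
            k = p / (p + meas_var)         # Kalman gain
            x = x + k * (z - x)
            p = (1 - k) * p
            estimates.append(x)
        return estimates

    # toy data: a constant level of 5.0 observed through noise with variance 0.25
    true_value, meas_var = 5.0, 0.25
    readings = [true_value + random.gauss(0, meas_var ** 0.5) for _ in range(50)]
    print('last raw reading :', round(readings[-1], 3))
    print('filtered estimate:', round(kalman_1d(readings, meas_var)[-1], 3))

    After a few dozen samples the filtered estimate sits much closer to 5.0 than any single noisy reading, which is exactly the "more precise than those based on a single measurement alone" property described above.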
  • Popularity 28
    2013-6-4 19:48
    2458 reads
    0 comments
    One of my favourite new books, and one I frequently go back to, is James Gleick's new opus titled " The Information ". It is about information theory and Shannon's contributions, among others, to understanding its implications not only for engineering but for any aspect of research into the natural world. While it is technically rough going at times, what brings me back again and again is that it is "the biography of an idea," as one reviewer said. Gleick does not spare the reader by dumbing down the complex technical issues, yet he is able to interweave them with the intellectual exploits and personal experiences of those who, over the last several hundred years, have contributed to our understanding.
    Coincidentally, as I have been reading it, I have also been making my way through " Understanding the normal distribution ", Jack Crenshaw's most recent Insight blog on the importance of the Kalman algorithm in every aspect of electrical engineering and embedded systems design. According to Wikipedia (and Jack, of course), the Kalman filter algorithm uses a series of measurements observed over time, containing noise (random variations) and other inaccuracies, and produces estimates of unknown variables that tend to be more precise than those based on a single measurement alone. It operates recursively on streams of noisy input data to produce a statistically optimal estimate of the underlying system state, and it is commonly used for guidance, navigation and control of vehicles, and in a wide range of digital signal processing applications in wireless networks and MEMS sensor positioning.
    Jack's most recent blog is also tough going, but rewarding: once you have read it you will know that you have learned something valuable and useful. The usefulness of this algorithm is far from over. As with Gleick's book, each article and blog I read gives me a more nuanced understanding of this powerful idea, and I would like to continue building an online "biography" of this versatile algorithm. For that I need your help, with comments on the site, blogs and design articles submitted about your experiences, as well as hearing from you about interesting articles and papers you have read on this topic.
  • Popularity 14
    2013-3-17 19:50
    4525 reads
    2 comments
    In my last blog we looked at an application in medical electronics. Let's continue with that here and look at a circuit used to eliminate noise from the signal processed by an ECG.
    Coming back to the ECG: it is a piece of equipment that measures the beating of the heart. We saw the bio-electric amplifier, which amplifies the signals generated by the heartbeat for further processing. If we plot the frequency spectrum of such an input signal, you are bound to find a peak at about 60 Hz (or 50 Hz in some cases), something like the diagram below.
    [Figure: spectrum of mains hum showing a sharp peak at the mains frequency; http://upload.wikimedia.org/wikipedia/commons/thumb/7/73/Mains_hum_spectrum.png/220px-Mains_hum_spectrum.png]
    You will see a peak at around 60 Hz. Where does it come from? It is 60 Hz noise, and the circuit we will be looking at is called the 60 Hz reject filter with gain.
    Why does this peak arise? The noise comes from interference from other current-carrying conductors in the same room. When current flows through a conductor it tends to induce a small current in any equipment near it: a wire carrying a large current sets up a magnetic field around it, according to the laws of electromagnetism, and this magnetic field in turn induces a current in any nearby circuit. This induced signal sits at 60 Hz, the mains frequency in many countries (50 Hz in others). So before processing our wanted signal we must filter out this noise. For that purpose we use a notch filter. A notch filter is a type of band-stop filter with a very steep notch at a particular frequency; at that frequency the signal is, ideally, completely attenuated.
    The notch filter we will use is the twin-T notch filter. It is made up of two T-sections, one low-pass and one high-pass, connected in parallel. The two sections are designed so that together they pass frequencies below and above the notch frequency, but not the notch frequency itself. Let's see the circuit for it.
    [Figure: twin-T notch filter with an op-amp gain stage]
    The input signal is applied to a twin-T network: the upper part is the low-pass section and the lower part is the high-pass section. Remember that at high frequencies the impedance of a capacitor is very low while that of an inductor is very high, and vice versa. As a result, in the upper network the capacitor's impedance at high frequencies is very low, almost a short, so the voltage across it is practically zero. A similar argument applies to the high-pass section. The design is as follows: the notch frequency is given by f0 = 1/(2*pi*R*C), where R = R0 = R1 and C = C1 = C2, with R0 = R1 = 2*R2 and C1 = C2 = C0/2.
    Gain can be introduced in this circuit because the op amp is connected as an amplifier; the gain is set by the ratio of the resistors R4 and R3.
    This is the frequency response:
    [Figure: frequency response of the notch filter]
    Note the notch at 60 Hz, and also note that the quality factor is not too good. For our application the quality factor must be much better than this. In practice a reject filter with variable Q is used; in that circuit the Q depends on the ratio of the resistors R6 and R7: increase this ratio and note how the Q varies.
    [Figure: variable-Q notch filter]
    You can check these videos out: https://www.youtube.com/watch?v=sBEFyMbi2Zo
    For another application of a notch filter: https://www.youtube.com/watch?v=01EB6W0HZuk
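    To tie the design rule above to real numbers, here is a small sketch that picks twin-T component values for a 60 Hz notch and evaluates the textbook response of the plain passive twin-T, H(s) = (1 + (sRC)^2) / (1 + 4sRC + (sRC)^2); the 100 nF starting capacitor is an arbitrary choice, and the op-amp gain stage and Q-enhancement from the post are not modelled.

    import math

    f0 = 60.0            # desired notch frequency, Hz
    C = 100e-9           # chosen series capacitor (100 nF), an arbitrary starting point

    # design rule from the post: f0 = 1/(2*pi*R*C), R0 = R1 = 2*R2, C1 = C2 = C0/2
    R = 1.0 / (2 * math.pi * f0 * C)
    print('series resistors  R0 = R1 = {:.0f} ohm'.format(R))
    print('shunt resistor    R2      = {:.0f} ohm'.format(R / 2))
    print('series capacitors C1 = C2 = {:.0f} nF'.format(C * 1e9))
    print('shunt capacitor   C0      = {:.0f} nF'.format(2 * C * 1e9))

    def twin_t_gain(f):
        """Magnitude of the unloaded passive twin-T response at frequency f (Hz)."""
        x = 1j * 2 * math.pi * f * R * C
        return abs((1 + x * x) / (1 + 4 * x + x * x))

    for f in (10, 50, 60, 70, 600):
        g = twin_t_gain(f)
        print('{:5.0f} Hz -> gain {:.4f} ({:.1f} dB)'.format(f, g, 20 * math.log10(max(g, 1e-12))))

    With 100 nF capacitors the series resistors come out near 26.5 kohm, and because the bare twin-T has a Q of only about 1/4 the response is already roughly 20 dB down even at 50 Hz and 70 Hz, which is the "quality factor is not too good" behaviour the post goes on to fix with the variable-Q circuit.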
Related resources